-rw-r--r-- Documentation/admin-guide/sysctl/net.rst | 3
-rw-r--r-- Documentation/bpf/bpf_devel_QA.rst | 29
-rw-r--r-- Documentation/bpf/bpf_lsm.rst | 142
-rw-r--r-- Documentation/bpf/drgn.rst | 213
-rw-r--r-- Documentation/bpf/index.rst | 6
-rw-r--r-- Documentation/devicetree/bindings/net/dsa/ocelot.txt | 116
-rw-r--r-- Documentation/devicetree/bindings/net/marvell,mvusb.yaml | 65
-rw-r--r-- Documentation/devicetree/bindings/net/qcom,ipa.yaml | 198
-rw-r--r-- Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml | 53
-rw-r--r-- Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml | 225
-rw-r--r-- Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt | 29
-rw-r--r-- Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/ptp/ptp-idt82p33.yaml | 45
-rw-r--r-- Documentation/networking/6lowpan.rst (renamed from Documentation/networking/6lowpan.txt) | 29
-rw-r--r-- Documentation/networking/bareudp.rst | 52
-rw-r--r-- Documentation/networking/device_drivers/mellanox/mlx5.rst | 2
-rw-r--r-- Documentation/networking/device_drivers/stmicro/stmmac.rst | 7
-rw-r--r-- Documentation/networking/devlink/bnxt.rst | 14
-rw-r--r-- Documentation/networking/devlink/devlink-flash.rst | 93
-rw-r--r-- Documentation/networking/devlink/devlink-info.rst | 144
-rw-r--r-- Documentation/networking/devlink/devlink-params.rst | 2
-rw-r--r-- Documentation/networking/devlink/devlink-region.rst | 14
-rw-r--r-- Documentation/networking/devlink/devlink-trap.rst | 35
-rw-r--r-- Documentation/networking/devlink/ice.rst | 96
-rw-r--r-- Documentation/networking/devlink/index.rst | 2
-rw-r--r-- Documentation/networking/devlink/mlx5.rst | 6
-rw-r--r-- Documentation/networking/ethtool-netlink.rst | 497
-rw-r--r-- Documentation/networking/filter.txt | 2
-rw-r--r-- Documentation/networking/index.rst | 2
-rw-r--r-- Documentation/networking/ip-sysctl.txt | 9
-rw-r--r-- Documentation/networking/page_pool.rst | 159
-rw-r--r-- Documentation/networking/sfp-phylink.rst | 49
-rw-r--r-- MAINTAINERS | 48
-rw-r--r-- arch/arm/boot/dts/imx6qdl-apalis.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi | 49
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am65.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am654-base-board.dts | 42
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts | 43
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi | 49
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j721e.dtsi | 1
-rw-r--r-- arch/arm64/configs/defconfig | 3
-rw-r--r-- arch/powerpc/kernel/vmlinux.lds.S | 6
-rw-r--r-- arch/riscv/Kconfig | 2
-rw-r--r-- arch/riscv/net/Makefile | 9
-rw-r--r-- arch/riscv/net/bpf_jit.h | 514
-rw-r--r-- arch/riscv/net/bpf_jit_comp32.c | 1310
-rw-r--r-- arch/riscv/net/bpf_jit_comp64.c (renamed from arch/riscv/net/bpf_jit_comp.c) | 605
-rw-r--r-- arch/riscv/net/bpf_jit_core.c | 166
-rw-r--r-- arch/s390/include/asm/qdio.h | 9
-rw-r--r-- arch/um/drivers/vector_kern.c | 1
-rw-r--r-- arch/x86/mm/init_32.c | 14
-rw-r--r-- arch/x86/net/bpf_jit_comp.c | 260
-rw-r--r-- drivers/base/core.c | 120
-rw-r--r-- drivers/base/power/power.h | 3
-rw-r--r-- drivers/base/power/sysfs.c | 55
-rw-r--r-- drivers/bluetooth/Kconfig | 4
-rw-r--r-- drivers/bluetooth/bfusb.c | 33
-rw-r--r-- drivers/bluetooth/btintel.c | 4
-rw-r--r-- drivers/bluetooth/btqca.c | 10
-rw-r--r-- drivers/bluetooth/btqca.h | 6
-rw-r--r-- drivers/bluetooth/btrtl.c | 12
-rw-r--r-- drivers/bluetooth/btrtl.h | 4
-rw-r--r-- drivers/bluetooth/btusb.c | 32
-rw-r--r-- drivers/bluetooth/hci_ag6xx.c | 2
-rw-r--r-- drivers/bluetooth/hci_h4.c | 4
-rw-r--r-- drivers/bluetooth/hci_h5.c | 49
-rw-r--r-- drivers/bluetooth/hci_intel.c | 2
-rw-r--r-- drivers/bluetooth/hci_qca.c | 174
-rw-r--r-- drivers/crypto/chelsio/Kconfig | 11
-rw-r--r-- drivers/crypto/chelsio/Makefile | 3
-rw-r--r-- drivers/crypto/chelsio/chcr_common.h | 135
-rw-r--r-- drivers/crypto/chelsio/chcr_core.c | 85
-rw-r--r-- drivers/crypto/chelsio/chcr_core.h | 7
-rw-r--r-- drivers/crypto/chelsio/chcr_ipsec.c | 2
-rw-r--r-- drivers/crypto/chelsio/chcr_ktls.c | 2020
-rw-r--r-- drivers/crypto/chelsio/chcr_ktls.h | 98
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls_cm.c | 29
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls_io.c | 73
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 4
-rw-r--r-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 5
-rw-r--r-- drivers/infiniband/hw/mlx5/mr.c | 65
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 2
-rw-r--r-- drivers/net/Kconfig | 15
-rw-r--r-- drivers/net/Makefile | 2
-rw-r--r-- drivers/net/bareudp.c | 817
-rw-r--r-- drivers/net/bonding/bond_main.c | 8
-rw-r--r-- drivers/net/bonding/bonding_priv.h | 5
-rw-r--r-- drivers/net/can/slcan.c | 7
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 58
-rw-r--r-- drivers/net/dsa/b53/b53_priv.h | 4
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 14
-rw-r--r-- drivers/net/dsa/bcm_sf2_cfp.c | 139
-rw-r--r-- drivers/net/dsa/lantiq_gswip.c | 4
-rw-r--r-- drivers/net/dsa/microchip/Kconfig | 1
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.c | 26
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.h | 3
-rw-r--r-- drivers/net/dsa/mt7530.c | 121
-rw-r--r-- drivers/net/dsa/mt7530.h | 11
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 492
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.h | 40
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.c | 285
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.h | 29
-rw-r--r-- drivers/net/dsa/mv88e6xxx/serdes.c | 452
-rw-r--r-- drivers/net/dsa/mv88e6xxx/serdes.h | 39
-rw-r--r-- drivers/net/dsa/ocelot/felix.c | 97
-rw-r--r-- drivers/net/dsa/ocelot/felix.h | 3
-rw-r--r-- drivers/net/dsa/ocelot/felix_vsc9959.c | 134
-rw-r--r-- drivers/net/dsa/qca/ar9331.c | 4
-rw-r--r-- drivers/net/dsa/sja1105/Makefile | 1
-rw-r--r-- drivers/net/dsa/sja1105/sja1105.h | 49
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_clocking.c | 4
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_dynamic_config.c | 24
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_ethtool.c | 133
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_flower.c | 340
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_main.c | 400
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_ptp.c | 285
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_ptp.h | 31
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_sgmii.h | 53
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_spi.c | 7
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_static_config.c | 5
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_static_config.h | 1
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_tas.c | 27
-rw-r--r-- drivers/net/dsa/vitesse-vsc73xx-core.c | 30
-rw-r--r-- drivers/net/dummy.c | 3
-rw-r--r-- drivers/net/ethernet/3com/3c509.c | 8
-rw-r--r-- drivers/net/ethernet/3com/3c515.c | 16
-rw-r--r-- drivers/net/ethernet/3com/3c589_cs.c | 2
-rw-r--r-- drivers/net/ethernet/3com/typhoon.c | 283
-rw-r--r-- drivers/net/ethernet/3com/typhoon.h | 4
-rw-r--r-- drivers/net/ethernet/adaptec/starfire.c | 19
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 2
-rw-r--r-- drivers/net/ethernet/agere/et131x.c | 1
-rw-r--r-- drivers/net/ethernet/agere/et131x.h | 1
-rw-r--r-- drivers/net/ethernet/alacritech/slicoss.c | 3
-rw-r--r-- drivers/net/ethernet/allwinner/sun4i-emac.c | 2
-rw-r--r-- drivers/net/ethernet/alteon/acenic.c | 5
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_com.c | 4
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 30
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.h | 16
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.c | 5
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 5
-rw-r--r-- drivers/net/ethernet/amd/nmclan_cs.c | 9
-rw-r--r-- drivers/net/ethernet/amd/pcnet32.c | 7
-rw-r--r-- drivers/net/ethernet/amd/sunlance.c | 10
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 27
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h | 1
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/main.c | 1
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/main.h | 1
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 1
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 1
-rw-r--r-- drivers/net/ethernet/aquantia/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/Makefile | 7
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 4
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_common.h | 1
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 176
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 6
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_macsec.c | 1777
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_macsec.h | 133
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 1
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 21
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 6
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 5
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 53
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 69
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Egress_registers.h | 73
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Ingress_registers.h | 77
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c | 2473
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.h | 323
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h | 914
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/ver.h | 12
-rw-r--r-- drivers/net/ethernet/arc/emac.h | 1
-rw-r--r-- drivers/net/ethernet/arc/emac_arc.c | 2
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 1
-rw-r--r-- drivers/net/ethernet/arc/emac_rockchip.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/atheros/ag71xx.c | 188
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 5
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c.h | 1
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 11
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e.h | 1
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.c | 6
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl2.c | 11
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 11
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.h | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 16
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 199
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 9
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 19
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 24
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 11
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 70
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 81
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 16
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.c | 11
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.h | 4
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/cadence/macb.h | 1
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 60
-rw-r--r-- drivers/net/ethernet/cavium/common/cavium_ptp.c | 10
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 13
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 13
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 5
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 6
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_console.c | 12
-rw-r--r-- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 6
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 9
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 3
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/common.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/version.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 37
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 28
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 303
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 10
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 132
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 28
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 79
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 32
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 25
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/l2t.c | 13
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/l2t.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sched.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/smt.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 54
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 30
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h | 62
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 11
-rw-r--r-- drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h | 4
-rw-r--r-- drivers/net/ethernet/cirrus/ep93xx_eth.c | 2
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic.h | 2
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_ethtool.c | 24
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 6
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 8
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_vic.h | 2
-rw-r--r-- drivers/net/ethernet/cortina/gemini.c | 4
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r-- drivers/net/ethernet/dec/tulip/de2104x.c | 15
-rw-r--r-- drivers/net/ethernet/dec/tulip/dmfe.c | 14
-rw-r--r-- drivers/net/ethernet/dec/tulip/tulip.h | 2
-rw-r--r-- drivers/net/ethernet/dec/tulip/tulip_core.c | 26
-rw-r--r-- drivers/net/ethernet/dec/tulip/uli526x.c | 13
-rw-r--r-- drivers/net/ethernet/dec/tulip/winbond-840.c | 12
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 9
-rw-r--r-- drivers/net/ethernet/dlink/sundance.c | 20
-rw-r--r-- drivers/net/ethernet/dnet.c | 1
-rw-r--r-- drivers/net/ethernet/dnet.h | 1
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 5
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 5
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.c | 3
-rw-r--r-- drivers/net/ethernet/fealnx.c | 20
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 18
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 17
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 7
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 61
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/enetc/Kconfig | 16
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.c | 70
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.h | 35
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 22
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_hw.h | 11
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 19
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_vf.c | 13
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 50
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.c | 10
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_tgec.c | 10
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.c | 13
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_ethtool.c | 35
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hip04_eth.c | 16
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 54
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 22
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 159
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 42
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 387
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 370
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 50
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 3
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c | 59
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.h | 3
-rw-r--r-- drivers/net/ethernet/intel/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000e/hw.h | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 7
-rw-r--r-- drivers/net/ethernet/intel/e1000e/mac.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 14
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ptp.c | 1
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k.h | 6
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf.h | 2
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 24
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 10
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 27
-rw-r--r-- drivers/net/ethernet/intel/ice/Makefile | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 17
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 15
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_base.c | 54
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_base.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 95
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.h | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 84
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 58
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_devids.h | 26
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_devlink.c | 416
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_devlink.h | 17
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 180
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 65
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_flow.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 325
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.h | 10
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 278
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_nvm.c | 508
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_nvm.h | 12
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sriov.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.c | 24
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 32
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 711
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 45
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.c | 36
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.h | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 22
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_hwmon.c | 6
-rw-r--r-- drivers/net/ethernet/intel/igbvf/ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/intel/igc/Makefile | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc.h | 12
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_defines.h | 6
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_dump.c | 323
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_ethtool.c | 83
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 144
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_regs.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 17
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/jme.c | 10
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 365
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 104
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/Kconfig | 6
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 192
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 16
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h | 8
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 38
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 169
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 32
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 79
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 287
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c | 13
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 163
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 49
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 182
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 861
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h | 13
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 42
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 648
-rw-r--r-- drivers/net/ethernet/marvell/skge.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/skge.h | 8
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/sky2.h | 8
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/crdump.c | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c | 286
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h | 58
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 107
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c | 218
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 253
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port.h | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 268
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 183
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 1369
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 180
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 115
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 55
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 134
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 210
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 269
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 72
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1143
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 54
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/Makefile | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c) | 220
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h) | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 374
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 69
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 985
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 106
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 82
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mr.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/rl.c | 130
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 106
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/mlxfw.h | 50
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c | 296
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 205
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 115
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 563
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 52
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/i2c.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 45
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/resources.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 221
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 96
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 192
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c | 268
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 165
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 63
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 54
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 267
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 543
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 102
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 263
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 596
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/trap.h | 2
-rw-r--r-- drivers/net/ethernet/micrel/ksz884x.c | 2
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.c | 123
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.h | 10
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_ace.c | 619
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_ace.h | 30
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_board.c | 163
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_flower.c | 272
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_police.c | 27
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_police.h | 11
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_tc.c | 24
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_vcap.h | 403
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 1
-rw-r--r-- drivers/net/ethernet/natsemi/jazzsonic.c | 31
-rw-r--r-- drivers/net/ethernet/natsemi/macsonic.c | 48
-rw-r--r-- drivers/net/ethernet/natsemi/sonic.c | 66
-rw-r--r-- drivers/net/ethernet/natsemi/sonic.h | 2
-rw-r--r-- drivers/net/ethernet/natsemi/xtsonic.c | 40
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/fw.h | 6
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/action.c | 4
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 4
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 3
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/qos_conf.c | 3
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.h | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c | 8
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 22
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_repr.h | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 24
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 2
-rw-r--r-- drivers/net/ethernet/ni/nixge.c | 22
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic.h | 3
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 27
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_debugfs.c | 8
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_dev.c | 46
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_dev.h | 1
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_devlink.c | 7
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 61
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_if.h | 38
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 455
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_lif.h | 20
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_main.c | 29
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_stats.c | 20
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 25
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_txrx.h | 1
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 2
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 21
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 38
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 15
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_filter.c | 10
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 23
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 7
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 36
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 13
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h | 3
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 444
-rw-r--r-- drivers/net/ethernet/realtek/r8169_phy_config.c | 18
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 104
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.h | 2
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/ef10_sriov.c | 66
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/efx.h | 18
-rw-r--r-- drivers/net/ethernet/sfc/efx_channels.c | 25
-rw-r--r-- drivers/net/ethernet/sfc/efx_common.c | 9
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 42
-rw-r--r-- drivers/net/ethernet/sfc/falcon/ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/sfc/falcon/falcon_boards.c | 6
-rw-r--r-- drivers/net/ethernet/sfc/falcon/net_driver.h | 2
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 8
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/rx_common.c | 6
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 3
-rw-r--r-- drivers/net/ethernet/sgi/ioc3-eth.c | 29
-rw-r--r-- drivers/net/ethernet/socionext/netsec.c | 31
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Kconfig | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 18
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 592
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 25
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 195
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 99
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxlgmac2.h | 22
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.c | 45
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.h | 27
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 16
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 165
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 27
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 332
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 9
-rw-r--r-- drivers/net/ethernet/sun/cassini.c | 28
-rw-r--r-- drivers/net/ethernet/sun/sungem.c | 30
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c | 17
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.h | 2
-rw-r--r-- drivers/net/ethernet/ti/Kconfig | 20
-rw-r--r-- drivers/net/ethernet/ti/Makefile | 3
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-ethtool.c | 747
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 1965
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.h | 142
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 1
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.c | 38
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.h | 4
-rw-r--r-- drivers/net/ethernet/ti/cpsw_new.c | 1
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 1
-rw-r--r-- drivers/net/ethernet/ti/k3-cppi-desc-pool.c | 126
-rw-r--r-- drivers/net/ethernet/ti/k3-cppi-desc-pool.h | 30
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_net.h | 2
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_wireless.h | 2
-rw-r--r-- drivers/net/ethernet/toshiba/spider_net.h | 2
-rw-r--r-- drivers/net/ethernet/toshiba/tc35815.c | 2
-rw-r--r-- drivers/net/ethernet/via/via-velocity.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac.h | 8
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac_main.c | 181
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet.h | 19
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 444
-rw-r--r-- drivers/net/fddi/skfp/drvfbi.c | 4
-rw-r--r-- drivers/net/fddi/skfp/h/skfbi.h | 5
-rw-r--r-- drivers/net/hyperv/netvsc.c | 9
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 46
-rw-r--r-- drivers/net/ieee802154/ca8210.c | 3
-rw-r--r-- drivers/net/ipa/Kconfig | 19
-rw-r--r-- drivers/net/ipa/Makefile | 12
-rw-r--r-- drivers/net/ipa/gsi.c | 2055
-rw-r--r-- drivers/net/ipa/gsi.h | 257
-rw-r--r-- drivers/net/ipa/gsi_private.h | 118
-rw-r--r-- drivers/net/ipa/gsi_reg.h | 417
-rw-r--r-- drivers/net/ipa/gsi_trans.c | 786
-rw-r--r-- drivers/net/ipa/gsi_trans.h | 226
-rw-r--r-- drivers/net/ipa/ipa.h | 148
-rw-r--r-- drivers/net/ipa/ipa_clock.c | 313
-rw-r--r-- drivers/net/ipa/ipa_clock.h | 53
-rw-r--r-- drivers/net/ipa/ipa_cmd.c | 680
-rw-r--r-- drivers/net/ipa/ipa_cmd.h | 195
-rw-r--r-- drivers/net/ipa/ipa_data-sc7180.c | 307
-rw-r--r-- drivers/net/ipa/ipa_data-sdm845.c | 329
-rw-r--r-- drivers/net/ipa/ipa_data.h | 280
-rw-r--r-- drivers/net/ipa/ipa_endpoint.c | 1706
-rw-r--r-- drivers/net/ipa/ipa_endpoint.h | 110
-rw-r--r-- drivers/net/ipa/ipa_gsi.c | 54
-rw-r--r-- drivers/net/ipa/ipa_gsi.h | 60
-rw-r--r-- drivers/net/ipa/ipa_interrupt.c | 253
-rw-r--r-- drivers/net/ipa/ipa_interrupt.h | 117
-rw-r--r-- drivers/net/ipa/ipa_main.c | 953
-rw-r--r-- drivers/net/ipa/ipa_mem.c | 314
-rw-r--r-- drivers/net/ipa/ipa_mem.h | 90
-rw-r--r-- drivers/net/ipa/ipa_modem.c | 383
-rw-r--r-- drivers/net/ipa/ipa_modem.h | 31
-rw-r--r-- drivers/net/ipa/ipa_qmi.c | 538
-rw-r--r-- drivers/net/ipa/ipa_qmi.h | 41
-rw-r--r-- drivers/net/ipa/ipa_qmi_msg.c | 663
-rw-r--r-- drivers/net/ipa/ipa_qmi_msg.h | 252
-rw-r--r-- drivers/net/ipa/ipa_reg.c | 38
-rw-r--r-- drivers/net/ipa/ipa_reg.h | 476
-rw-r--r-- drivers/net/ipa/ipa_smp2p.c | 335
-rw-r--r-- drivers/net/ipa/ipa_smp2p.h | 48
-rw-r--r-- drivers/net/ipa/ipa_table.c | 700
-rw-r--r-- drivers/net/ipa/ipa_table.h | 103
-rw-r--r-- drivers/net/ipa/ipa_uc.c | 211
-rw-r--r-- drivers/net/ipa/ipa_uc.h | 32
-rw-r--r-- drivers/net/ipa/ipa_version.h | 23
-rw-r--r-- drivers/net/macsec.c | 754
-rw-r--r-- drivers/net/netdevsim/dev.c | 288
-rw-r--r-- drivers/net/netdevsim/health.c | 4
-rw-r--r-- drivers/net/netdevsim/netdevsim.h | 5
-rw-r--r-- drivers/net/phy/Kconfig | 25
-rw-r--r-- drivers/net/phy/Makefile | 8
-rw-r--r-- drivers/net/phy/aquantia_main.c | 38
-rw-r--r-- drivers/net/phy/bcm-phy-lib.c | 22
-rw-r--r-- drivers/net/phy/bcm-phy-lib.h | 1
-rw-r--r-- drivers/net/phy/bcm7xxx.c | 4
-rw-r--r-- drivers/net/phy/bcm84881.c | 27
-rw-r--r-- drivers/net/phy/broadcom.c | 24
-rw-r--r-- drivers/net/phy/dp83640.c | 2
-rw-r--r-- drivers/net/phy/dp83867.c | 150
-rw-r--r-- drivers/net/phy/linkmode.c | 95
-rw-r--r-- drivers/net/phy/marvell.c | 24
-rw-r--r-- drivers/net/phy/marvell10g.c | 313
-rw-r--r-- drivers/net/phy/mdio-ipq8064.c | 166
-rw-r--r-- drivers/net/phy/mdio-mux-bcm-iproc.c | 14
-rw-r--r-- drivers/net/phy/mdio-mvusb.c | 120
-rw-r--r-- drivers/net/phy/mdio-xpcs.c | 716
-rw-r--r-- drivers/net/phy/mdio_bus.c | 85
-rw-r--r-- drivers/net/phy/micrel.c | 50
-rw-r--r-- drivers/net/phy/mscc/Makefile | 10
-rw-r--r-- drivers/net/phy/mscc/mscc.h | 400
-rw-r--r-- drivers/net/phy/mscc/mscc_fc_buffer.h (renamed from drivers/net/phy/mscc_fc_buffer.h) | 8
-rw-r--r-- drivers/net/phy/mscc/mscc_mac.h (renamed from drivers/net/phy/mscc_mac.h) | 8
-rw-r--r-- drivers/net/phy/mscc/mscc_macsec.c | 1051
-rw-r--r-- drivers/net/phy/mscc/mscc_macsec.h (renamed from drivers/net/phy/mscc_macsec.h) | 66
-rw-r--r-- drivers/net/phy/mscc/mscc_main.c (renamed from drivers/net/phy/mscc.c) | 1589
-rw-r--r-- drivers/net/phy/nxp-tja11xx.c | 16
-rw-r--r-- drivers/net/phy/phy-c45.c | 5
-rw-r--r-- drivers/net/phy/phy-core.c | 71
-rw-r--r-- drivers/net/phy/phy.c | 30
-rw-r--r-- drivers/net/phy/phy_device.c | 97
-rw-r--r-- drivers/net/phy/phylink.c | 522
-rw-r--r-- drivers/net/phy/realtek.c | 60
-rw-r--r-- drivers/net/phy/smsc.c | 16
-rw-r--r-- drivers/net/slip/slip.c | 7
-rw-r--r-- drivers/net/tun.c | 108
-rw-r--r-- drivers/net/usb/cdc-phonet.c | 2
-rw-r--r-- drivers/net/usb/cdc_ncm.c | 411
-rw-r--r-- drivers/net/usb/huawei_cdc_ncm.c | 8
-rw-r--r-- drivers/net/usb/r8152.c | 33
-rw-r--r-- drivers/net/veth.c | 257
-rw-r--r-- drivers/net/virtio_net.c | 109
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 5
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_ethtool.c | 24
-rw-r--r-- drivers/net/vrf.c | 4
-rw-r--r-- drivers/net/wan/farsync.h | 2
-rw-r--r-- drivers/net/wan/wanxl.c | 2
-rw-r--r-- drivers/net/wireless/admtek/adm8211.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/ahb.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.c | 82
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.h | 5
-rw-r--r-- drivers/net/wireless/ath/ath10k/debug.c | 12
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_rx.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_tx.c | 12
-rw-r--r-- drivers/net/wireless/ath/ath10k/hw.c | 1
-rw-r--r-- drivers/net/wireless/ath/ath10k/hw.h | 3
-rw-r--r-- drivers/net/wireless/ath/ath10k/mac.c | 31
-rw-r--r-- drivers/net/wireless/ath/ath10k/sdio.c | 25
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.c | 19
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.h | 7
-rw-r--r-- drivers/net/wireless/ath/ath11k/Kconfig | 1
-rw-r--r-- drivers/net/wireless/ath/ath11k/Makefile | 1
-rw-r--r-- drivers/net/wireless/ath/ath11k/ahb.c | 11
-rw-r--r-- drivers/net/wireless/ath/ath11k/ce.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath11k/core.c | 14
-rw-r--r-- drivers/net/wireless/ath/ath11k/core.h | 14
-rw-r--r-- drivers/net/wireless/ath/ath11k/debug.h | 15
-rw-r--r-- drivers/net/wireless/ath/ath11k/debug_htt_stats.c | 56
-rw-r--r-- drivers/net/wireless/ath/ath11k/debug_htt_stats.h | 28
-rw-r--r-- drivers/net/wireless/ath/ath11k/debugfs_sta.c | 14
-rw-r--r-- drivers/net/wireless/ath/ath11k/dp.c | 76
-rw-r--r-- drivers/net/wireless/ath/ath11k/dp.h | 35
-rw-r--r-- drivers/net/wireless/ath/ath11k/dp_rx.c | 1492
-rw-r--r-- drivers/net/wireless/ath/ath11k/dp_rx.h | 14
-rw-r--r-- drivers/net/wireless/ath/ath11k/dp_tx.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath11k/hal.c | 66
-rw-r--r-- drivers/net/wireless/ath/ath11k/hal.h | 23
-rw-r--r-- drivers/net/wireless/ath/ath11k/hal_rx.c | 42
-rw-r--r-- drivers/net/wireless/ath/ath11k/hal_rx.h | 30
-rw-r--r-- drivers/net/wireless/ath/ath11k/hw.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath11k/mac.c | 123
-rw-r--r-- drivers/net/wireless/ath/ath11k/mac.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath11k/peer.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath11k/peer.h | 9
-rw-r--r-- drivers/net/wireless/ath/ath11k/qmi.c | 1
-rw-r--r-- drivers/net/wireless/ath/ath11k/rx_desc.h | 10
-rw-r--r-- drivers/net/wireless/ath/ath11k/thermal.c | 224
-rw-r--r-- drivers/net/wireless/ath/ath11k/thermal.h | 53
-rw-r--r-- drivers/net/wireless/ath/ath11k/wmi.c | 260
-rw-r--r-- drivers/net/wireless/ath/ath11k/wmi.h | 96
-rw-r--r-- drivers/net/wireless/ath/ath5k/Kconfig | 2
-rw-r--r-- drivers/net/wireless/ath/ath5k/debug.c | 174
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath9k/xmit.c | 7
-rw-r--r-- drivers/net/wireless/ath/carl9170/debug.c | 2
-rw-r--r-- drivers/net/wireless/ath/wil6210/ethtool.c | 1
-rw-r--r-- drivers/net/wireless/atmel/at76c50x-usb.h | 2
-rw-r--r-- drivers/net/wireless/broadcom/b43/debugfs.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/b43legacy/debugfs.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/b43legacy/sysfs.c | 6
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 5
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 3
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 127
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h | 2
-rw-r--r-- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 16
-rw-r--r-- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 48
-rw-r--r-- drivers/net/wireless/intel/ipw2x00/libipw_rx.c | 4
-rw-r--r-- drivers/net/wireless/intel/ipw2x00/libipw_wx.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/3945-mac.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/4965.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 347
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 189
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/api/commands.h | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/api/location.h | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/api/scan.h | 83
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/api/soc.h | 87
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 47
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/file.h | 11
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 121
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 5
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 46
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 12
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 7
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 169
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sf.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 947
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 2
-rw-r--r-- drivers/net/wireless/intersil/hostap/hostap_common.h | 2
-rw-r--r-- drivers/net/wireless/intersil/hostap/hostap_download.c | 10
-rw-r--r-- drivers/net/wireless/intersil/hostap/hostap_wlan.h | 2
-rw-r--r-- drivers/net/wireless/intersil/orinoco/fw.c | 2
-rw-r--r-- drivers/net/wireless/intersil/orinoco/hermes.h | 2
-rw-r--r-- drivers/net/wireless/intersil/orinoco/hermes_dld.c | 6
-rw-r--r-- drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 2
-rw-r--r-- drivers/net/wireless/intersil/p54/eeprom.h | 8
-rw-r--r-- drivers/net/wireless/intersil/p54/lmac.h | 6
-rw-r--r-- drivers/net/wireless/intersil/p54/p54.h | 2
-rw-r--r-- drivers/net/wireless/intersil/prism54/oid_mgt.c | 34
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 339
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.h | 21
-rw-r--r-- drivers/net/wireless/marvell/libertas/host.h | 4
-rw-r--r-- drivers/net/wireless/marvell/libertas/if_sdio.c | 2
-rw-r--r-- drivers/net/wireless/marvell/libertas/if_spi.c | 5
-rw-r--r-- drivers/net/wireless/marvell/libertas/if_usb.h | 2
-rw-r--r-- drivers/net/wireless/marvell/libertas_tf/if_usb.h | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11ac.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11ac.h | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11h.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n.h | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_aggr.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_aggr.h | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 10
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cfg80211.h | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cfp.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/debugfs.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/decl.h | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/ethtool.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/fw.h | 48
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/ie.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/init.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/ioctl.h | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/join.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.h | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/pcie.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/pcie.h | 6
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/scan.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sdio.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sdio.h | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_event.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_rx.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_tx.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/tdls.c | 9
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/txrx.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/uap_cmd.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/uap_event.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/usb.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/usb.h | 6
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/util.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/util.h | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/wmm.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/wmm.h | 8
-rw-r--r-- drivers/net/wireless/marvell/mwl8k.c | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/Makefile | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/agg-rx.c | 17
-rw-r--r-- drivers/net/wireless/mediatek/mt76/airtime.c | 326
-rw-r--r-- drivers/net/wireless/mediatek/mt76/dma.c | 49
-rw-r--r-- drivers/net/wireless/mediatek/mt76/eeprom.c | 20
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mac80211.c | 392
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mcu.c | 18
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mmio.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76.h | 168
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/core.c | 5
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/dma.c | 21
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/init.c | 24
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/mac.c | 45
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/main.c | 25
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/mcu.c | 23
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h | 9
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/regs.h | 15
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/Kconfig | 11
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/Makefile | 7
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c | 120
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/dma.c | 220
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c | 92
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h | 5
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/init.c | 407
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 1015
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mac.h | 88
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 404
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mcu.c | 2528
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mcu.h | 262
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mmio.c | 174
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h | 253
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h | 56
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/pci.c | 104
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/regs.h | 248
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/soc.c | 77
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/trace.c | 12
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/init.c | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/main.c | 8
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/pci.c | 9
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/phy.c | 35
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 31
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02.h | 13
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c | 91
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c | 14
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_mac.c | 43
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_mac.h | 5
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c | 10
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c | 32
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_phy.c | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_phy.h | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_regs.h | 12
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_trace.h | 46
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c | 7
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c | 61
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c | 34
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_util.c | 28
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/Makefile | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/init.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c | 25
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c | 8
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/phy.c | 26
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/usb.c | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c | 20
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c | 19
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/trace.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/trace.h | 54
-rw-r--r-- drivers/net/wireless/mediatek/mt76/tx.c | 85
-rw-r--r-- drivers/net/wireless/mediatek/mt76/usb.c | 471
-rw-r--r-- drivers/net/wireless/mediatek/mt76/util.c | 8
-rw-r--r-- drivers/net/wireless/mediatek/mt76/util.h | 14
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 117
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/commands.c | 689
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/commands.h | 6
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/core.c | 65
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/core.h | 31
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/event.c | 121
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c | 2
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/qlink.h | 360
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/qlink_util.c | 2
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/qlink_util.h | 45
-rw-r--r-- drivers/net/wireless/rayctl.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 23
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 85
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/wifi.h | 6
-rw-r--r-- drivers/net/wireless/realtek/rtw88/bf.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtw88/coex.c | 495
-rw-r--r-- drivers/net/wireless/realtek/rtw88/coex.h | 10
-rw-r--r-- drivers/net/wireless/realtek/rtw88/debug.c | 62
-rw-r--r-- drivers/net/wireless/realtek/rtw88/fw.c | 276
-rw-r--r-- drivers/net/wireless/realtek/rtw88/fw.h | 27
-rw-r--r-- drivers/net/wireless/realtek/rtw88/hci.h | 46
-rw-r--r-- drivers/net/wireless/realtek/rtw88/mac.c | 25
-rw-r--r-- drivers/net/wireless/realtek/rtw88/mac80211.c | 59
-rw-r--r-- drivers/net/wireless/realtek/rtw88/main.c | 34
-rw-r--r-- drivers/net/wireless/realtek/rtw88/main.h | 57
-rw-r--r-- drivers/net/wireless/realtek/rtw88/pci.c | 208
-rw-r--r-- drivers/net/wireless/realtek/rtw88/pci.h | 8
-rw-r--r-- drivers/net/wireless/realtek/rtw88/phy.c | 10
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822b.c | 60
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822c.c | 80
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822c.h | 5
-rw-r--r-- drivers/net/wireless/realtek/rtw88/tx.c | 137
-rw-r--r-- drivers/net/wireless/realtek/rtw88/tx.h | 16
-rw-r--r-- drivers/net/wireless/realtek/rtw88/wow.c | 39
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_sdio.c | 9
-rw-r--r-- drivers/net/wireless/st/cw1200/wsm.h | 2
-rw-r--r-- drivers/net/wireless/ti/wl1251/cmd.h | 4
-rw-r--r-- drivers/net/wireless/ti/wl1251/sdio.c | 32
-rw-r--r-- drivers/net/wireless/ti/wl1251/wl12xx_80211.h | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/acx.h | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/boot.h | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/cmd.h | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/conf.h | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/wl12xx_80211.h | 2
-rw-r--r-- drivers/net/wireless/virt_wifi.c | 12
-rw-r--r-- drivers/net/wireless/wl3501.h | 2
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_usb.h | 8
-rw-r--r-- drivers/nfc/fdp/fdp.c | 2
-rw-r--r-- drivers/nfc/st21nfca/dep.c | 4
-rw-r--r-- drivers/pci/pci-bridge-emul.c | 14
-rw-r--r-- drivers/pci/pci.c | 57
-rw-r--r-- drivers/phy/ti/Kconfig | 3
-rw-r--r-- drivers/ptp/Kconfig | 24
-rw-r--r-- drivers/ptp/Makefile | 2
-rw-r--r-- drivers/ptp/ptp_chardev.c | 9
-rw-r--r-- drivers/ptp/ptp_clock.c | 17
-rw-r--r-- drivers/ptp/ptp_idt82p33.c | 1008
-rw-r--r-- drivers/ptp/ptp_idt82p33.h | 171
-rw-r--r-- drivers/ptp/ptp_qoriq.c | 29
-rw-r--r-- drivers/ptp/ptp_vmw.c | 144
-rw-r--r-- drivers/remoteproc/Kconfig | 4
-rw-r--r-- drivers/remoteproc/Makefile | 1
-rw-r--r-- drivers/remoteproc/qcom_q6v5_ipa_notify.c | 85
-rw-r--r-- drivers/remoteproc/qcom_q6v5_mss.c | 38
-rw-r--r-- drivers/s390/cio/qdio.h | 11
-rw-r--r-- drivers/s390/cio/qdio_debug.c | 4
-rw-r--r-- drivers/s390/cio/qdio_main.c | 50
-rw-r--r-- drivers/s390/cio/qdio_setup.c | 9
-rw-r--r-- drivers/s390/cio/qdio_thinint.c | 38
-rw-r--r-- drivers/s390/net/Kconfig | 17
-rw-r--r-- drivers/s390/net/qeth_core.h | 46
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 398
-rw-r--r-- drivers/s390/net/qeth_core_mpc.h | 21
-rw-r--r-- drivers/s390/net/qeth_core_sys.c | 10
-rw-r--r-- drivers/s390/net/qeth_ethtool.c | 150
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 89
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 74
-rw-r--r-- drivers/s390/net/qeth_l3_sys.c | 35
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 18
-rw-r--r-- drivers/ssb/sprom.c | 4
-rw-r--r-- drivers/staging/qlge/qlge_ethtool.c | 2
-rw-r--r-- fs/nsfs.c | 14
-rw-r--r-- fs/sysfs/file.c | 148
-rw-r--r-- fs/sysfs/group.c | 115
-rw-r--r-- include/asm-generic/vmlinux.lds.h | 15
-rw-r--r-- include/linux/bitfield.h | 14
-rw-r--r-- include/linux/bpf-cgroup.h | 43
-rw-r--r-- include/linux/bpf.h | 163
-rw-r--r-- include/linux/bpf_lsm.h | 33
-rw-r--r-- include/linux/bpf_types.h | 4
-rw-r--r-- include/linux/bpf_verifier.h | 4
-rw-r--r-- include/linux/brcmphy.h | 2
-rw-r--r-- include/linux/dccp.h | 2
-rw-r--r-- include/linux/device.h | 1
-rw-r--r-- include/linux/ethtool.h | 66
-rw-r--r-- include/linux/filter.h | 48
-rw-r--r-- include/linux/fsl/ptp_qoriq.h | 2
-rw-r--r-- include/linux/ieee80211.h | 26
-rw-r--r-- include/linux/inet_diag.h | 27
-rw-r--r-- include/linux/iopoll.h | 40
-rw-r--r-- include/linux/ipv6.h | 1
-rw-r--r-- include/linux/limits.h | 1
-rw-r--r-- include/linux/linkmode.h | 8
-rw-r--r-- include/linux/lsm_hook_defs.h | 381
-rw-r--r--include/linux/lsm_hooks.h628
-rw-r--r--include/linux/mdio-xpcs.h41
-rw-r--r--include/linux/mdio.h4
-rw-r--r--include/linux/mii.h57
-rw-r--r--include/linux/mlx5/driver.h22
-rw-r--r--include/linux/mlx5/eswitch.h38
-rw-r--r--include/linux/mlx5/fs.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h40
-rw-r--r--include/linux/mlx5/mlx5_ifc_fpga.h2
-rw-r--r--include/linux/netdev_features.h3
-rw-r--r--include/linux/netdevice.h22
-rw-r--r--include/linux/netfilter/ipset/ip_set.h2
-rw-r--r--include/linux/netfilter/x_tables.h8
-rw-r--r--include/linux/netfilter_arp/arp_tables.h2
-rw-r--r--include/linux/netfilter_bridge/ebtables.h2
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h2
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h2
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/pci.h14
-rw-r--r--include/linux/phy.h59
-rw-r--r--include/linux/phylink.h182
-rw-r--r--include/linux/proc_ns.h2
-rw-r--r--include/linux/ptp_clock_kernel.h19
-rw-r--r--include/linux/qed/qed_chain.h24
-rw-r--r--include/linux/remoteproc/qcom_q6v5_ipa_notify.h82
-rw-r--r--include/linux/skbuff.h16
-rw-r--r--include/linux/skmsg.h68
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/sysfs.h46
-rw-r--r--include/linux/tcp.h26
-rw-r--r--include/linux/tnum.h12
-rw-r--r--include/linux/usb/cdc_ncm.h15
-rw-r--r--include/net/6lowpan.h2
-rw-r--r--include/net/act_api.h6
-rw-r--r--include/net/addrconf.h3
-rw-r--r--include/net/af_unix.h4
-rw-r--r--include/net/bareudp.h20
-rw-r--r--include/net/bluetooth/bluetooth.h17
-rw-r--r--include/net/bluetooth/hci.h74
-rw-r--r--include/net/bluetooth/hci_core.h46
-rw-r--r--include/net/bluetooth/hci_sock.h6
-rw-r--r--include/net/bluetooth/l2cap.h57
-rw-r--r--include/net/bluetooth/mgmt.h5
-rw-r--r--include/net/bluetooth/rfcomm.h3
-rw-r--r--include/net/bonding.h2
-rw-r--r--include/net/bpf_sk_storage.h27
-rw-r--r--include/net/cfg80211.h213
-rw-r--r--include/net/cfg802154.h2
-rw-r--r--include/net/cls_cgroup.h7
-rw-r--r--include/net/devlink.h171
-rw-r--r--include/net/dn_fib.h2
-rw-r--r--include/net/drop_monitor.h3
-rw-r--r--include/net/dsa.h39
-rw-r--r--include/net/dst.h1
-rw-r--r--include/net/esp.h16
-rw-r--r--include/net/flow_offload.h134
-rw-r--r--include/net/inet6_hashtables.h3
-rw-r--r--include/net/inet_connection_sock.h6
-rw-r--r--include/net/inet_hashtables.h3
-rw-r--r--include/net/inet_sock.h2
-rw-r--r--include/net/ip6_checksum.h9
-rw-r--r--include/net/ip6_fib.h5
-rw-r--r--include/net/ip6_route.h2
-rw-r--r--include/net/ip_fib.h9
-rw-r--r--include/net/ipv6.h6
-rw-r--r--include/net/lwtunnel.h8
-rw-r--r--include/net/mac80211.h55
-rw-r--r--include/net/macsec.h77
-rw-r--r--include/net/mip6.h2
-rw-r--r--include/net/mld.h6
-rw-r--r--include/net/mpls_iptunnel.h2
-rw-r--r--include/net/mptcp.h26
-rw-r--r--include/net/ndisc.h7
-rw-r--r--include/net/neighbour.h2
-rw-r--r--include/net/net_namespace.h5
-rw-r--r--include/net/netfilter/nf_conntrack_acct.h11
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h2
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h2
-rw-r--r--include/net/netfilter/nf_flow_table.h41
-rw-r--r--include/net/netfilter/nf_queue.h7
-rw-r--r--include/net/netfilter/nf_tables.h39
-rw-r--r--include/net/netfilter/nf_tables_core.h13
-rw-r--r--include/net/netlink.h15
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/mib.h3
-rw-r--r--include/net/nexthop.h2
-rw-r--r--include/net/nfc/nci.h14
-rw-r--r--include/net/nfc/nfc.h2
-rw-r--r--include/net/page_pool.h36
-rw-r--r--include/net/pie.h31
-rw-r--r--include/net/pkt_cls.h44
-rw-r--r--include/net/pkt_sched.h12
-rw-r--r--include/net/red.h38
-rw-r--r--include/net/route.h8
-rw-r--r--include/net/rpl.h46
-rw-r--r--include/net/sch_generic.h3
-rw-r--r--include/net/sctp/structs.h2
-rw-r--r--include/net/sock.h83
-rw-r--r--include/net/sock_reuseport.h4
-rw-r--r--include/net/tc_act/tc_ct.h25
-rw-r--r--include/net/tc_act/tc_police.h6
-rw-r--r--include/net/tc_act/tc_skbedit.h41
-rw-r--r--include/net/tc_act/tc_tunnel_key.h10
-rw-r--r--include/net/tcp.h15
-rw-r--r--include/net/udp.h5
-rw-r--r--include/soc/mscc/ocelot.h106
-rw-r--r--include/soc/mscc/ocelot_vcap.h205
-rw-r--r--include/trace/bpf_probe.h18
-rw-r--r--include/uapi/linux/bpf.h324
-rw-r--r--include/uapi/linux/devlink.h16
-rw-r--r--include/uapi/linux/ethtool.h10
-rw-r--r--include/uapi/linux/ethtool_netlink.h175
-rw-r--r--include/uapi/linux/if_arcnet.h6
-rw-r--r--include/uapi/linux/if_bridge.h46
-rw-r--r--include/uapi/linux/if_link.h18
-rw-r--r--include/uapi/linux/if_macsec.h8
-rw-r--r--include/uapi/linux/inet_diag.h6
-rw-r--r--include/uapi/linux/ipv6.h2
-rw-r--r--include/uapi/linux/lwtunnel.h1
-rw-r--r--include/uapi/linux/mii.h5
-rw-r--r--include/uapi/linux/mptcp.h89
-rw-r--r--include/uapi/linux/net_dropmon.h5
-rw-r--r--include/uapi/linux/net_tstamp.h6
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h25
-rw-r--r--include/uapi/linux/netfilter/xt_IDLETIMER.h12
-rw-r--r--include/uapi/linux/netfilter_bridge/ebt_among.h2
-rw-r--r--include/uapi/linux/nl80211.h241
-rw-r--r--include/uapi/linux/openvswitch.h7
-rw-r--r--include/uapi/linux/pkt_cls.h22
-rw-r--r--include/uapi/linux/pkt_sched.h23
-rw-r--r--include/uapi/linux/rpl.h48
-rw-r--r--include/uapi/linux/rpl_iptunnel.h21
-rw-r--r--include/uapi/linux/sock_diag.h26
-rw-r--r--include/uapi/linux/tcp.h3
-rw-r--r--include/uapi/linux/virtio_ids.h1
-rw-r--r--include/uapi/linux/wireless.h5
-rw-r--r--init/Kconfig13
-rw-r--r--kernel/bpf/Makefile1
-rw-r--r--kernel/bpf/bpf_lsm.c54
-rw-r--r--kernel/bpf/bpf_struct_ops.c12
-rw-r--r--kernel/bpf/btf.c62
-rw-r--r--kernel/bpf/cgroup.c505
-rw-r--r--kernel/bpf/core.c122
-rw-r--r--kernel/bpf/dispatcher.c5
-rw-r--r--kernel/bpf/hashtab.c174
-rw-r--r--kernel/bpf/helpers.c63
-rw-r--r--kernel/bpf/inode.c42
-rw-r--r--kernel/bpf/lpm_trie.c14
-rw-r--r--kernel/bpf/percpu_freelist.c20
-rw-r--r--kernel/bpf/reuseport_array.c5
-rw-r--r--kernel/bpf/stackmap.c18
-rw-r--r--kernel/bpf/syscall.c656
-rw-r--r--kernel/bpf/sysfs_btf.c11
-rw-r--r--kernel/bpf/tnum.c15
-rw-r--r--kernel/bpf/trampoline.c178
-rw-r--r--kernel/bpf/verifier.c1649
-rw-r--r--kernel/cgroup/cgroup.c41
-rw-r--r--kernel/events/core.c11
-rw-r--r--kernel/extable.c2
-rw-r--r--kernel/seccomp.c4
-rw-r--r--kernel/trace/bpf_trace.c77
-rw-r--r--kernel/trace/trace_uprobe.c11
-rw-r--r--lib/objagg.c4
-rw-r--r--lib/test_bpf.c4
-rw-r--r--net/802/psnap.c2
-rw-r--r--net/8021q/vlan_core.c5
-rw-r--r--net/batman-adv/distributed-arp-table.c2
-rw-r--r--net/batman-adv/main.h2
-rw-r--r--net/batman-adv/translation-table.c8
-rw-r--r--net/bluetooth/a2mp.h10
-rw-r--r--net/bluetooth/bnep/bnep.h6
-rw-r--r--net/bluetooth/hci_conn.c146
-rw-r--r--net/bluetooth/hci_core.c143
-rw-r--r--net/bluetooth/hci_event.c114
-rw-r--r--net/bluetooth/hci_request.c350
-rw-r--r--net/bluetooth/hci_request.h2
-rw-r--r--net/bluetooth/hidp/core.c2
-rw-r--r--net/bluetooth/l2cap_core.c734
-rw-r--r--net/bluetooth/l2cap_sock.c67
-rw-r--r--net/bluetooth/mgmt.c113
-rw-r--r--net/bluetooth/rfcomm/core.c13
-rw-r--r--net/bluetooth/rfcomm/tty.c4
-rw-r--r--net/bluetooth/sco.c13
-rw-r--r--net/bluetooth/smp.c29
-rw-r--r--net/bpf/test_run.c76
-rw-r--r--net/bpfilter/main.c1
-rw-r--r--net/bridge/br_netlink_tunnel.c12
-rw-r--r--net/bridge/br_private.h4
-rw-r--r--net/bridge/br_private_tunnel.h17
-rw-r--r--net/bridge/br_vlan.c76
-rw-r--r--net/bridge/br_vlan_options.c112
-rw-r--r--net/bridge/br_vlan_tunnel.c5
-rw-r--r--net/bridge/netfilter/ebtables.c2
-rw-r--r--net/core/bpf_sk_storage.c285
-rw-r--r--net/core/datagram.c39
-rw-r--r--net/core/dev.c47
-rw-r--r--net/core/dev_ioctl.c6
-rw-r--r--net/core/devlink.c1267
-rw-r--r--net/core/drop_monitor.c35
-rw-r--r--net/core/filter.c234
-rw-r--r--net/core/flow_dissector.c4
-rw-r--r--net/core/flow_offload.c34
-rw-r--r--net/core/lwt_bpf.c2
-rw-r--r--net/core/lwtunnel.c6
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/core/net-sysfs.c133
-rw-r--r--net/core/net-sysfs.h2
-rw-r--r--net/core/net_namespace.c15
-rw-r--r--net/core/page_pool.c78
-rw-r--r--net/core/pktgen.c4
-rw-r--r--net/core/rtnetlink.c32
-rw-r--r--net/core/skbuff.c24
-rw-r--r--net/core/skmsg.c10
-rw-r--r--net/core/sock.c26
-rw-r--r--net/core/sock_map.c306
-rw-r--r--net/core/sock_reuseport.c50
-rw-r--r--net/core/xdp.c2
-rw-r--r--net/dccp/ccid.h2
-rw-r--r--net/dccp/diag.c9
-rw-r--r--net/dccp/minisocks.c1
-rw-r--r--net/decnet/dn_route.c4
-rw-r--r--net/dsa/dsa.c6
-rw-r--r--net/dsa/dsa2.c2
-rw-r--r--net/dsa/dsa_priv.h15
-rw-r--r--net/dsa/master.c21
-rw-r--r--net/dsa/port.c27
-rw-r--r--net/dsa/slave.c429
-rw-r--r--net/dsa/switch.c37
-rw-r--r--net/dsa/tag_brcm.c23
-rw-r--r--net/dsa/tag_ocelot.c3
-rw-r--r--net/ethtool/Makefile3
-rw-r--r--net/ethtool/bitset.c94
-rw-r--r--net/ethtool/bitset.h4
-rw-r--r--net/ethtool/channels.c227
-rw-r--r--net/ethtool/coalesce.c353
-rw-r--r--net/ethtool/common.c114
-rw-r--r--net/ethtool/common.h9
-rw-r--r--net/ethtool/debug.c6
-rw-r--r--net/ethtool/eee.c206
-rw-r--r--net/ethtool/features.c304
-rw-r--r--net/ethtool/ioctl.c195
-rw-r--r--net/ethtool/linkinfo.c6
-rw-r--r--net/ethtool/linkmodes.c7
-rw-r--r--net/ethtool/netlink.c152
-rw-r--r--net/ethtool/netlink.h22
-rw-r--r--net/ethtool/pause.c145
-rw-r--r--net/ethtool/privflags.c211
-rw-r--r--net/ethtool/rings.c200
-rw-r--r--net/ethtool/strset.c15
-rw-r--r--net/ethtool/tsinfo.c143
-rw-r--r--net/ethtool/wol.c5
-rw-r--r--net/hsr/hsr_debugfs.c5
-rw-r--r--net/hsr/hsr_device.c64
-rw-r--r--net/hsr/hsr_device.h3
-rw-r--r--net/hsr/hsr_framereg.c3
-rw-r--r--net/hsr/hsr_main.c3
-rw-r--r--net/hsr/hsr_main.h1
-rw-r--r--net/hsr/hsr_netlink.c49
-rw-r--r--net/hsr/hsr_slave.c63
-rw-r--r--net/hsr/hsr_slave.h2
-rw-r--r--net/ipv4/Makefile1
-rw-r--r--net/ipv4/af_inet.c8
-rw-r--r--net/ipv4/ah4.c2
-rw-r--r--net/ipv4/arp.c2
-rw-r--r--net/ipv4/bpf_tcp_ca.c33
-rw-r--r--net/ipv4/devinet.c6
-rw-r--r--net/ipv4/esp4.c16
-rw-r--r--net/ipv4/esp4_offload.c32
-rw-r--r--net/ipv4/fib_lookup.h2
-rw-r--r--net/ipv4/fib_semantics.c26
-rw-r--r--net/ipv4/fib_trie.c10
-rw-r--r--net/ipv4/icmp.c2
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/inet_connection_sock.c36
-rw-r--r--net/ipv4/inet_diag.c307
-rw-r--r--net/ipv4/ip_input.c3
-rw-r--r--net/ipv4/ip_output.c4
-rw-r--r--net/ipv4/ip_tunnel.c6
-rw-r--r--net/ipv4/ip_tunnel_core.c4
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/netfilter/arp_tables.c4
-rw-r--r--net/ipv4/netfilter/ip_tables.c4
-rw-r--r--net/ipv4/netfilter/nf_log_ipv4.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c4
-rw-r--r--net/ipv4/nexthop.c2
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/raw_diag.c24
-rw-r--r--net/ipv4/route.c61
-rw-r--r--net/ipv4/sysctl_net_ipv4.c33
-rw-r--r--net/ipv4/tcp.c29
-rw-r--r--net/ipv4/tcp_bic.c11
-rw-r--r--net/ipv4/tcp_bpf.c272
-rw-r--r--net/ipv4/tcp_diag.c8
-rw-r--r--net/ipv4/tcp_input.c6
-rw-r--r--net/ipv4/tcp_ipv4.c10
-rw-r--r--net/ipv4/tcp_minisocks.c9
-rw-r--r--net/ipv4/tcp_scalable.c17
-rw-r--r--net/ipv4/tcp_ulp.c9
-rw-r--r--net/ipv4/tcp_veno.c47
-rw-r--r--net/ipv4/tcp_yeah.c41
-rw-r--r--net/ipv4/udp.c24
-rw-r--r--net/ipv4/udp_bpf.c53
-rw-r--r--net/ipv4/udp_diag.c41
-rw-r--r--net/ipv4/udp_offload.c1
-rw-r--r--net/ipv6/Kconfig10
-rw-r--r--net/ipv6/Makefile3
-rw-r--r--net/ipv6/addrconf.c69
-rw-r--r--net/ipv6/af_inet6.c7
-rw-r--r--net/ipv6/ah6.c4
-rw-r--r--net/ipv6/esp6.c16
-rw-r--r--net/ipv6/esp6_offload.c36
-rw-r--r--net/ipv6/exthdrs.c203
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/ila/ila_lwt.c2
-rw-r--r--net/ipv6/ip6_fib.c8
-rw-r--r--net/ipv6/ip6_input.c3
-rw-r--r--net/ipv6/ip6_output.c70
-rw-r--r--net/ipv6/ip6mr.c5
-rw-r--r--net/ipv6/ndisc.c3
-rw-r--r--net/ipv6/netfilter/ip6_tables.c4
-rw-r--r--net/ipv6/netfilter/nf_log_ipv6.c2
-rw-r--r--net/ipv6/raw.c8
-rw-r--r--net/ipv6/route.c10
-rw-r--r--net/ipv6/rpl.c123
-rw-r--r--net/ipv6/rpl_iptunnel.c382
-rw-r--r--net/ipv6/seg6_iptunnel.c4
-rw-r--r--net/ipv6/seg6_local.c5
-rw-r--r--net/ipv6/sysctl_net_ipv6.c21
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/ipv6/udp.c9
-rw-r--r--net/kcm/kcmproc.c2
-rw-r--r--net/kcm/kcmsock.c4
-rw-r--r--net/l2tp/l2tp_core.h2
-rw-r--r--net/llc/af_llc.c2
-rw-r--r--net/llc/llc_proc.c2
-rw-r--r--net/mac80211/aes_cmac.c21
-rw-r--r--net/mac80211/aes_gmac.c24
-rw-r--r--net/mac80211/cfg.c193
-rw-r--r--net/mac80211/debugfs.c56
-rw-r--r--net/mac80211/debugfs_key.c31
-rw-r--r--net/mac80211/debugfs_key.h10
-rw-r--r--net/mac80211/debugfs_netdev.c13
-rw-r--r--net/mac80211/debugfs_sta.c6
-rw-r--r--net/mac80211/driver-ops.h27
-rw-r--r--net/mac80211/he.c4
-rw-r--r--net/mac80211/ht.c64
-rw-r--r--net/mac80211/ieee80211_i.h32
-rw-r--r--net/mac80211/iface.c86
-rw-r--r--net/mac80211/key.c59
-rw-r--r--net/mac80211/key.h3
-rw-r--r--net/mac80211/main.c39
-rw-r--r--net/mac80211/mlme.c140
-rw-r--r--net/mac80211/rx.c96
-rw-r--r--net/mac80211/scan.c3
-rw-r--r--net/mac80211/sta_info.c49
-rw-r--r--net/mac80211/sta_info.h4
-rw-r--r--net/mac80211/status.c91
-rw-r--r--net/mac80211/tx.c209
-rw-r--r--net/mac80211/util.c87
-rw-r--r--net/mac80211/vht.c58
-rw-r--r--net/mac80211/wep.c4
-rw-r--r--net/mac80211/wep.h2
-rw-r--r--net/mpls/internal.h4
-rw-r--r--net/mpls/mpls_iptunnel.c2
-rw-r--r--net/mptcp/Makefile3
-rw-r--r--net/mptcp/crypto.c17
-rw-r--r--net/mptcp/diag.c104
-rw-r--r--net/mptcp/mib.c69
-rw-r--r--net/mptcp/mib.h40
-rw-r--r--net/mptcp/options.c553
-rw-r--r--net/mptcp/pm.c242
-rw-r--r--net/mptcp/pm_netlink.c857
-rw-r--r--net/mptcp/protocol.c1056
-rw-r--r--net/mptcp/protocol.h204
-rw-r--r--net/mptcp/subflow.c403
-rw-r--r--net/mptcp/token.c38
-rw-r--r--net/netfilter/Kconfig8
-rw-r--r--net/netfilter/Makefile13
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c2
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c2
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h4
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c45
-rw-r--r--net/netfilter/nf_conntrack_core.c19
-rw-r--r--net/netfilter/nf_conntrack_netlink.c3
-rw-r--r--net/netfilter/nf_conntrack_standalone.c19
-rw-r--r--net/netfilter/nf_flow_table_core.c59
-rw-r--r--net/netfilter/nf_flow_table_ip.c22
-rw-r--r--net/netfilter/nf_flow_table_offload.c330
-rw-r--r--net/netfilter/nf_queue.c96
-rw-r--r--net/netfilter/nf_tables_api.c238
-rw-r--r--net/netfilter/nf_tables_offload.c2
-rw-r--r--net/netfilter/nf_tables_set_core.c31
-rw-r--r--net/netfilter/nfnetlink_acct.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c10
-rw-r--r--net/netfilter/nft_bitwise.c14
-rw-r--r--net/netfilter/nft_dynset.c45
-rw-r--r--net/netfilter/nft_exthdr.c8
-rw-r--r--net/netfilter/nft_lookup.c1
-rw-r--r--net/netfilter/nft_set_bitmap.c6
-rw-r--r--net/netfilter/nft_set_hash.c9
-rw-r--r--net/netfilter/nft_set_pipapo.c637
-rw-r--r--net/netfilter/nft_set_pipapo.h280
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.c1223
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.h14
-rw-r--r--net/netfilter/nft_set_rbtree.c3
-rw-r--r--net/netfilter/nft_tunnel.c110
-rw-r--r--net/netfilter/xt_IDLETIMER.c248
-rw-r--r--net/netfilter/xt_SECMARK.c2
-rw-r--r--net/netfilter/xt_hashlimit.c2
-rw-r--r--net/netfilter/xt_recent.c4
-rw-r--r--net/netlink/af_netlink.c3
-rw-r--r--net/netrom/af_netrom.c2
-rw-r--r--net/netrom/nr_route.c4
-rw-r--r--net/nfc/digital_dep.c4
-rw-r--r--net/openvswitch/actions.c67
-rw-r--r--net/openvswitch/datapath.c2
-rw-r--r--net/openvswitch/flow_netlink.c70
-rw-r--r--net/qrtr/Makefile2
-rw-r--r--net/qrtr/ns.c757
-rw-r--r--net/qrtr/qrtr.c43
-rw-r--r--net/qrtr/qrtr.h4
-rw-r--r--net/sched/Kconfig2
-rw-r--r--net/sched/act_api.c43
-rw-r--r--net/sched/act_bpf.c3
-rw-r--r--net/sched/act_ct.c569
-rw-r--r--net/sched/act_pedit.c11
-rw-r--r--net/sched/act_sample.c2
-rw-r--r--net/sched/act_skbedit.c11
-rw-r--r--net/sched/cls_api.c168
-rw-r--r--net/sched/cls_flower.c70
-rw-r--r--net/sched/cls_matchall.c8
-rw-r--r--net/sched/em_ipt.c2
-rw-r--r--net/sched/em_nbyte.c2
-rw-r--r--net/sched/sch_api.c21
-rw-r--r--net/sched/sch_atm.c2
-rw-r--r--net/sched/sch_fifo.c97
-rw-r--r--net/sched/sch_fq.c21
-rw-r--r--net/sched/sch_fq_pie.c1
-rw-r--r--net/sched/sch_generic.c8
-rw-r--r--net/sched/sch_ingress.c11
-rw-r--r--net/sched/sch_netem.c2
-rw-r--r--net/sched/sch_pie.c49
-rw-r--r--net/sched/sch_red.c69
-rw-r--r--net/sctp/diag.c7
-rw-r--r--net/sctp/input.c1
-rw-r--r--net/sctp/ipv6.c20
-rw-r--r--net/sctp/protocol.c28
-rw-r--r--net/sctp/socket.c35
-rw-r--r--net/smc/smc_clc.c5
-rw-r--r--net/smc/smc_core.c26
-rw-r--r--net/smc/smc_core.h8
-rw-r--r--net/smc/smc_ib.c63
-rw-r--r--net/smc/smc_ib.h1
-rw-r--r--net/smc/smc_llc.c2
-rw-r--r--net/smc/smc_tx.c2
-rw-r--r--net/switchdev/switchdev.c11
-rw-r--r--net/tipc/monitor.c12
-rw-r--r--net/tipc/msg.c3
-rw-r--r--net/tipc/msg.h5
-rw-r--r--net/tipc/node.c3
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/tls/tls_device.c2
-rw-r--r--net/tls/tls_main.c31
-rw-r--r--net/unix/af_unix.c29
-rw-r--r--net/vmw_vsock/virtio_transport_common.c1
-rw-r--r--net/wireless/core.c6
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/mlme.c33
-rw-r--r--net/wireless/nl80211.c420
-rw-r--r--net/wireless/pmsr.c32
-rw-r--r--net/wireless/rdev-ops.h37
-rw-r--r--net/wireless/reg.c2
-rw-r--r--net/wireless/scan.c11
-rw-r--r--net/wireless/sme.c11
-rw-r--r--net/wireless/trace.h54
-rw-r--r--net/wireless/util.c9
-rw-r--r--net/x25/x25_forward.c12
-rw-r--r--net/xdp/xsk_queue.h4
-rw-r--r--net/xfrm/espintcp.c2
-rw-r--r--net/xfrm/xfrm_device.c28
-rw-r--r--net/xfrm/xfrm_output.c4
-rw-r--r--net/xfrm/xfrm_policy.c3
-rw-r--r--net/xfrm/xfrm_state.c2
-rw-r--r--samples/bpf/Makefile8
-rw-r--r--samples/bpf/bpf_load.c20
-rw-r--r--samples/bpf/bpf_load.h1
-rw-r--r--samples/bpf/sampleip_user.c98
-rw-r--r--samples/bpf/trace_event_user.c139
-rw-r--r--samples/bpf/tracex1_user.c1
-rw-r--r--samples/bpf/tracex5_user.c1
-rwxr-xr-xscripts/bpf_helpers_doc.py4
-rwxr-xr-xscripts/link-vmlinux.sh42
-rw-r--r--security/Kconfig10
-rw-r--r--security/Makefile2
-rw-r--r--security/bpf/Makefile5
-rw-r--r--security/bpf/hooks.c26
-rw-r--r--security/security.c41
-rw-r--r--sound/pci/bt87x.c7
-rw-r--r--tools/bpf/bpftool/.gitignore2
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-feature.rst19
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst22
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst116
-rw-r--r--tools/bpf/bpftool/Makefile36
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool110
-rw-r--r--tools/bpf/bpftool/btf.c5
-rw-r--r--tools/bpf/bpftool/btf_dumper.c199
-rw-r--r--tools/bpf/bpftool/common.c40
-rw-r--r--tools/bpf/bpftool/feature.c283
-rw-r--r--tools/bpf/bpftool/main.c10
-rw-r--r--tools/bpf/bpftool/main.h12
-rw-r--r--tools/bpf/bpftool/map.c2
-rw-r--r--tools/bpf/bpftool/prog.c458
-rw-r--r--tools/bpf/bpftool/skeleton/profiler.bpf.c119
-rw-r--r--tools/bpf/bpftool/skeleton/profiler.h46
-rw-r--r--tools/bpf/bpftool/struct_ops.c596
-rw-r--r--tools/bpf/runqslower/runqslower.bpf.c4
-rw-r--r--tools/build/feature/Makefile9
-rw-r--r--tools/build/feature/test-clang-bpf-global-var.c4
-rw-r--r--tools/include/uapi/linux/bpf.h324
-rw-r--r--tools/include/uapi/linux/if_link.h6
-rw-r--r--tools/include/uapi/linux/types.h (renamed from tools/testing/selftests/bpf/include/uapi/linux/types.h)0
-rw-r--r--tools/lib/bpf/bpf.c37
-rw-r--r--tools/lib/bpf/bpf.h19
-rw-r--r--tools/lib/bpf/bpf_tracing.h223
-rw-r--r--tools/lib/bpf/btf.c20
-rw-r--r--tools/lib/bpf/btf_dump.c10
-rw-r--r--tools/lib/bpf/libbpf.c324
-rw-r--r--tools/lib/bpf/libbpf.h31
-rw-r--r--tools/lib/bpf/libbpf.map19
-rw-r--r--tools/lib/bpf/libbpf_probes.c1
-rw-r--r--tools/lib/bpf/netlink.c34
-rw-r--r--tools/lib/bpf/xsk.c16
-rw-r--r--tools/scripts/Makefile.include1
-rw-r--r--tools/testing/selftests/.gitignore5
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/bpf/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/Makefile33
-rw-r--r--tools/testing/selftests/bpf/bpf_tcp_helpers.h2
-rw-r--r--tools/testing/selftests/bpf/bpf_trace_helpers.h120
-rw-r--r--tools/testing/selftests/bpf/config2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c39
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dump.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_link.c244
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fentry_fexit.c12
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fentry_test.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_test.c69
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_data_init.c61
-rw-r--r--tools/testing/selftests/bpf/prog_tests/link_pinning.c105
-rw-r--r--tools/testing/selftests/bpf/prog_tests/modify_return.c65
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c88
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_branches.c170
-rw-r--r--tools/testing/selftests/bpf/prog_tests/select_reuseport.c73
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_assign.c309
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skb_ctx.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c124
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_listen.c1635
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_rtt.c32
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_lsm.c86
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trampoline_count.c25
-rw-r--r--tools/testing/selftests/bpf/prog_tests/vmlinux.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_attach.c62
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c69
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp.c18
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c2
-rw-r--r--tools/testing/selftests/bpf/progs/fentry_test.c2
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c2
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c2
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_test.c2
-rw-r--r--tools/testing/selftests/bpf/progs/kfree_skb.c2
-rw-r--r--tools/testing/selftests/bpf/progs/lsm.c48
-rw-r--r--tools/testing/selftests/bpf/progs/modify_return.c49
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_parse_prog.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_probe.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_cgroup_link.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_data.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_link_pinning.c25
-rw-r--r--tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c37
-rw-r--r--tools/testing/selftests/bpf/progs/test_overhead.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_perf_branches.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_perf_buffer.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_probe_user.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_assign.c204
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_ctx.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_listen.c98
-rw-r--r--tools/testing/selftests/bpf/progs/test_trampoline_count.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_vmlinux.c84
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c30
-rw-r--r--tools/testing/selftests/bpf/test_bpftool.py178
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool.sh5
-rw-r--r--tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c159
-rw-r--r--tools/testing/selftests/bpf/test_maps.c6
-rw-r--r--tools/testing/selftests/bpf/test_progs.c102
-rw-r--r--tools/testing/selftests/bpf/test_progs.h9
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c23
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.h1
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds.c57
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_get_stack.c8
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx.c105
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_skb.c47
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh5
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_acl_drops.sh151
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh28
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh55
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh384
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan.sh4
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/extack.sh45
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh13
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh18
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh30
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/router_scale.sh53
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh68
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_ets.sh14
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh533
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh94
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_red_prio.sh5
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh68
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh222
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sharedbuffer_configuration.py416
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh5
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh4
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/tc_action_hw_stats.sh130
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/tc_flower_restrictions.sh186
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh31
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan.sh229
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink.sh15
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink_trap.sh121
-rw-r--r--tools/testing/selftests/net/.gitignore5
-rw-r--r--tools/testing/selftests/net/Makefile4
-rw-r--r--tools/testing/selftests/net/config2
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh72
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh88
-rwxr-xr-xtools/testing/selftests/net/forwarding/pedit_dsfield.sh238
-rwxr-xr-xtools/testing/selftests/net/forwarding/sch_ets.sh9
-rw-r--r--tools/testing/selftests/net/forwarding/sch_ets_tests.sh10
-rwxr-xr-xtools/testing/selftests/net/forwarding/skbedit_priority.sh168
-rw-r--r--tools/testing/selftests/net/forwarding/tc_common.sh32
-rw-r--r--tools/testing/selftests/net/hwtstamp_config.c (renamed from tools/testing/selftests/networking/timestamping/hwtstamp_config.c)0
-rw-r--r--tools/testing/selftests/net/mptcp/.gitignore1
-rw-r--r--tools/testing/selftests/net/mptcp/Makefile7
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c71
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh24
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh357
-rwxr-xr-xtools/testing/selftests/net/mptcp/pm_netlink.sh130
-rw-r--r--tools/testing/selftests/net/mptcp/pm_nl_ctl.c616
-rw-r--r--tools/testing/selftests/net/reuseaddr_ports_exhausted.c162
-rwxr-xr-xtools/testing/selftests/net/reuseaddr_ports_exhausted.sh35
-rw-r--r--tools/testing/selftests/net/rxtimestamp.c (renamed from tools/testing/selftests/networking/timestamping/rxtimestamp.c)0
-rw-r--r--tools/testing/selftests/net/timestamping.c (renamed from tools/testing/selftests/networking/timestamping/timestamping.c)0
-rw-r--r--tools/testing/selftests/net/txtimestamp.c (renamed from tools/testing/selftests/networking/timestamping/txtimestamp.c)179
-rwxr-xr-xtools/testing/selftests/net/txtimestamp.sh (renamed from tools/testing/selftests/networking/timestamping/txtimestamp.sh)33
-rw-r--r--tools/testing/selftests/networking/timestamping/.gitignore4
-rw-r--r--tools/testing/selftests/networking/timestamping/Makefile11
-rw-r--r--tools/testing/selftests/networking/timestamping/config2
-rw-r--r--tools/testing/selftests/tc-testing/config6
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/basic.json902
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/red.json185
-rw-r--r--tools/testing/vsock/vsock_test.c77
1772 files changed, 113633 insertions, 26287 deletions
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index 287b98708a40..e043c9213388 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -67,7 +67,8 @@ two flavors of JITs, the newer eBPF JIT currently supported on:
- sparc64
- mips64
- s390x
- - riscv
+ - riscv64
+ - riscv32
And the older cBPF JIT supported on the following archs:
diff --git a/Documentation/bpf/bpf_devel_QA.rst b/Documentation/bpf/bpf_devel_QA.rst
index c9856b927055..38c15c6fcb14 100644
--- a/Documentation/bpf/bpf_devel_QA.rst
+++ b/Documentation/bpf/bpf_devel_QA.rst
@@ -20,11 +20,11 @@ Reporting bugs
Q: How do I report bugs for BPF kernel code?
--------------------------------------------
A: Since all BPF kernel development as well as bpftool and iproute2 BPF
-loader development happens through the netdev kernel mailing list,
+loader development happens through the bpf kernel mailing list,
please report any found issues around BPF to the following mailing
list:
- netdev@vger.kernel.org
+ bpf@vger.kernel.org
This may also include issues related to XDP, BPF tracing, etc.
@@ -46,17 +46,12 @@ Submitting patches
Q: To which mailing list do I need to submit my BPF patches?
------------------------------------------------------------
-A: Please submit your BPF patches to the netdev kernel mailing list:
+A: Please submit your BPF patches to the bpf kernel mailing list:
- netdev@vger.kernel.org
-
-Historically, BPF came out of networking and has always been maintained
-by the kernel networking community. Although these days BPF touches
-many other subsystems as well, the patches are still routed mainly
-through the networking community.
+ bpf@vger.kernel.org
In case your patch has changes in various different subsystems (e.g.
-tracing, security, etc), make sure to Cc the related kernel mailing
+networking, tracing, security, etc), make sure to Cc the related kernel mailing
lists and maintainers from there as well, so they are able to review
the changes and provide their Acked-by's to the patches.
@@ -168,7 +163,7 @@ a BPF point of view.
Be aware that this is not a final verdict that the patch will
automatically get accepted into net or net-next trees eventually:
-On the netdev kernel mailing list reviews can come in at any point
+On the bpf kernel mailing list reviews can come in at any point
in time. If discussions around a patch conclude that they cannot
get included as-is, we will either apply a follow-up fix or drop
them from the trees entirely. Therefore, we also reserve to rebase
@@ -494,15 +489,15 @@ A: You need cmake and gcc-c++ as build requisites for LLVM. Once you have
that set up, proceed with building the latest LLVM and clang version
from the git repositories::
- $ git clone http://llvm.org/git/llvm.git
- $ cd llvm/tools
- $ git clone --depth 1 http://llvm.org/git/clang.git
- $ cd ..; mkdir build; cd build
- $ cmake .. -DLLVM_TARGETS_TO_BUILD="BPF;X86" \
+ $ git clone https://github.com/llvm/llvm-project.git
+ $ mkdir -p llvm-project/llvm/build/install
+ $ cd llvm-project/llvm/build
+ $ cmake .. -G "Ninja" -DLLVM_TARGETS_TO_BUILD="BPF;X86" \
+ -DLLVM_ENABLE_PROJECTS="clang" \
-DBUILD_SHARED_LIBS=OFF \
-DCMAKE_BUILD_TYPE=Release \
-DLLVM_BUILD_RUNTIME=OFF
- $ make -j $(getconf _NPROCESSORS_ONLN)
+ $ ninja
The built binaries can then be found in the build/bin/ directory, where
you can point the PATH variable to.
diff --git a/Documentation/bpf/bpf_lsm.rst b/Documentation/bpf/bpf_lsm.rst
new file mode 100644
index 000000000000..1c0a75a51d79
--- /dev/null
+++ b/Documentation/bpf/bpf_lsm.rst
@@ -0,0 +1,142 @@
+.. SPDX-License-Identifier: GPL-2.0+
+.. Copyright (C) 2020 Google LLC.
+
+================
+LSM BPF Programs
+================
+
+These BPF programs allow runtime instrumentation of the LSM hooks by privileged
+users to implement system-wide MAC (Mandatory Access Control) and Audit
+policies using eBPF.
+
+Structure
+---------
+
+The example shows an eBPF program that can be attached to the ``file_mprotect``
+LSM hook:
+
+.. c:function:: int file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot);
+
+Other LSM hooks which can be instrumented can be found in
+``include/linux/lsm_hooks.h``.
+
+eBPF programs that use :doc:`/bpf/btf` do not need to include kernel headers
+for accessing information from the attached eBPF program's context. They can
+simply declare the structures in the eBPF program and only specify the fields
+that need to be accessed.
+
+.. code-block:: c
+
+ struct mm_struct {
+ unsigned long start_brk, brk, start_stack;
+ } __attribute__((preserve_access_index));
+
+        struct vm_area_struct {
+                unsigned long vm_start, vm_end;
+                struct mm_struct *vm_mm;
+        } __attribute__((preserve_access_index));
+
+
+.. note:: The order of the fields is irrelevant.
+
+This can be further simplified (if one has access to the BTF information at
+build time) by generating the ``vmlinux.h`` with:
+
+.. code-block:: console
+
+ # bpftool btf dump file <path-to-btf-vmlinux> format c > vmlinux.h
+
+.. note:: ``path-to-btf-vmlinux`` can be ``/sys/kernel/btf/vmlinux`` if the
+ build environment matches the environment the BPF programs are
+ deployed in.
+
+The ``vmlinux.h`` can then simply be included in the BPF programs without
+requiring the definition of the types.
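+
+For illustration, the manual declarations above can then be replaced by a
+single include (a sketch; the include path assumes the generated header is
+placed next to the program source):
+
+.. code-block:: c
+
+        #include "vmlinux.h"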
+
+The eBPF programs can be declared using the ``BPF_PROG``
+macro defined in `tools/lib/bpf/bpf_tracing.h`_. In this
+example:
+
+ * ``"lsm/file_mprotect"`` indicates the LSM hook that the program must
+ be attached to
+ * ``mprotect_audit`` is the name of the eBPF program
+
+.. code-block:: c
+
+ SEC("lsm/file_mprotect")
+ int BPF_PROG(mprotect_audit, struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot, int ret)
+ {
+ /* ret is the return value from the previous BPF program
+ * or 0 if it's the first hook.
+ */
+ if (ret != 0)
+ return ret;
+
+ int is_heap;
+
+ is_heap = (vma->vm_start >= vma->vm_mm->start_brk &&
+ vma->vm_end <= vma->vm_mm->brk);
+
+                /* Return -EPERM or write information to the perf events buffer
+                 * for auditing
+                 */
+                if (is_heap)
+                        return -EPERM;
+
+                return 0;
+        }
+
+The ``__attribute__((preserve_access_index))`` is a clang feature that allows
+the BPF verifier to update the offsets for the access at runtime using the
+:doc:`/bpf/btf` information. Since the BPF verifier is aware of the types, it
+also validates all the accesses made to the various types in the eBPF program.
+
+Loading
+-------
+
+eBPF programs can be loaded with the :manpage:`bpf(2)` syscall's
+``BPF_PROG_LOAD`` operation:
+
+.. code-block:: c
+
+        struct bpf_object *obj;
+
+        /* Error handling is omitted for brevity; both calls can fail. */
+        obj = bpf_object__open("./my_prog.o");
+        bpf_object__load(obj);
+
+This can be simplified by using a skeleton header generated by ``bpftool``:
+
+.. code-block:: console
+
+ # bpftool gen skeleton my_prog.o > my_prog.skel.h
+
+and the program can be loaded by including ``my_prog.skel.h`` and using
+the generated helper, ``my_prog__open_and_load``.
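+
+A minimal sketch of the skeleton-based flow (the ``struct my_prog`` type and
+the ``my_prog__*`` helpers are derived from the object file name; the error
+handling shown is an assumption about the surrounding program):
+
+.. code-block:: c
+
+        #include "my_prog.skel.h"
+
+        struct my_prog *skel;
+
+        /* open_and_load returns NULL on failure */
+        skel = my_prog__open_and_load();
+        if (!skel)
+                return -EINVAL;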
+
+Attachment to LSM Hooks
+-----------------------
+
+The LSM allows attachment of eBPF programs as LSM hooks using :manpage:`bpf(2)`
+syscall's ``BPF_RAW_TRACEPOINT_OPEN`` operation or more simply by
+using the libbpf helper ``bpf_program__attach_lsm``.
+
+The program can be detached from the LSM hook by *destroying* the ``link``
+returned by ``bpf_program__attach_lsm`` using ``bpf_link__destroy``.
+
+One can also use the helpers generated in ``my_prog.skel.h`` i.e.
+``my_prog__attach`` for attachment and ``my_prog__destroy`` for cleaning up.
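+
+Continuing the sketch above (the program name ``mprotect_audit`` and the
+``skel`` variable come from the earlier examples; this is illustrative, not
+the only way to attach):
+
+.. code-block:: c
+
+        struct bpf_link *link;
+
+        link = bpf_program__attach_lsm(skel->progs.mprotect_audit);
+        if (libbpf_get_error(link))
+                return -EINVAL;
+
+        /* Detach by destroying the link */
+        bpf_link__destroy(link);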
+
+Examples
+--------
+
+An example eBPF program can be found in
+`tools/testing/selftests/bpf/progs/lsm.c`_ and the corresponding
+userspace code in `tools/testing/selftests/bpf/prog_tests/test_lsm.c`_
+
+.. Links
+.. _tools/lib/bpf/bpf_tracing.h:
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/tools/lib/bpf/bpf_tracing.h
+.. _tools/testing/selftests/bpf/progs/lsm.c:
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/tools/testing/selftests/bpf/progs/lsm.c
+.. _tools/testing/selftests/bpf/prog_tests/test_lsm.c:
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/tools/testing/selftests/bpf/prog_tests/test_lsm.c
diff --git a/Documentation/bpf/drgn.rst b/Documentation/bpf/drgn.rst
new file mode 100644
index 000000000000..41f223c3161e
--- /dev/null
+++ b/Documentation/bpf/drgn.rst
@@ -0,0 +1,213 @@
+.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+==============
+BPF drgn tools
+==============
+
+drgn scripts are a convenient and easy-to-use mechanism to retrieve arbitrary
+kernel data structures. drgn does not rely on the kernel UAPI to read the
+data. Instead, it reads directly from ``/proc/kcore`` or vmcore and
+pretty-prints the data based on DWARF debug information from vmlinux.
+
+This document describes BPF related drgn tools.
+
+See `drgn/tools`_ for all tools available at the moment and `drgn/doc`_ for
+more details on drgn itself.
+
+bpf_inspect.py
+--------------
+
+Description
+===========
+
+`bpf_inspect.py`_ is a tool intended to inspect BPF programs and maps. It can
+iterate over all programs and maps in the system and print basic information
+about these objects, including id, type and name.
+
+The main use-case `bpf_inspect.py`_ covers is to show BPF programs of types
+``BPF_PROG_TYPE_EXT`` and ``BPF_PROG_TYPE_TRACING`` attached to other BPF
+programs via ``freplace``/``fentry``/``fexit`` mechanisms, since there is no
+user-space API to get this information.
+
+Getting started
+===============
+
+List BPF programs (full names are obtained from BTF)::
+
+ % sudo bpf_inspect.py prog
+ 27: BPF_PROG_TYPE_TRACEPOINT tracepoint__tcp__tcp_send_reset
+ 4632: BPF_PROG_TYPE_CGROUP_SOCK_ADDR tw_ipt_bind
+ 49464: BPF_PROG_TYPE_RAW_TRACEPOINT raw_tracepoint__sched_process_exit
+
+List BPF maps::
+
+ % sudo bpf_inspect.py map
+ 2577: BPF_MAP_TYPE_HASH tw_ipt_vips
+ 4050: BPF_MAP_TYPE_STACK_TRACE stack_traces
+ 4069: BPF_MAP_TYPE_PERCPU_ARRAY ned_dctcp_cntr
+
+Find BPF programs attached to BPF program ``test_pkt_access``::
+
+ % sudo bpf_inspect.py p | grep test_pkt_access
+ 650: BPF_PROG_TYPE_SCHED_CLS test_pkt_access
+ 654: BPF_PROG_TYPE_TRACING test_main linked:[650->25: BPF_TRAMP_FEXIT test_pkt_access->test_pkt_access()]
+ 655: BPF_PROG_TYPE_TRACING test_subprog1 linked:[650->29: BPF_TRAMP_FEXIT test_pkt_access->test_pkt_access_subprog1()]
+ 656: BPF_PROG_TYPE_TRACING test_subprog2 linked:[650->31: BPF_TRAMP_FEXIT test_pkt_access->test_pkt_access_subprog2()]
+ 657: BPF_PROG_TYPE_TRACING test_subprog3 linked:[650->21: BPF_TRAMP_FEXIT test_pkt_access->test_pkt_access_subprog3()]
+ 658: BPF_PROG_TYPE_EXT new_get_skb_len linked:[650->16: BPF_TRAMP_REPLACE test_pkt_access->get_skb_len()]
+ 659: BPF_PROG_TYPE_EXT new_get_skb_ifindex linked:[650->23: BPF_TRAMP_REPLACE test_pkt_access->get_skb_ifindex()]
+ 660: BPF_PROG_TYPE_EXT new_get_constant linked:[650->19: BPF_TRAMP_REPLACE test_pkt_access->get_constant()]
+
+It can be seen that there is a program ``test_pkt_access``, id 650, and that
+multiple other tracing and ext programs are attached to functions in
+``test_pkt_access``.
+
+For example, the line::
+
+ 658: BPF_PROG_TYPE_EXT new_get_skb_len linked:[650->16: BPF_TRAMP_REPLACE test_pkt_access->get_skb_len()]
+
+means that BPF program id 658, type ``BPF_PROG_TYPE_EXT``, name
+``new_get_skb_len`` replaces (``BPF_TRAMP_REPLACE``) function ``get_skb_len()``
+that has BTF id 16 in BPF program id 650, name ``test_pkt_access``.
+
+Getting help:
+
+.. code-block:: none
+
+ % sudo bpf_inspect.py
+ usage: bpf_inspect.py [-h] {prog,p,map,m} ...
+
+ drgn script to list BPF programs or maps and their properties
+ unavailable via kernel API.
+
+ See https://github.com/osandov/drgn/ for more details on drgn.
+
+ optional arguments:
+ -h, --help show this help message and exit
+
+ subcommands:
+ {prog,p,map,m}
+ prog (p) list BPF programs
+ map (m) list BPF maps
+
+Customization
+=============
+
+The script is intended to be customized by developers to print relevant
+information about BPF programs, maps and other objects.
+
+For example, to print ``struct bpf_prog_aux`` for BPF program id 53077:
+
+.. code-block:: none
+
+ % git diff
+ diff --git a/tools/bpf_inspect.py b/tools/bpf_inspect.py
+ index 650e228..aea2357 100755
+ --- a/tools/bpf_inspect.py
+ +++ b/tools/bpf_inspect.py
+ @@ -112,7 +112,9 @@ def list_bpf_progs(args):
+ if linked:
+ linked = f" linked:[{linked}]"
+
+ - print(f"{id_:>6}: {type_:32} {name:32} {linked}")
+ + if id_ == 53077:
+ + print(f"{id_:>6}: {type_:32} {name:32}")
+ + print(f"{bpf_prog.aux}")
+
+
+ def list_bpf_maps(args):
+
+It produces the output::
+
+ % sudo bpf_inspect.py p
+ 53077: BPF_PROG_TYPE_XDP tw_xdp_policer
+ *(struct bpf_prog_aux *)0xffff8893fad4b400 = {
+ .refcnt = (atomic64_t){
+ .counter = (long)58,
+ },
+ .used_map_cnt = (u32)1,
+ .max_ctx_offset = (u32)8,
+ .max_pkt_offset = (u32)15,
+ .max_tp_access = (u32)0,
+ .stack_depth = (u32)8,
+ .id = (u32)53077,
+ .func_cnt = (u32)0,
+ .func_idx = (u32)0,
+ .attach_btf_id = (u32)0,
+ .linked_prog = (struct bpf_prog *)0x0,
+ .verifier_zext = (bool)0,
+ .offload_requested = (bool)0,
+ .attach_btf_trace = (bool)0,
+ .func_proto_unreliable = (bool)0,
+ .trampoline_prog_type = (enum bpf_tramp_prog_type)BPF_TRAMP_FENTRY,
+ .trampoline = (struct bpf_trampoline *)0x0,
+ .tramp_hlist = (struct hlist_node){
+ .next = (struct hlist_node *)0x0,
+ .pprev = (struct hlist_node **)0x0,
+ },
+ .attach_func_proto = (const struct btf_type *)0x0,
+ .attach_func_name = (const char *)0x0,
+ .func = (struct bpf_prog **)0x0,
+ .jit_data = (void *)0x0,
+ .poke_tab = (struct bpf_jit_poke_descriptor *)0x0,
+ .size_poke_tab = (u32)0,
+ .ksym_tnode = (struct latch_tree_node){
+ .node = (struct rb_node [2]){
+ {
+ .__rb_parent_color = (unsigned long)18446612956263126665,
+ .rb_right = (struct rb_node *)0x0,
+ .rb_left = (struct rb_node *)0xffff88a0be3d0088,
+ },
+ {
+ .__rb_parent_color = (unsigned long)18446612956263126689,
+ .rb_right = (struct rb_node *)0x0,
+ .rb_left = (struct rb_node *)0xffff88a0be3d00a0,
+ },
+ },
+ },
+ .ksym_lnode = (struct list_head){
+ .next = (struct list_head *)0xffff88bf481830b8,
+ .prev = (struct list_head *)0xffff888309f536b8,
+ },
+ .ops = (const struct bpf_prog_ops *)xdp_prog_ops+0x0 = 0xffffffff820fa350,
+ .used_maps = (struct bpf_map **)0xffff889ff795de98,
+ .prog = (struct bpf_prog *)0xffffc9000cf2d000,
+ .user = (struct user_struct *)root_user+0x0 = 0xffffffff82444820,
+ .load_time = (u64)2408348759285319,
+ .cgroup_storage = (struct bpf_map *[2]){},
+ .name = (char [16])"tw_xdp_policer",
+ .security = (void *)0xffff889ff795d548,
+ .offload = (struct bpf_prog_offload *)0x0,
+ .btf = (struct btf *)0xffff8890ce6d0580,
+ .func_info = (struct bpf_func_info *)0xffff889ff795d240,
+ .func_info_aux = (struct bpf_func_info_aux *)0xffff889ff795de20,
+ .linfo = (struct bpf_line_info *)0xffff888a707afc00,
+ .jited_linfo = (void **)0xffff8893fad48600,
+ .func_info_cnt = (u32)1,
+ .nr_linfo = (u32)37,
+ .linfo_idx = (u32)0,
+ .num_exentries = (u32)0,
+ .extable = (struct exception_table_entry *)0xffffffffa032d950,
+ .stats = (struct bpf_prog_stats *)0x603fe3a1f6d0,
+ .work = (struct work_struct){
+ .data = (atomic_long_t){
+ .counter = (long)0,
+ },
+ .entry = (struct list_head){
+ .next = (struct list_head *)0x0,
+ .prev = (struct list_head *)0x0,
+ },
+ .func = (work_func_t)0x0,
+ },
+ .rcu = (struct callback_head){
+ .next = (struct callback_head *)0x0,
+ .func = (void (*)(struct callback_head *))0x0,
+ },
+ }
+
+
+.. Links
+.. _drgn/doc: https://drgn.readthedocs.io/en/latest/
+.. _drgn/tools: https://github.com/osandov/drgn/tree/master/tools
+.. _bpf_inspect.py:
+ https://github.com/osandov/drgn/blob/master/tools/bpf_inspect.py
diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst
index 4f5410b61441..f99677f3572f 100644
--- a/Documentation/bpf/index.rst
+++ b/Documentation/bpf/index.rst
@@ -45,14 +45,16 @@ Program types
prog_cgroup_sockopt
prog_cgroup_sysctl
prog_flow_dissector
+ bpf_lsm
-Testing BPF
-===========
+Testing and debugging BPF
+=========================
.. toctree::
:maxdepth: 1
+ drgn
s390
diff --git a/Documentation/devicetree/bindings/net/dsa/ocelot.txt b/Documentation/devicetree/bindings/net/dsa/ocelot.txt
new file mode 100644
index 000000000000..66a129fea705
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/ocelot.txt
@@ -0,0 +1,116 @@
+Microchip Ocelot switch driver family
+=====================================
+
+Felix
+-----
+
+The VSC9959 core is currently the only switch supported by the driver, and is
+found in the NXP LS1028A. It is a PCI device, part of the larger ENETC root
+complex. As a result, the ethernet-switch node is a sub-node of the PCIe root
+complex node and its "reg" property conforms to the parent node bindings:
+
+* reg: Specifies PCIe Device Number and Function Number of the endpoint device,
+ in this case for the Ethernet L2Switch it is PF5 (of device 0, bus 0).
+
+It does not require a "compatible" string.
+
+The interrupt line is used to signal availability of PTP TX timestamps and for
+TSN frame preemption.
+
+For the external switch ports, depending on board configuration, "phy-mode" and
+"phy-handle" are populated by board specific device tree instances. Ports 4 and
+5 are fixed as internal ports in the NXP LS1028A instantiation.
+
+The CPU port property ("ethernet") configures the feature called "NPI port" in
+the Ocelot hardware core. The CPU port in Ocelot is a set of queues, which are
+connected, in the Node Processor Interface (NPI) mode, to an Ethernet port.
+By default, in fsl-ls1028a.dtsi, the NPI port is assigned to the internal
+2.5Gbps port@4, but can be moved to the 1Gbps port@5, depending on the specific
+use case. Moving the NPI port to an external switch port is possible in
+hardware, but there is no platform support for the Linux system on the LS1028A
+chip to operate as an entire slave DSA chip. NPI functionality (and therefore
+DSA tagging) is supported on a single port at a time.
+
+Any port can be disabled (and in fsl-ls1028a.dtsi, they are indeed all disabled
+by default, and should be enabled on a per-board basis). But if any external
+switch port is enabled at all, the ENETC PF2 (enetc_port2) should be enabled as
+well, regardless of whether it is configured as the DSA master or not. This is
+because the Felix PHYLINK implementation accesses the MAC PCS registers, which
+in hardware truly belong to the ENETC port #2 and not to Felix.
+
+Supported PHY interface types (appropriate SerDes protocol setting changes are
+needed in the RCW binary):
+
+* phy_mode = "internal": on ports 4 and 5
+* phy_mode = "sgmii": on ports 0, 1, 2, 3
+* phy_mode = "qsgmii": on ports 0, 1, 2, 3
+* phy_mode = "usxgmii": on ports 0, 1, 2, 3
+* phy_mode = "2500base-x": on ports 0, 1, 2, 3
+
+For the rest of the device tree binding definitions, which are standard DSA and
+PCI, refer to the following documents:
+
+Documentation/devicetree/bindings/net/dsa/dsa.txt
+Documentation/devicetree/bindings/pci/pci.txt
+
+Example:
+
+&soc {
+ pcie@1f0000000 { /* Integrated Endpoint Root Complex */
+ ethernet-switch@0,5 {
+ reg = <0x000500 0 0 0 0>;
+ /* IEP INT_B */
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* External ports */
+ port@0 {
+ reg = <0>;
+ label = "swp0";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "swp1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "swp2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "swp3";
+ };
+
+ /* Tagging CPU port */
+ port@4 {
+ reg = <4>;
+ ethernet = <&enetc_port2>;
+ phy-mode = "internal";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ };
+ };
+
+ /* Non-tagging CPU port */
+ port@5 {
+ reg = <5>;
+ phy-mode = "internal";
+ status = "disabled";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/marvell,mvusb.yaml b/Documentation/devicetree/bindings/net/marvell,mvusb.yaml
new file mode 100644
index 000000000000..9458f6659be1
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/marvell,mvusb.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/marvell,mvusb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell USB to MDIO Controller
+
+maintainers:
+ - Tobias Waldekranz <tobias@waldekranz.com>
+
+description: |+
+ This controller is mounted on development boards for Marvell's Link Street
+ family of Ethernet switches. It allows you to configure the switch's registers
+ using the standard MDIO interface.
+
+  Since the device is connected over USB, there is no strict requirement to
+  have a device tree representation of the device. But in order to use it with
+ the mv88e6xxx driver, you need a device tree node in which to place the switch
+ definition.
+
+allOf:
+ - $ref: "mdio.yaml#"
+
+properties:
+ compatible:
+ const: usb1286,1fa4
+ reg:
+ maxItems: 1
+ description: The USB port number on the host controller
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+examples:
+ - |
+ /* USB host controller */
+ &usb1 {
+ mvusb: mdio@1 {
+ compatible = "usb1286,1fa4";
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ /* MV88E6390X devboard */
+ &mvusb {
+ switch@0 {
+ compatible = "marvell,mv88e6190";
+ status = "ok";
+ reg = <0x0>;
+
+ ports {
+ /* Port definitions */
+ };
+
+ mdio {
+ /* PHY definitions */
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/qcom,ipa.yaml b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
new file mode 100644
index 000000000000..140f15245654
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
@@ -0,0 +1,198 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/qcom,ipa.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm IP Accelerator (IPA)
+
+maintainers:
+ - Alex Elder <elder@kernel.org>
+
+description:
+ This binding describes the Qualcomm IPA. The IPA is capable of offloading
+ certain network processing tasks (e.g. filtering, routing, and NAT) from
+ the main processor.
+
+ The IPA sits between multiple independent "execution environments,"
+ including the Application Processor (AP) and the modem. The IPA presents
+ a Generic Software Interface (GSI) to each execution environment.
+ The GSI is an integral part of the IPA, but it is logically isolated
+ and has a distinct interrupt and a separately-defined address space.
+
+ See also soc/qcom/qcom,smp2p.txt and interconnect/interconnect.txt.
+
+ - |
+ -------- ---------
+ | | | |
+ | AP +<---. .----+ Modem |
+ | +--. | | .->+ |
+ | | | | | | | |
+ -------- | | | | ---------
+ v | v |
+ --+-+---+-+--
+ | GSI |
+ |-----------|
+ | |
+ | IPA |
+ | |
+ -------------
+
+properties:
+ compatible:
+ const: "qcom,sdm845-ipa"
+
+ reg:
+ items:
+ - description: IPA registers
+ - description: IPA shared memory
+ - description: GSI registers
+
+ reg-names:
+ items:
+ - const: ipa-reg
+ - const: ipa-shared
+ - const: gsi
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: core
+
+ interrupts:
+ items:
+ - description: IPA interrupt (hardware IRQ)
+ - description: GSI interrupt (hardware IRQ)
+ - description: Modem clock query interrupt (smp2p interrupt)
+ - description: Modem setup ready interrupt (smp2p interrupt)
+
+ interrupt-names:
+ items:
+ - const: ipa
+ - const: gsi
+ - const: ipa-clock-query
+ - const: ipa-setup-ready
+
+ interconnects:
+ items:
+ - description: Interconnect path between IPA and main memory
+ - description: Interconnect path between IPA and internal memory
+ - description: Interconnect path between IPA and the AP subsystem
+
+ interconnect-names:
+ items:
+ - const: memory
+ - const: imem
+ - const: config
+
+ qcom,smem-states:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/phandle-array
+    description: State bits used by the AP to signal the modem.
+ items:
+ - description: Whether the "ipa-clock-enabled" state bit is valid
+ - description: Whether the IPA clock is enabled (if valid)
+
+ qcom,smem-state-names:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/string-array
+ description: The names of the state bits used for SMP2P output
+ items:
+ - const: ipa-clock-enabled-valid
+ - const: ipa-clock-enabled
+
+ modem-init:
+ type: boolean
+ description:
+ If present, it indicates that the modem is responsible for
+ performing early IPA initialization, including loading and
+      validating firmware used by the GSI.
+
+ modem-remoteproc:
+    $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ This defines the phandle to the remoteproc node representing
+      the modem subsystem. This is required so the IPA driver can
+ receive and act on notifications of modem up/down events.
+
+ memory-region:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+ description:
+ If present, a phandle for a reserved memory area that holds
+ the firmware passed to Trust Zone for authentication. Required
+ when Trust Zone (not the modem) performs early initialization.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+ - interconnects
+ - qcom,smem-states
+ - modem-remoteproc
+
+oneOf:
+ - required:
+ - modem-init
+ - required:
+ - memory-region
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/interconnect/qcom,sdm845.h>
+
+ smp2p-mpss {
+ compatible = "qcom,smp2p";
+ ipa_smp2p_out: ipa-ap-to-modem {
+ qcom,entry-name = "ipa";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ ipa_smp2p_in: ipa-modem-to-ap {
+ qcom,entry-name = "ipa";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+ ipa@1e40000 {
+ compatible = "qcom,sdm845-ipa";
+
+ modem-init;
+ modem-remoteproc = <&mss_pil>;
+
+ reg = <0 0x1e40000 0 0x7000>,
+ <0 0x1e47000 0 0x2000>,
+ <0 0x1e04000 0 0x2c000>;
+ reg-names = "ipa-reg",
+ "ipa-shared",
+ "gsi";
+
+ interrupts-extended = <&intc 0 311 IRQ_TYPE_EDGE_RISING>,
+ <&intc 0 432 IRQ_TYPE_LEVEL_HIGH>,
+ <&ipa_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&ipa_smp2p_in 1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ipa",
+ "gsi",
+ "ipa-clock-query",
+ "ipa-setup-ready";
+
+ clocks = <&rpmhcc RPMH_IPA_CLK>;
+ clock-names = "core";
+
+ interconnects =
+ <&rsc_hlos MASTER_IPA &rsc_hlos SLAVE_EBI1>,
+ <&rsc_hlos MASTER_IPA &rsc_hlos SLAVE_IMEM>,
+ <&rsc_hlos MASTER_APPSS_PROC &rsc_hlos SLAVE_IPA_CFG>;
+ interconnect-names = "memory",
+ "imem",
+ "config";
+
+ qcom,smem-states = <&ipa_smp2p_out 0>,
+ <&ipa_smp2p_out 1>;
+ qcom,smem-state-names = "ipa-clock-enabled-valid",
+ "ipa-clock-enabled";
+ };
diff --git a/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml b/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml
new file mode 100644
index 000000000000..b9f90081046f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qcom,ipq8064-mdio.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/qcom,ipq8064-mdio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm ipq806x MDIO bus controller
+
+maintainers:
+ - Ansuel Smith <ansuelsmth@gmail.com>
+
+description:
+ The ipq806x SoC has a dedicated MDIO controller that is
+ used to communicate with the PHY connected to the gmac.
+
+allOf:
+ - $ref: "mdio.yaml#"
+
+properties:
+ compatible:
+ const: qcom,ipq8064-mdio
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - "#address-cells"
+ - "#size-cells"
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-ipq806x.h>
+
+ mdio0: mdio@37000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "qcom,ipq8064-mdio";
+ reg = <0x37000000 0x200000>;
+
+ clocks = <&gcc GMAC_CORE1_CLK>;
+
+ switch@10 {
+ compatible = "qca,qca8337";
+ /* ... */
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
index 999aceadb985..beca6466d59a 100644
--- a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
@@ -31,6 +31,7 @@ Optional properties for compatible string qcom,wcn399x-bt:
- max-speed: see Documentation/devicetree/bindings/serial/slave-device.txt
- firmware-name: specify the name of nvm firmware to load
+ - clocks: clock provided to the controller
Examples:
@@ -57,5 +58,6 @@ serial@898000 {
vddch0-supply = <&vreg_l25a_3p3>;
max-speed = <3200000>;
firmware-name = "crnv21.bin";
+ clocks = <&rpmhcc RPMH_RF_CLK2>;
};
};
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
new file mode 100644
index 000000000000..78bf511e2892
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
@@ -0,0 +1,225 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/ti,k3-am654-cpsw-nuss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: The TI AM654x/J721E SoC Gigabit Ethernet MAC (Media Access Controller) Device Tree Bindings
+
+maintainers:
+ - Grygorii Strashko <grygorii.strashko@ti.com>
+ - Sekhar Nori <nsekhar@ti.com>
+
+description:
+ The TI AM654x/J721E SoC Gigabit Ethernet MAC (CPSW2G NUSS) has two ports
+ (one external) and provides Ethernet packet communication for the device.
+ CPSW2G NUSS features: the Reduced Gigabit Media Independent Interface (RGMII),
+ Reduced Media Independent Interface (RMII), the Management Data
+ Input/Output (MDIO) interface for physical layer device (PHY) management,
+ a new version of Common Platform Time Sync (CPTS), and an updated Address
+ Lookup Engine (ALE).
+ One external Ethernet port (port 1) with selectable RGMII/RMII interfaces and
+ an internal Communications Port Programming Interface (CPPI5) (Host port 0).
+ The Host Port 0 CPPI Packet Streaming Interface supports 8 TX channels
+ and one RX channel, operated by the TI AM654x/J721E NAVSS Unified DMA
+ Peripheral Root Complex (UDMA-P) controller.
+ The CPSW2G NUSS is integrated into device MCU domain named MCU_CPSW0.
+
+ Additional features
+ priority level Quality Of Service (QOS) support (802.1p)
+ Support for Audio/Video Bridging (P802.1Qav/D6.0)
+ Support for IEEE 1588 Clock Synchronization (2008 Annex D, Annex E and Annex F)
+ Flow Control (802.3x) Support
+ Time Sensitive Network Support
+ IEEE P802.3br/D2.0 Interspersing Express Traffic
+ IEEE 802.1Qbv/D2.2 Enhancements for Scheduled Traffic
+ Configurable number of addresses plus VLANs
+ Configurable number of classifier/policers
+ VLAN support, 802.1Q compliant, Auto add port VLAN for untagged frames on
+ ingress, Auto VLAN removal on egress and auto pad to minimum frame size.
+ RX/TX csum offload
+
+ Specifications can be found at
+ http://www.ti.com/lit/ug/spruid7e/spruid7e.pdf
+ http://www.ti.com/lit/ug/spruil1a/spruil1a.pdf
+
+properties:
+ "#address-cells": true
+ "#size-cells": true
+
+ compatible:
+ oneOf:
+ - const: ti,am654-cpsw-nuss
+ - const: ti,j721e-cpsw-nuss
+
+ reg:
+ maxItems: 1
+ description:
+ The physical base address and size of the full CPSW2G NUSS IO range
+
+ reg-names:
+ items:
+ - const: cpsw_nuss
+
+ ranges: true
+
+ dma-coherent: true
+
+ clocks:
+ description: CPSW2G NUSS functional clock
+
+ clock-names:
+ items:
+ - const: fck
+
+ power-domains:
+ maxItems: 1
+
+ dmas:
+ maxItems: 9
+
+ dma-names:
+ items:
+ - const: tx0
+ - const: tx1
+ - const: tx2
+ - const: tx3
+ - const: tx4
+ - const: tx5
+ - const: tx6
+ - const: tx7
+ - const: rx
+
+ ethernet-ports:
+ type: object
+ properties:
+ '#address-cells':
+ const: 1
+ '#size-cells':
+ const: 0
+
+ patternProperties:
+ port@1:
+ type: object
+ description: CPSW2G NUSS external ports
+
+ allOf:
+ - $ref: ethernet-controller.yaml#
+
+ properties:
+ reg:
+ items:
+ - const: 1
+ description: CPSW port number
+
+ phys:
+ maxItems: 1
+ description: phandle on phy-gmii-sel PHY
+
+ label:
+ description: label associated with this port
+
+ ti,mac-only:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Specifies the port works in mac-only mode.
+
+ ti,syscon-efuse:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ Phandle to the system control device node which provides access
+ to efuse IO range with MAC addresses
+
+ required:
+ - reg
+ - phys
+
+ additionalProperties: false
+
+patternProperties:
+ "^mdio@[0-9a-f]+$":
+ type: object
+ allOf:
+ - $ref: "ti,davinci-mdio.yaml#"
+ description:
+ CPSW MDIO bus.
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - ranges
+ - clocks
+ - clock-names
+ - power-domains
+ - dmas
+ - dma-names
+ - '#address-cells'
+ - '#size-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/pinctrl/k3.h>
+ #include <dt-bindings/soc/ti,sci_pm_domain.h>
+ #include <dt-bindings/net/ti-dp83867.h>
+
+ mcu_cpsw: ethernet@46000000 {
+ compatible = "ti,am654-cpsw-nuss";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ reg = <0x0 0x46000000 0x0 0x200000>;
+ reg-names = "cpsw_nuss";
+ ranges = <0x0 0x0 0x46000000 0x0 0x200000>;
+ dma-coherent;
+ clocks = <&k3_clks 5 10>;
+ clock-names = "fck";
+ power-domains = <&k3_pds 5 TI_SCI_PD_EXCLUSIVE>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
+
+ dmas = <&mcu_udmap 0xf000>,
+ <&mcu_udmap 0xf001>,
+ <&mcu_udmap 0xf002>,
+ <&mcu_udmap 0xf003>,
+ <&mcu_udmap 0xf004>,
+ <&mcu_udmap 0xf005>,
+ <&mcu_udmap 0xf006>,
+ <&mcu_udmap 0xf007>,
+ <&mcu_udmap 0x7000>;
+ dma-names = "tx0", "tx1", "tx2", "tx3", "tx4", "tx5", "tx6", "tx7",
+ "rx";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpsw_port1: port@1 {
+ reg = <1>;
+ ti,mac-only;
+ label = "port1";
+ ti,syscon-efuse = <&mcu_conf 0x200>;
+ phys = <&phy_gmii_sel 1>;
+
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&phy0>;
+ };
+ };
+
+ davinci_mdio: mdio@f00 {
+ compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+ reg = <0x0 0xf00 0x0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&k3_clks 5 10>;
+ clock-names = "fck";
+ bus_freq = <1000000>;
+
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
index 7e675dafc256..3a76d8faaaed 100644
--- a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
+++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
@@ -4,17 +4,27 @@ This node provides properties for configuring the MediaTek mt76xx wireless
device. The node is expected to be specified as a child node of the PCI
controller to which the wireless chip is connected.
-Alternatively, it can specify the wireless part of the MT7628/MT7688 SoC.
-For SoC, use the compatible string "mediatek,mt7628-wmac" and the following
-properties:
+Alternatively, it can specify the wireless part of the MT7628/MT7688 or
+MT7622 SoC. For SoC, use the following compatible strings:
+
+compatible:
+- "mediatek,mt7628-wmac" for MT7628/MT7688
+- "mediatek,mt7622-wmac" for MT7622
+properties:
- reg: Address and length of the register set for the device.
- interrupts: Main device interrupt
+MT7622 specific properties:
+- power-domains: phandle to the power domain that the WMAC is part of
+- mediatek,infracfg: phandle to the infrastructure bus fabric syscon node
+
Optional properties:
- ieee80211-freq-limit: See ieee80211.txt
- mediatek,mtd-eeprom: Specify a MTD partition + offset containing EEPROM data
+- big-endian: if the radio eeprom partition is written in big-endian, specify
+ this property
The MAC address can as well be set with corresponding optional properties
defined in net/ethernet.txt.
@@ -31,6 +41,7 @@ Optional nodes:
reg = <0x0000 0 0 0 0>;
ieee80211-freq-limit = <5000000 6000000>;
mediatek,mtd-eeprom = <&factory 0x8000>;
+ big-endian;
led {
led-sources = <2>;
@@ -50,3 +61,15 @@ wmac: wmac@10300000 {
mediatek,mtd-eeprom = <&factory 0x0000>;
};
+
+MT7622 example:
+
+wmac: wmac@18000000 {
+ compatible = "mediatek,mt7622-wmac";
+ reg = <0 0x18000000 0 0x100000>;
+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_LOW>;
+
+ mediatek,infracfg = <&infracfg>;
+
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
+};
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
index 616c87746d6f..71bf91f97386 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
@@ -91,6 +91,11 @@ Optional properties:
- qcom,msa-fixed-perm: Boolean context flag to disable SCM call for statically
mapped msa region.
+- qcom,coexist-support : should contain either "0" or "1" to indicate whether
+ coexistence is supported by the hardware.
+- qcom,coexist-gpio-pin : GPIO pin number of the coexistence pin that will be
+ used by the wifi firmware.
+
Example (to supply PCI based wifi block details):
In this example, the node is defined as child node of the PCI controller.
@@ -159,6 +164,8 @@ wifi0: wifi@a000000 {
qcom,msi_addr = <0x0b006040>;
qcom,msi_base = <0x40>;
qcom,ath10k-pre-calibration-data = [ 01 02 03 ... ];
+ qcom,coexist-support = <1>;
+ qcom,coexist-gpio-pin = <0x33>;
};
Example (to supply wcn3990 SoC wifi block details):
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt b/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
index f38950560982..88fd28d15eac 100644
--- a/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
+++ b/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
@@ -9,11 +9,12 @@ Required properties:
- spi-max-frequency : Maximum SPI clocking speed of device in Hz
- interrupts : Should contain interrupt line
- vio-supply : phandle to regulator providing VIO
-- ti,power-gpio : GPIO connected to chip's PMEN pin
Optional properties:
- ti,wl1251-has-eeprom : boolean, the wl1251 has an eeprom connected, which
provides configuration data (calibration, MAC, ...)
+- ti,power-gpio : GPIO connected to chip's PMEN pin if operated in
+ SPI mode
- Please consult Documentation/devicetree/bindings/spi/spi-bus.txt
for optional SPI connection related properties,
diff --git a/Documentation/devicetree/bindings/ptp/ptp-idt82p33.yaml b/Documentation/devicetree/bindings/ptp/ptp-idt82p33.yaml
new file mode 100644
index 000000000000..9bc664f414a1
--- /dev/null
+++ b/Documentation/devicetree/bindings/ptp/ptp-idt82p33.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ptp/ptp-idt82p33.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: IDT 82P33 PTP Clock Device Tree Bindings
+
+description: |
+ IDT 82P33XXX Synchronization Management Unit (SMU) based PTP clock
+
+maintainers:
+ - Min Li <min.li.xe@renesas.com>
+
+properties:
+ compatible:
+ enum:
+ - idt,82p33810
+ - idt,82p33813
+ - idt,82p33814
+ - idt,82p33831
+ - idt,82p33910
+ - idt,82p33913
+ - idt,82p33914
+ - idt,82p33931
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phc@51 {
+ compatible = "idt,82p33810";
+ reg = <0x51>;
+ };
+ };
diff --git a/Documentation/networking/6lowpan.txt b/Documentation/networking/6lowpan.rst
index 2e5a939d7e6f..e70a6520cc33 100644
--- a/Documentation/networking/6lowpan.txt
+++ b/Documentation/networking/6lowpan.rst
@@ -1,37 +1,40 @@
+.. SPDX-License-Identifier: GPL-2.0
-Netdev private dataroom for 6lowpan interfaces:
+==============================================
+Netdev private dataroom for 6lowpan interfaces
+==============================================
All 6lowpan-able net devices, i.e. all interfaces with ARPHRD_6LOWPAN,
must have "struct lowpan_priv" placed at beginning of netdev_priv.
-The priv_size of each interface should be calculate by:
+The priv_size of each interface should be calculated by::
dev->priv_size = LOWPAN_PRIV_SIZE(LL_6LOWPAN_PRIV_DATA);
Where LL_PRIV_6LOWPAN_DATA is sizeof linklayer 6lowpan private data struct.
-To access the LL_PRIV_6LOWPAN_DATA structure you can cast:
+To access the LL_PRIV_6LOWPAN_DATA structure you can cast::
lowpan_priv(dev)->priv;
to your LL_6LOWPAN_PRIV_DATA structure.
-Before registering the lowpan netdev interface you must run:
+Before registering the lowpan netdev interface you must run::
lowpan_netdev_setup(dev, LOWPAN_LLTYPE_FOOBAR);
where LOWPAN_LLTYPE_FOOBAR is a define for your 6LoWPAN linklayer type of
enum lowpan_lltypes.
-Example to evaluate the private usually you can do:
+For example, to access the private data you can usually do::
-static inline struct lowpan_priv_foobar *
-lowpan_foobar_priv(struct net_device *dev)
-{
+ static inline struct lowpan_priv_foobar *
+ lowpan_foobar_priv(struct net_device *dev)
+ {
return (struct lowpan_priv_foobar *)lowpan_priv(dev)->priv;
-}
+ }
-switch (dev->type) {
-case ARPHRD_6LOWPAN:
+ switch (dev->type) {
+ case ARPHRD_6LOWPAN:
lowpan_priv = lowpan_priv(dev);
/* do great stuff which is ARPHRD_6LOWPAN related */
switch (lowpan_priv->lltype) {
@@ -42,8 +45,8 @@ case ARPHRD_6LOWPAN:
...
}
break;
-...
-}
+ ...
+ }
In case of generic 6lowpan branch ("net/6lowpan") you can remove the check
on ARPHRD_6LOWPAN, because you can be sure that these functions are called
diff --git a/Documentation/networking/bareudp.rst b/Documentation/networking/bareudp.rst
new file mode 100644
index 000000000000..465a8b251bfe
--- /dev/null
+++ b/Documentation/networking/bareudp.rst
@@ -0,0 +1,52 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========================================
+Bare UDP Tunnelling Module Documentation
+========================================
+
+There are various L3 encapsulation standards using UDP being discussed to
+leverage the UDP based load balancing capability of different networks.
+MPLSoUDP (https://tools.ietf.org/html/rfc7510) is one among them.
+
+The Bareudp tunnel module provides a generic L3 encapsulation tunnelling
+support for tunnelling different L3 protocols like MPLS, IP, NSH etc. inside
+a UDP tunnel.
+
+Special Handling
+----------------
+The bareudp device supports special handling for MPLS & IP as they can have
+multiple ethertypes.
+MPLS protocol can have ethertypes ETH_P_MPLS_UC (unicast) & ETH_P_MPLS_MC (multicast).
+IP protocol can have ethertypes ETH_P_IP (v4) & ETH_P_IPV6 (v6).
+This special handling can be enabled only for ethertypes ETH_P_IP & ETH_P_MPLS_UC
+with a flag called multiproto mode.
+
+Usage
+------
+
+1) Device creation & deletion
+
+ a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847.
+
+ This creates a bareudp tunnel device which tunnels L3 traffic with ethertype
+ 0x8847 (MPLS traffic). The destination port of the UDP header will be set to
+ 6635. The device will listen on UDP port 6635 to receive traffic.
+
+ b) ip link delete bareudp0
+
+2) Device creation with multiple proto mode enabled
+
+There are two ways to create a bareudp device for MPLS & IP with multiproto mode
+enabled.
+
+ a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847 multiproto
+
+ b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls
+
+3) Device Usage
+
+The bareudp device could be used along with OVS or the flower filter in TC.
+The OVS or TC flower layer must set the tunnel information in the SKB dst field
+before sending the packet buffer to the bareudp device for transmission. On
+reception the bareudp device extracts and stores the tunnel information in the
+SKB dst field before passing the packet buffer to the network stack.
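+
+As an illustrative sketch (the device name is hypothetical and the commands
+assume an iproute2 build with bareudp support), a device created as above can
+be brought up and inspected with::
+
+  $ ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls
+  $ ip link set dev bareudp0 up
+  $ ip -d link show dev bareudp0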
diff --git a/Documentation/networking/device_drivers/mellanox/mlx5.rst b/Documentation/networking/device_drivers/mellanox/mlx5.rst
index f575a49790e8..e9b65035cd47 100644
--- a/Documentation/networking/device_drivers/mellanox/mlx5.rst
+++ b/Documentation/networking/device_drivers/mellanox/mlx5.rst
@@ -101,7 +101,7 @@ Enabling the driver and kconfig options
**External options** ( Choose if the corresponding mlx5 feature is required )
- CONFIG_PTP_1588_CLOCK: When chosen, mlx5 ptp support will be enabled
-- CONFIG_VXLAN: When chosen, mlx5 vxaln support will be enabled.
+- CONFIG_VXLAN: When chosen, mlx5 vxlan support will be enabled.
- CONFIG_MLXFW: When chosen, mlx5 firmware flashing support will be enabled (via devlink and ethtool).
Devlink info
diff --git a/Documentation/networking/device_drivers/stmicro/stmmac.rst b/Documentation/networking/device_drivers/stmicro/stmmac.rst
index c34bab3d2df0..5d46e5036129 100644
--- a/Documentation/networking/device_drivers/stmicro/stmmac.rst
+++ b/Documentation/networking/device_drivers/stmicro/stmmac.rst
@@ -32,7 +32,8 @@ is also supported.
DesignWare(R) Cores Ethernet MAC 10/100/1000 Universal version 3.70a
(and older) and DesignWare(R) Cores Ethernet Quality-of-Service version 4.0
(and upper) have been used for developing this driver as well as
-DesignWare(R) Cores XGMAC - 10G Ethernet MAC.
+DesignWare(R) Cores XGMAC - 10G Ethernet MAC and DesignWare(R) Cores
+Enterprise MAC - 100G Ethernet MAC.
This driver supports both the platform bus and PCI.
@@ -48,6 +49,8 @@ Cores Ethernet Controllers and corresponding minimum and maximum versions:
+-------------------------------+--------------+--------------+--------------+
| XGMAC - 10G Ethernet MAC | 2.10a | N/A | XGMAC2+ |
+-------------------------------+--------------+--------------+--------------+
+| XLGMAC - 100G Ethernet MAC | 2.00a | N/A | XLGMAC2+ |
++-------------------------------+--------------+--------------+--------------+
For questions related to hardware requirements, refer to the documentation
supplied with your Ethernet adapter. All hardware requirements listed apply
@@ -57,7 +60,7 @@ Feature List
============
The following features are available in this driver:
- - GMII/MII/RGMII/SGMII/RMII/XGMII Interface
+ - GMII/MII/RGMII/SGMII/RMII/XGMII/XLGMII Interface
- Half-Duplex / Full-Duplex Operation
- Energy Efficient Ethernet (EEE)
- IEEE 802.3x PAUSE Packets (Flow Control)
diff --git a/Documentation/networking/devlink/bnxt.rst b/Documentation/networking/devlink/bnxt.rst
index 82ef9ec46707..3dfd84ccb1c7 100644
--- a/Documentation/networking/devlink/bnxt.rst
+++ b/Documentation/networking/devlink/bnxt.rst
@@ -51,6 +51,9 @@ The ``bnxt_en`` driver reports the following versions
* - Name
- Type
- Description
+ * - ``board.id``
+ - fixed
+ - Part number identifying the board design
* - ``asic.id``
- fixed
- ASIC design identifier
@@ -63,12 +66,15 @@ The ``bnxt_en`` driver reports the following versions
* - ``fw``
- stored, running
- Overall board firmware version
- * - ``fw.app``
- - stored, running
- - Data path firmware version
* - ``fw.mgmt``
- stored, running
- - Management firmware version
+ - NIC hardware resource management firmware version
+ * - ``fw.mgmt.api``
+ - running
+ - Minimum firmware interface spec version supported between driver and firmware
+ * - ``fw.ncsi``
+ - stored, running
+ - General platform management firmware version
* - ``fw.roce``
- stored, running
- RoCE management firmware version
diff --git a/Documentation/networking/devlink/devlink-flash.rst b/Documentation/networking/devlink/devlink-flash.rst
new file mode 100644
index 000000000000..40a87c0222cb
--- /dev/null
+++ b/Documentation/networking/devlink/devlink-flash.rst
@@ -0,0 +1,93 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+.. _devlink_flash:
+
+=============
+Devlink Flash
+=============
+
+The ``devlink-flash`` API allows updating device firmware. It replaces the
+older ``ethtool-flash`` mechanism, and doesn't require taking any
+networking locks in the kernel to perform the flash update. Example use::
+
+ $ devlink dev flash pci/0000:05:00.0 file flash-boot.bin
+
+Note that the file name is a path relative to the firmware loading path
+(usually ``/lib/firmware/``). Drivers may send status updates to inform
+user space about the progress of the update operation.
+
+Firmware Loading
+================
+
+Devices which require firmware to operate usually store it in non-volatile
+memory on the board, e.g. flash. Some devices store only basic firmware on
+the board, and the driver loads the rest from disk during probing.
+``devlink-info`` allows users to query firmware information (loaded
+components and versions).
+
+In other cases the device can either store the image on the board, load it
+from disk, or automatically flash a new image from disk. The ``fw_load_policy``
+devlink parameter can be used to control this behavior
+(:ref:`Documentation/networking/devlink/devlink-params.rst <devlink_params_generic>`).
+
+On-disk firmware files are usually stored in ``/lib/firmware/``.
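+
+For example, a hedged sketch of requesting that firmware be loaded from flash
+via the ``fw_load_policy`` parameter (the PCI address is hypothetical; the
+numeric value and supported ``cmode`` depend on the driver, ``1`` being the
+flash-stored image per the generic parameter documentation)::
+
+  $ devlink dev param set pci/0000:05:00.0 \
+        name fw_load_policy value 1 cmode driverinit
+  $ devlink dev reload pci/0000:05:00.0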
+
+Firmware Version Management
+===========================
+
+Drivers are expected to implement ``devlink-flash`` and ``devlink-info``
+functionality, which together allow for implementing vendor-independent
+automated firmware update facilities.
+
+``devlink-info`` exposes the ``driver`` name and three version groups
+(``fixed``, ``running``, ``stored``).
+
+The ``driver`` attribute and ``fixed`` group identify the specific device
+design, e.g. for looking up applicable firmware updates. This is why
+``serial_number`` is not part of the ``fixed`` versions (even though it
+is fixed) - ``fixed`` versions should identify the design, not a single
+device.
+
+``running`` and ``stored`` firmware versions identify the firmware running
+on the device, and firmware which will be activated after reboot or device
+reset.
+
+The firmware update agent is supposed to be able to follow this simple
+algorithm to update firmware contents, regardless of the device vendor:
+
+.. code-block:: sh
+
+ # Get unique HW design identifier
+ $hw_id = devlink-dev-info['fixed']
+
+ # Find out which FW flash we want to use for this NIC
+ $want_flash_vers = some-db-backed.lookup($hw_id, 'flash')
+
+ # Update flash if necessary
+ if $want_flash_vers != devlink-dev-info['stored']:
+ $file = some-db-backed.download($hw_id, 'flash')
+ devlink-dev-flash($file)
+
+ # Find out the expected overall firmware versions
+ $want_fw_vers = some-db-backed.lookup($hw_id, 'all')
+
+ # Update on-disk file if necessary
+ if $want_fw_vers != devlink-dev-info['running']:
+ $file = some-db-backed.download($hw_id, 'disk')
+ write($file, '/lib/firmware/')
+
+ # Try device reset, if available
+ if $want_fw_vers != devlink-dev-info['running']:
+ devlink-reset()
+
+ # Reboot, if reset wasn't enough
+ if $want_fw_vers != devlink-dev-info['running']:
+ reboot()
+
+Note that each reference to ``devlink-dev-info`` in this pseudo-code
+is expected to fetch up-to-date information from the kernel.
+
+For the convenience of identifying firmware files some vendors add
+``bundle_id`` information to the firmware versions. This meta-version covers
+multiple per-component versions and can be used e.g. in firmware file names
+(all component versions could get rather long).
diff --git a/Documentation/networking/devlink/devlink-info.rst b/Documentation/networking/devlink/devlink-info.rst
index 70981dd1b981..3fe11401b838 100644
--- a/Documentation/networking/devlink/devlink-info.rst
+++ b/Documentation/networking/devlink/devlink-info.rst
@@ -5,34 +5,119 @@ Devlink Info
============
The ``devlink-info`` mechanism enables device drivers to report device
-information in a generic fashion. It is extensible, and enables exporting
-even device or driver specific information.
+(hardware and firmware) information in a standard, extensible fashion.
-devlink supports representing the following types of versions
+The original motivation for the ``devlink-info`` API was twofold:
-.. list-table:: List of version types
+ - making it possible to automate device and firmware management in a fleet
+ of machines in a vendor-independent fashion (see also
+ :ref:`Documentation/networking/devlink/devlink-flash.rst <devlink_flash>`);
+ - name the per component FW versions (as opposed to the crowded ethtool
+ version string).
+
+``devlink-info`` supports reporting multiple types of objects. Reporting driver
+versions is generally discouraged - here, and via any other Linux API.
+
+.. list-table:: List of top level info objects
:widths: 5 95
- * - Type
+ * - Name
- Description
+ * - ``driver``
+ - Name of the currently used device driver, also available through sysfs.
+
+ * - ``serial_number``
+ - Serial number of the device.
+
+ This is usually the serial number of the ASIC, also often available
+ in PCI config space of the device in the *Device Serial Number*
+ capability.
+
+ The serial number should be unique per physical device.
+ Sometimes the serial number of the device is only 48 bits long (the
+ length of the Ethernet MAC address), and since PCI DSN is 64 bits long
+ devices pad or encode additional information into the serial number.
+ One example is adding port ID or PCI interface ID in the extra two bytes.
+ Drivers should make sure to strip or normalize any such padding
+ or interface ID, and report only the part of the serial number
+ which uniquely identifies the hardware. In other words serial number
+ reported for two ports of the same device or on two hosts of
+ a multi-host device should be identical.
+
+ .. note:: ``devlink-info`` API should be extended with a new field
+ if devices want to report board/product serial number (often
+ reported in PCI *Vital Product Data* capability).
+
* - ``fixed``
- - Represents fixed versions, which cannot change. For example,
+ - Group for hardware identifiers, and versions of components
+ which are not field-updatable.
+
+ Versions in this section identify the device design. For example,
component identifiers or the board version reported in the PCI VPD.
+ Data in ``devlink-info`` should be broken into the smallest logical
+ components, e.g. PCI VPD may concatenate various information
+ to form the Part Number string, while in ``devlink-info`` all parts
+ should be reported as separate items.
+
+ This group must not contain any frequently changing identifiers,
+ such as serial numbers. See
+ :ref:`Documentation/networking/devlink/devlink-flash.rst <devlink_flash>`
+ to understand why.
+
* - ``running``
- - Represents the version of the currently running component. For
- example the running version of firmware. These versions generally
- only update after a reboot.
+ - Group for information about currently running software/firmware.
+ These versions often only update after a reboot, sometimes device reset.
+
* - ``stored``
- - Represents the version of a component as stored, such as after a
- flash update. Stored values should update to reflect changes in the
- flash even if a reboot has not yet occurred.
+ - Group for software/firmware versions in device flash.
+
+ Stored values must update to reflect changes in the flash even
+ if reboot has not yet occurred. If device is not capable of updating
+ ``stored`` versions when new software is flashed, it must not report
+ them.
+
+Each version can be reported at most once in each version group. Firmware
+components stored on the flash should feature in both the ``running`` and
+``stored`` sections, if device is capable of reporting ``stored`` versions
+(see :ref:`Documentation/networking/devlink/devlink-flash.rst <devlink_flash>`).
+In case software/firmware components are loaded from the disk (e.g.
+``/lib/firmware``) only the running version should be reported via
+the kernel API.
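+
+As an abridged, hypothetical sketch (the driver name, versions and serial
+number are made up; the exact fields vary by device), a ``devlink dev info``
+dump groups the versions as follows::
+
+  $ devlink dev info pci/0000:05:00.0
+  pci/0000:05:00.0:
+    driver example_drv
+    serial_number 00-11-22-33-44-55
+    versions:
+        fixed:
+          board.id EX12345-000
+        running:
+          fw.mgmt 2.1.7
+        stored:
+          fw.mgmt 2.1.9
+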
Generic Versions
================
It is expected that drivers use the following generic names for exporting
-version information. Other information may be exposed using driver-specific
-names, but these should be documented in the driver-specific file.
+version information. If a generic name for a given component doesn't exist yet,
+driver authors should consult existing driver-specific versions and attempt
+to reuse them. As a last resort, if a component is truly unique, using
+driver-specific names is allowed, but these should be documented in the
+driver-specific file.
+
+All versions should try to use the following terminology:
+
+.. list-table:: List of common version suffixes
+ :widths: 10 90
+
+ * - Name
+ - Description
+ * - ``id``, ``revision``
+ - Identifiers of designs and revision, mostly used for hardware versions.
+
+ * - ``api``
+ - Version of API between components. API items are usually of limited
+ value to the user, and can be inferred from other versions by the vendor,
+ so adding API versions is generally discouraged as noise.
+
+ * - ``bundle_id``
+ - Identifier of a distribution package which was flashed onto the device.
+ This is an attribute of a firmware package which covers multiple versions
+ for ease of managing firmware images (see
+ :ref:`Documentation/networking/devlink/devlink-flash.rst <devlink_flash>`).
+
+ ``bundle_id`` can appear in both ``running`` and ``stored`` versions,
+ but it must not be reported if any of the components covered by the
+ ``bundle_id`` was changed and no longer matches the version from
+ the bundle.
board.id
--------
@@ -52,7 +137,7 @@ ASIC design identifier.
asic.rev
--------
-ASIC design revision.
+ASIC design revision/stepping.
board.manufacture
-----------------
@@ -72,6 +157,12 @@ Control unit firmware version. This firmware is responsible for house
keeping tasks, PHY control etc. but not the packet-by-packet data path
operation.
+fw.mgmt.api
+-----------
+
+Firmware interface specification version of the software interfaces between
+driver and firmware.
+
fw.app
------
@@ -91,10 +182,31 @@ Network Controller Sideband Interface.
fw.psid
-------
-Unique identifier of the firmware parameter set.
+Unique identifier of the firmware parameter set. These are usually
+parameters of a particular board, defined at manufacturing time.
fw.roce
-------
RoCE firmware version which is responsible for handling roce
management.
+
+fw.bundle_id
+------------
+
+Unique identifier of the entire firmware bundle.
+
+Future work
+===========
+
+The following extensions could be useful:
+
+ - product serial number - NIC boards often get labeled with a board serial
+ number rather than ASIC serial number; it'd be useful to add board serial
+ numbers to the API if they can be retrieved from the device;
+
+ - on-disk firmware file names - drivers list the file names of firmware they
+ may need to load onto devices via the ``MODULE_FIRMWARE()`` macro. These,
+ however, are per module, rather than per device. It'd be useful to list
+ the names of firmware files the driver will try to load for a given device,
+ in order of priority.
diff --git a/Documentation/networking/devlink/devlink-params.rst b/Documentation/networking/devlink/devlink-params.rst
index da2f85c0fa21..d075fd090b3d 100644
--- a/Documentation/networking/devlink/devlink-params.rst
+++ b/Documentation/networking/devlink/devlink-params.rst
@@ -41,6 +41,8 @@ In order for ``driverinit`` parameters to take effect, the driver must
support reloading via the ``devlink-reload`` command. This command will
request a reload of the device driver.
+.. _devlink_params_generic:
+
Generic configuration parameters
================================
The following is a list of generic configuration parameters that drivers may
diff --git a/Documentation/networking/devlink/devlink-region.rst b/Documentation/networking/devlink/devlink-region.rst
index 8b46e8591fe0..04e04d1ff627 100644
--- a/Documentation/networking/devlink/devlink-region.rst
+++ b/Documentation/networking/devlink/devlink-region.rst
@@ -20,6 +20,11 @@ address regions that are otherwise inaccessible to the user.
Regions may also be used to provide an additional way to debug complex error
states, but see also :doc:`devlink-health`
+Regions may optionally support capturing a snapshot on demand via the
+``DEVLINK_CMD_REGION_NEW`` netlink message. A driver wishing to allow
+requested snapshots must implement the ``.snapshot`` callback for the region
+in its ``devlink_region_ops`` structure.
+
example usage
-------------
@@ -29,8 +34,7 @@ example usage
$ devlink region show [ DEV/REGION ]
$ devlink region del DEV/REGION snapshot SNAPSHOT_ID
$ devlink region dump DEV/REGION [ snapshot SNAPSHOT_ID ]
- $ devlink region read DEV/REGION [ snapshot SNAPSHOT_ID ]
- address ADDRESS length length
+ $ devlink region read DEV/REGION [ snapshot SNAPSHOT_ID ] address ADDRESS length length
# Show all of the exposed regions with region sizes:
$ devlink region show
@@ -40,6 +44,9 @@ example usage
# Delete a snapshot using:
$ devlink region del pci/0000:00:05.0/cr-space snapshot 1
+ # Request an immediate snapshot, if supported by the region
+ $ devlink region new pci/0000:00:05.0/cr-space snapshot 5
+
# Dump a snapshot:
$ devlink region dump pci/0000:00:05.0/fw-health snapshot 1
0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30
@@ -48,8 +55,7 @@ example usage
0000000000000030 bada cce5 bada cce5 bada cce5 bada cce5
# Read a specific part of a snapshot:
- $ devlink region read pci/0000:00:05.0/fw-health snapshot 1 address 0
- length 16
+ $ devlink region read pci/0000:00:05.0/fw-health snapshot 1 address 0 length 16
0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30
As regions are likely very device or driver specific, no generic regions are
diff --git a/Documentation/networking/devlink/devlink-trap.rst b/Documentation/networking/devlink/devlink-trap.rst
index 47a429bb8658..a09971c2115c 100644
--- a/Documentation/networking/devlink/devlink-trap.rst
+++ b/Documentation/networking/devlink/devlink-trap.rst
@@ -238,6 +238,12 @@ be added to the following table:
- ``drop``
- Traps NVE packets that the device decided to drop because their overlay
source MAC is multicast
+ * - ``ingress_flow_action_drop``
+ - ``drop``
+ - Traps packets dropped during processing of ingress flow action drop
+ * - ``egress_flow_action_drop``
+ - ``drop``
+ - Traps packets dropped during processing of egress flow action drop
Driver-specific Packet Traps
============================
@@ -277,6 +283,35 @@ narrow. The description of these groups must be added to the following table:
* - ``tunnel_drops``
- Contains packet traps for packets that were dropped by the device during
tunnel encapsulation / decapsulation
+ * - ``acl_drops``
+ - Contains packet traps for packets that were dropped by the device during
+ ACL processing
+
+Packet Trap Policers
+====================
+
+As previously explained, the underlying device can trap certain packets to the
+CPU for processing. In most cases, the underlying device is capable of handling
+packet rates that are several orders of magnitude higher than those that
+can be handled by the CPU.
+
+Therefore, in order to prevent the underlying device from overwhelming the CPU,
+devices usually include packet trap policers that are able to police the
+trapped packets to rates that can be handled by the CPU.
+
+The ``devlink-trap`` mechanism allows capable device drivers to register their
+supported packet trap policers with ``devlink``. The device driver can choose
+to associate these policers with supported packet trap groups (see
+:ref:`Generic-Packet-Trap-Groups`) during its initialization, thereby exposing
+its default control plane policy to user space.
+
+Device drivers should allow user space to change the parameters of the policers
+(e.g., rate, burst size) as well as the association between the policers and
+trap groups by implementing the relevant callbacks.
+
+If possible, device drivers should implement a callback that allows user space
+to retrieve the number of packets that were dropped by the policer because its
+configured policy was violated.
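+
+A hedged CLI sketch (the device address, policer index, group name and values
+are all hypothetical)::
+
+  # Change the rate and burst size of a policer
+  $ devlink trap policer set pci/0000:01:00.0 policer 1 rate 2000 burst 16
+
+  # Bind the policer to a packet trap group
+  $ devlink trap group set pci/0000:01:00.0 group l2_drops policer 1
+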
Testing
=======
diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst
new file mode 100644
index 000000000000..5b58fc4e1268
--- /dev/null
+++ b/Documentation/networking/devlink/ice.rst
@@ -0,0 +1,96 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+ice devlink support
+===================
+
+This document describes the devlink features implemented by the ``ice``
+device driver.
+
+Info versions
+=============
+
+The ``ice`` driver reports the following versions
+
+.. list-table:: devlink info versions implemented
+ :widths: 5 5 5 90
+
+ * - Name
+ - Type
+ - Example
+ - Description
+ * - ``board.id``
+ - fixed
+ - K65390-000
+ - The Product Board Assembly (PBA) identifier of the board.
+ * - ``fw.mgmt``
+ - running
+ - 2.1.7
+ - 3-digit version number of the management firmware that controls the
+ PHY, link, etc.
+ * - ``fw.mgmt.api``
+ - running
+ - 1.5
+ - 2-digit version number of the API exported over the AdminQ by the
+ management firmware. Used by the driver to identify what commands
+ are supported.
+ * - ``fw.mgmt.build``
+ - running
+ - 0x305d955f
+ - Unique identifier of the source for the management firmware.
+ * - ``fw.undi``
+ - running
+ - 1.2581.0
+ - Version of the Option ROM containing the UEFI driver. The version is
+ reported in ``major.minor.patch`` format. The major version is
+ incremented whenever a major breaking change occurs, or when the
+ minor version would overflow. The minor version is incremented for
+ non-breaking changes and reset to 1 when the major version is
+ incremented. The patch version is normally 0 but is incremented when
+ a fix is delivered as a patch against an older base Option ROM.
+ * - ``fw.psid.api``
+ - running
+ - 0.80
+ - Version defining the format of the flash contents.
+ * - ``fw.bundle_id``
+ - running
+ - 0x80002ec0
+ - Unique identifier of the firmware image file that was loaded onto
+ the device. Also referred to as the EETRACK identifier of the NVM.
+ * - ``fw.app.name``
+ - running
+ - ICE OS Default Package
+ - The name of the DDP package that is active in the device. The DDP
+ package is loaded by the driver during initialization. Each variation
+ of the DDP package shall have a unique name.
+ * - ``fw.app``
+ - running
+ - 1.3.1.0
+ - The version of the DDP package that is active in the device. Note
+ that both the name (as reported by ``fw.app.name``) and version are
+ required to uniquely identify the package.
+
+Regions
+=======
+
+The ``ice`` driver enables access to the contents of the Non Volatile Memory
+flash chip via the ``nvm-flash`` region.
+
+Users can request an immediate capture of a snapshot via the
+``DEVLINK_CMD_REGION_NEW`` command:
+
+.. code:: shell
+
+ $ devlink region new pci/0000:01:00.0/nvm-flash snapshot 1
+ $ devlink region dump pci/0000:01:00.0/nvm-flash snapshot 1
+
+ $ devlink region dump pci/0000:01:00.0/nvm-flash snapshot 1
+ 0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30
+ 0000000000000010 0000 0000 ffff ff04 0029 8c00 0028 8cc8
+ 0000000000000020 0016 0bb8 0016 1720 0000 0000 c00f 3ffc
+ 0000000000000030 bada cce5 bada cce5 bada cce5 bada cce5
+
+ $ devlink region read pci/0000:01:00.0/nvm-flash snapshot 1 address 0 length 16
+ 0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30
+
+ $ devlink region delete pci/0000:01:00.0/nvm-flash snapshot 1
diff --git a/Documentation/networking/devlink/index.rst b/Documentation/networking/devlink/index.rst
index 087ff54d53fc..c536db2cc0f9 100644
--- a/Documentation/networking/devlink/index.rst
+++ b/Documentation/networking/devlink/index.rst
@@ -16,6 +16,7 @@ general.
devlink-dpipe
devlink-health
devlink-info
+ devlink-flash
devlink-params
devlink-region
devlink-resource
@@ -32,6 +33,7 @@ parameters, info versions, and other features it supports.
bnxt
ionic
+ ice
mlx4
mlx5
mlxsw
diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst
index 629a6e69c036..4e4b97f7971a 100644
--- a/Documentation/networking/devlink/mlx5.rst
+++ b/Documentation/networking/devlink/mlx5.rst
@@ -37,6 +37,12 @@ parameters.
* ``smfs`` Software managed flow steering. In SMFS mode, the HW
steering entities are created and managed through the driver without
firmware intervention.
+ * - ``fdb_large_groups``
+ - u32
+ - driverinit
+ - Control the number of large groups (size > 1) in the FDB table.
+
+ * The default value is 15, and the range is between 1 and 1024.
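+
+A hedged usage sketch (the PCI address is hypothetical; ``driverinit``
+parameters take effect after a devlink reload)::
+
+  $ devlink dev param set pci/0000:06:00.0 \
+        name fdb_large_groups value 20 cmode driverinit
+  $ devlink dev reload pci/0000:06:00.0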
The ``mlx5`` driver supports reloading via ``DEVLINK_CMD_RELOAD``
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index f1f868479ceb..567326491f80 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -189,6 +189,21 @@ Userspace to kernel:
``ETHTOOL_MSG_DEBUG_SET`` set debugging settings
``ETHTOOL_MSG_WOL_GET`` get wake-on-lan settings
``ETHTOOL_MSG_WOL_SET`` set wake-on-lan settings
+ ``ETHTOOL_MSG_FEATURES_GET`` get device features
+ ``ETHTOOL_MSG_FEATURES_SET`` set device features
+ ``ETHTOOL_MSG_PRIVFLAGS_GET`` get private flags
+ ``ETHTOOL_MSG_PRIVFLAGS_SET`` set private flags
+ ``ETHTOOL_MSG_RINGS_GET`` get ring sizes
+ ``ETHTOOL_MSG_RINGS_SET`` set ring sizes
+ ``ETHTOOL_MSG_CHANNELS_GET`` get channel counts
+ ``ETHTOOL_MSG_CHANNELS_SET`` set channel counts
+ ``ETHTOOL_MSG_COALESCE_GET`` get coalescing parameters
+ ``ETHTOOL_MSG_COALESCE_SET`` set coalescing parameters
+ ``ETHTOOL_MSG_PAUSE_GET`` get pause parameters
+ ``ETHTOOL_MSG_PAUSE_SET`` set pause parameters
+ ``ETHTOOL_MSG_EEE_GET`` get EEE settings
+ ``ETHTOOL_MSG_EEE_SET`` set EEE settings
+ ``ETHTOOL_MSG_TSINFO_GET`` get timestamping info
===================================== ================================
Kernel to userspace:
@@ -204,6 +219,22 @@ Kernel to userspace:
``ETHTOOL_MSG_DEBUG_NTF`` debugging settings notification
``ETHTOOL_MSG_WOL_GET_REPLY`` wake-on-lan settings
``ETHTOOL_MSG_WOL_NTF`` wake-on-lan settings notification
+ ``ETHTOOL_MSG_FEATURES_GET_REPLY`` device features
+ ``ETHTOOL_MSG_FEATURES_SET_REPLY`` optional reply to FEATURES_SET
+ ``ETHTOOL_MSG_FEATURES_NTF`` netdev features notification
+ ``ETHTOOL_MSG_PRIVFLAGS_GET_REPLY`` private flags
+ ``ETHTOOL_MSG_PRIVFLAGS_NTF`` private flags
+ ``ETHTOOL_MSG_RINGS_GET_REPLY`` ring sizes
+ ``ETHTOOL_MSG_RINGS_NTF`` ring sizes
+ ``ETHTOOL_MSG_CHANNELS_GET_REPLY`` channel counts
+ ``ETHTOOL_MSG_CHANNELS_NTF`` channel counts
+ ``ETHTOOL_MSG_COALESCE_GET_REPLY`` coalescing parameters
+ ``ETHTOOL_MSG_COALESCE_NTF`` coalescing parameters
+ ``ETHTOOL_MSG_PAUSE_GET_REPLY`` pause parameters
+ ``ETHTOOL_MSG_PAUSE_NTF`` pause parameters
+ ``ETHTOOL_MSG_EEE_GET_REPLY`` EEE settings
+ ``ETHTOOL_MSG_EEE_NTF`` EEE settings
+ ``ETHTOOL_MSG_TSINFO_GET_REPLY`` timestamping info
===================================== =================================
``GET`` requests are sent by userspace applications to retrieve device
@@ -521,6 +552,410 @@ Request contents:
``WAKE_MAGICSECURE`` mode.
+FEATURES_GET
+============
+
+Gets netdev features like ``ETHTOOL_GFEATURES`` ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_FEATURES_HEADER`` nested request header
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_FEATURES_HEADER`` nested reply header
+ ``ETHTOOL_A_FEATURES_HW`` bitset dev->hw_features
+ ``ETHTOOL_A_FEATURES_WANTED`` bitset dev->wanted_features
+ ``ETHTOOL_A_FEATURES_ACTIVE`` bitset dev->features
+ ``ETHTOOL_A_FEATURES_NOCHANGE`` bitset NETIF_F_NEVER_CHANGE
+ ==================================== ====== ==========================
+
+Bitmaps in kernel response have the same meaning as bitmaps used in the ioctl
+interface but attribute names are different (they are based on
+corresponding members of struct net_device). Legacy "flags" are not provided;
+if userspace needs them (most likely only ethtool for backward compatibility),
+it can calculate their values from related feature bits itself.
+``ETHTOOL_A_FEATURES_HW`` uses a mask consisting of all features recognized by
+kernel (to provide all names when using verbose bitmap format), the other
+three use no mask (simple bit lists).
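+
+For illustration, the classic CLI counterpart of this request (the interface
+name is hypothetical) is::
+
+  $ ethtool -k eth0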
+
+
+FEATURES_SET
+============
+
+Request to set netdev features like ``ETHTOOL_SFEATURES`` ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_FEATURES_HEADER`` nested request header
+ ``ETHTOOL_A_FEATURES_WANTED`` bitset requested features
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_FEATURES_HEADER`` nested reply header
+ ``ETHTOOL_A_FEATURES_WANTED`` bitset diff wanted vs. result
+ ``ETHTOOL_A_FEATURES_ACTIVE`` bitset diff old vs. new active
+ ==================================== ====== ==========================
+
+Request contains only one bitset which can be either a value/mask pair (request
+to change specific feature bits and leave the rest) or only a value (request
+to set all features to the specified set).
+
+As the request is subject to netdev_change_features() sanity checks, the
+optional kernel reply (which can be suppressed by the ``ETHTOOL_FLAG_OMIT_REPLY``
+flag in the request header) informs the client about the actual result.
+``ETHTOOL_A_FEATURES_WANTED``
+reports the difference between client request and actual result: mask consists
+of bits which differ between requested features and result (dev->features
+after the operation), value consists of values of these bits in the request
+(i.e. negated values from resulting features). ``ETHTOOL_A_FEATURES_ACTIVE``
+reports the difference between old and new dev->features: mask consists of
+bits which have changed, values are their values in new dev->features (after
+the operation).
+
+``ETHTOOL_MSG_FEATURES_NTF`` notification is sent not only if device features
+are modified using the ``ETHTOOL_MSG_FEATURES_SET`` request or one of the
+ethtool ioctl requests but also each time features are modified with
+netdev_update_features() or netdev_change_features().
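+
+For illustration, the classic CLI counterpart of this request (interface and
+feature names are hypothetical) is::
+
+  $ ethtool -K eth0 tx off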
+
+
+PRIVFLAGS_GET
+=============
+
+Gets private flags like ``ETHTOOL_GPFLAGS`` ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_PRIVFLAGS_HEADER`` nested request header
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_PRIVFLAGS_HEADER`` nested reply header
+ ``ETHTOOL_A_PRIVFLAGS_FLAGS`` bitset private flags
+ ==================================== ====== ==========================
+
+``ETHTOOL_A_PRIVFLAGS_FLAGS`` is a bitset with values of device private flags.
+These flags are defined by the driver; their number, names (and meaning)
+are device dependent. For compact bitset format, names can be retrieved as
+``ETH_SS_PRIV_FLAGS`` string set. If verbose bitset format is requested,
+response uses all private flags supported by the device as mask so that client
+gets the full information without having to fetch the string set with names.
+
+
+PRIVFLAGS_SET
+=============
+
+Sets or modifies values of device private flags like ``ETHTOOL_SPFLAGS``
+ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_PRIVFLAGS_HEADER`` nested request header
+ ``ETHTOOL_A_PRIVFLAGS_FLAGS`` bitset private flags
+ ==================================== ====== ==========================
+
+``ETHTOOL_A_PRIVFLAGS_FLAGS`` can either set the whole set of private flags or
+modify only values of some of them.
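+
+For illustration, the classic CLI counterparts (interface and flag names are
+hypothetical; the available flags are driver specific) are::
+
+  $ ethtool --show-priv-flags eth0
+  $ ethtool --set-priv-flags eth0 some-flag on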
+
+
+RINGS_GET
+=========
+
+Gets ring sizes like ``ETHTOOL_GRINGPARAM`` ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_RINGS_HEADER`` nested request header
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_RINGS_HEADER`` nested reply header
+ ``ETHTOOL_A_RINGS_RX_MAX`` u32 max size of RX ring
+ ``ETHTOOL_A_RINGS_RX_MINI_MAX`` u32 max size of RX mini ring
+ ``ETHTOOL_A_RINGS_RX_JUMBO_MAX`` u32 max size of RX jumbo ring
+ ``ETHTOOL_A_RINGS_TX_MAX`` u32 max size of TX ring
+ ``ETHTOOL_A_RINGS_RX`` u32 size of RX ring
+ ``ETHTOOL_A_RINGS_RX_MINI`` u32 size of RX mini ring
+ ``ETHTOOL_A_RINGS_RX_JUMBO`` u32 size of RX jumbo ring
+ ``ETHTOOL_A_RINGS_TX`` u32 size of TX ring
+ ==================================== ====== ==========================
+
+
+RINGS_SET
+=========
+
+Sets ring sizes like ``ETHTOOL_SRINGPARAM`` ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_RINGS_HEADER`` nested request header
+ ``ETHTOOL_A_RINGS_RX`` u32 size of RX ring
+ ``ETHTOOL_A_RINGS_RX_MINI`` u32 size of RX mini ring
+ ``ETHTOOL_A_RINGS_RX_JUMBO`` u32 size of RX jumbo ring
+ ``ETHTOOL_A_RINGS_TX`` u32 size of TX ring
+ ==================================== ====== ==========================
+
+Kernel checks that requested ring sizes do not exceed limits reported by
+driver. Driver may impose additional constraints and may not support all
+attributes.
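+
+For illustration, the classic CLI counterpart (interface name and ring sizes
+are hypothetical) is::
+
+  $ ethtool -G eth0 rx 1024 tx 1024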
+
+
+CHANNELS_GET
+============
+
+Gets channel counts like ``ETHTOOL_GCHANNELS`` ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_CHANNELS_HEADER`` nested request header
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_CHANNELS_HEADER`` nested reply header
+ ``ETHTOOL_A_CHANNELS_RX_MAX`` u32 max receive channels
+ ``ETHTOOL_A_CHANNELS_TX_MAX`` u32 max transmit channels
+ ``ETHTOOL_A_CHANNELS_OTHER_MAX`` u32 max other channels
+ ``ETHTOOL_A_CHANNELS_COMBINED_MAX`` u32 max combined channels
+ ``ETHTOOL_A_CHANNELS_RX_COUNT`` u32 receive channel count
+ ``ETHTOOL_A_CHANNELS_TX_COUNT`` u32 transmit channel count
+ ``ETHTOOL_A_CHANNELS_OTHER_COUNT`` u32 other channel count
+ ``ETHTOOL_A_CHANNELS_COMBINED_COUNT`` u32 combined channel count
+ ===================================== ====== ==========================
+
+
+CHANNELS_SET
+============
+
+Sets channel counts like ``ETHTOOL_SCHANNELS`` ioctl request.
+
+Request contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_CHANNELS_HEADER`` nested request header
+ ``ETHTOOL_A_CHANNELS_RX_COUNT`` u32 receive channel count
+ ``ETHTOOL_A_CHANNELS_TX_COUNT`` u32 transmit channel count
+ ``ETHTOOL_A_CHANNELS_OTHER_COUNT`` u32 other channel count
+ ``ETHTOOL_A_CHANNELS_COMBINED_COUNT`` u32 combined channel count
+ ===================================== ====== ==========================
+
+Kernel checks that requested channel counts do not exceed limits reported by
+driver. Driver may impose additional constraints and may not support all
+attributes.
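+
+For illustration, the classic CLI counterpart (interface name and channel
+count are hypothetical) is::
+
+  $ ethtool -L eth0 combined 8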
+
+
+COALESCE_GET
+============
+
+Gets coalescing parameters like ``ETHTOOL_GCOALESCE`` ioctl request.
+
+Request contents:
+
+ ==================================== ====== ==========================
+ ``ETHTOOL_A_COALESCE_HEADER`` nested request header
+ ==================================== ====== ==========================
+
+Kernel response contents:
+
+ =========================================== ====== =======================
+ ``ETHTOOL_A_COALESCE_HEADER`` nested reply header
+ ``ETHTOOL_A_COALESCE_RX_USECS`` u32 delay (us), normal Rx
+ ``ETHTOOL_A_COALESCE_RX_MAX_FRAMES`` u32 max packets, normal Rx
+ ``ETHTOOL_A_COALESCE_RX_USECS_IRQ`` u32 delay (us), Rx in IRQ
+ ``ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ`` u32 max packets, Rx in IRQ
+ ``ETHTOOL_A_COALESCE_TX_USECS`` u32 delay (us), normal Tx
+ ``ETHTOOL_A_COALESCE_TX_MAX_FRAMES`` u32 max packets, normal Tx
+ ``ETHTOOL_A_COALESCE_TX_USECS_IRQ`` u32 delay (us), Tx in IRQ
+ ``ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ`` u32 max packets, Tx in IRQ
+ ``ETHTOOL_A_COALESCE_STATS_BLOCK_USECS`` u32 delay of stats update
+ ``ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX`` bool adaptive Rx coalesce
+ ``ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX`` bool adaptive Tx coalesce
+ ``ETHTOOL_A_COALESCE_PKT_RATE_LOW`` u32 threshold for low rate
+ ``ETHTOOL_A_COALESCE_RX_USECS_LOW`` u32 delay (us), low Rx
+ ``ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW`` u32 max packets, low Rx
+ ``ETHTOOL_A_COALESCE_TX_USECS_LOW`` u32 delay (us), low Tx
+ ``ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW`` u32 max packets, low Tx
+ ``ETHTOOL_A_COALESCE_PKT_RATE_HIGH`` u32 threshold for high rate
+ ``ETHTOOL_A_COALESCE_RX_USECS_HIGH`` u32 delay (us), high Rx
+ ``ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH`` u32 max packets, high Rx
+ ``ETHTOOL_A_COALESCE_TX_USECS_HIGH`` u32 delay (us), high Tx
+ ``ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH`` u32 max packets, high Tx
+ ``ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL`` u32 rate sampling interval
+ =========================================== ====== =======================
+
+Attributes are only included in reply if their value is not zero or the
+corresponding bit in ``ethtool_ops::supported_coalesce_params`` is set (i.e.
+they are declared as supported by driver).
+
+
+COALESCE_SET
+============
+
+Sets coalescing parameters like ``ETHTOOL_SCOALESCE`` ioctl request.
+
+Request contents:
+
+ =========================================== ====== =======================
+ ``ETHTOOL_A_COALESCE_HEADER`` nested request header
+ ``ETHTOOL_A_COALESCE_RX_USECS`` u32 delay (us), normal Rx
+ ``ETHTOOL_A_COALESCE_RX_MAX_FRAMES`` u32 max packets, normal Rx
+ ``ETHTOOL_A_COALESCE_RX_USECS_IRQ`` u32 delay (us), Rx in IRQ
+ ``ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ`` u32 max packets, Rx in IRQ
+ ``ETHTOOL_A_COALESCE_TX_USECS`` u32 delay (us), normal Tx
+ ``ETHTOOL_A_COALESCE_TX_MAX_FRAMES`` u32 max packets, normal Tx
+ ``ETHTOOL_A_COALESCE_TX_USECS_IRQ`` u32 delay (us), Tx in IRQ
+ ``ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ`` u32 max packets, Tx in IRQ
+ ``ETHTOOL_A_COALESCE_STATS_BLOCK_USECS`` u32 delay of stats update
+ ``ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX`` bool adaptive Rx coalesce
+ ``ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX`` bool adaptive Tx coalesce
+ ``ETHTOOL_A_COALESCE_PKT_RATE_LOW`` u32 threshold for low rate
+ ``ETHTOOL_A_COALESCE_RX_USECS_LOW`` u32 delay (us), low Rx
+ ``ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW`` u32 max packets, low Rx
+ ``ETHTOOL_A_COALESCE_TX_USECS_LOW`` u32 delay (us), low Tx
+ ``ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW`` u32 max packets, low Tx
+ ``ETHTOOL_A_COALESCE_PKT_RATE_HIGH`` u32 threshold for high rate
+ ``ETHTOOL_A_COALESCE_RX_USECS_HIGH`` u32 delay (us), high Rx
+ ``ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH`` u32 max packets, high Rx
+ ``ETHTOOL_A_COALESCE_TX_USECS_HIGH`` u32 delay (us), high Tx
+ ``ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH`` u32 max packets, high Tx
+ ``ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL`` u32 rate sampling interval
+ =========================================== ====== =======================
+
+Request is rejected if it contains attributes declared as unsupported by
+driver (i.e. such that the corresponding bit in
+``ethtool_ops::supported_coalesce_params`` is not set), regardless of their
+values. Driver may impose additional constraints on coalescing parameters
+and their values.
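+
+A minimal sketch of how a driver declares supported parameters (the flag
+names exist in ``include/linux/ethtool.h``; the ``foo`` ops and callbacks
+are hypothetical):
+
+.. code-block:: c
+
+	static const struct ethtool_ops foo_ethtool_ops = {
+		.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+					     ETHTOOL_COALESCE_RX_MAX_FRAMES |
+					     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+		.get_coalesce	= foo_get_coalesce,
+		.set_coalesce	= foo_set_coalesce,
+	};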
+
+
+PAUSE_GET
+=========
+
+Gets pause parameters like ``ETHTOOL_GPAUSEPARAM`` ioctl request.
+
+Request contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_PAUSE_HEADER`` nested request header
+ ===================================== ====== ==========================
+
+Kernel response contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_PAUSE_HEADER``            nested  reply header
+ ``ETHTOOL_A_PAUSE_AUTONEG`` bool pause autonegotiation
+ ``ETHTOOL_A_PAUSE_RX`` bool receive pause frames
+ ``ETHTOOL_A_PAUSE_TX`` bool transmit pause frames
+ ===================================== ====== ==========================
+
+
+PAUSE_SET
+=========
+
+Sets pause parameters like ``ETHTOOL_SPAUSEPARAM`` ioctl request.
+
+Request contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_PAUSE_HEADER`` nested request header
+ ``ETHTOOL_A_PAUSE_AUTONEG`` bool pause autonegotiation
+ ``ETHTOOL_A_PAUSE_RX`` bool receive pause frames
+ ``ETHTOOL_A_PAUSE_TX`` bool transmit pause frames
+ ===================================== ====== ==========================
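+
+The attributes map onto the same ``struct ethtool_pauseparam`` used by the
+ioctl path; a hedged driver-side sketch (the ``foo`` names are
+hypothetical):
+
+.. code-block:: c
+
+	static int foo_set_pauseparam(struct net_device *dev,
+				      struct ethtool_pauseparam *pause)
+	{
+		struct foo_priv *priv = netdev_priv(dev);
+
+		if (pause->autoneg)
+			return -EOPNOTSUPP;
+
+		return foo_hw_set_pause(priv, pause->rx_pause,
+					pause->tx_pause);
+	}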
+
+
+EEE_GET
+=======
+
+Gets EEE settings like ``ETHTOOL_GEEE`` ioctl request.
+
+Request contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_EEE_HEADER`` nested request header
+ ===================================== ====== ==========================
+
+Kernel response contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_EEE_HEADER``              nested  reply header
+ ``ETHTOOL_A_EEE_MODES_OURS``          bitset  supported/advertised modes
+ ``ETHTOOL_A_EEE_MODES_PEER``          bitset  peer advertised link modes
+ ``ETHTOOL_A_EEE_ACTIVE``              bool    EEE is actively used
+ ``ETHTOOL_A_EEE_ENABLED``             bool    EEE is enabled
+ ``ETHTOOL_A_EEE_TX_LPI_ENABLED``      bool    Tx LPI enabled
+ ``ETHTOOL_A_EEE_TX_LPI_TIMER``        u32     Tx LPI timeout (in us)
+ ===================================== ====== ==========================
+
+In ``ETHTOOL_A_EEE_MODES_OURS``, the mask consists of link modes for which
+EEE is enabled and the value of link modes for which EEE is advertised. Link
+modes for which the peer advertises EEE are listed in
+``ETHTOOL_A_EEE_MODES_PEER`` (no mask). The netlink interface allows
+reporting EEE status for all link modes but only the first 32 are provided
+by the ``ethtool_ops`` callback.
+
+
+EEE_SET
+=======
+
+Sets EEE settings like ``ETHTOOL_SEEE`` ioctl request.
+
+Request contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_EEE_HEADER`` nested request header
+ ``ETHTOOL_A_EEE_MODES_OURS``          bitset  advertised modes
+ ``ETHTOOL_A_EEE_ENABLED``             bool    EEE is enabled
+ ``ETHTOOL_A_EEE_TX_LPI_ENABLED``      bool    Tx LPI enabled
+ ``ETHTOOL_A_EEE_TX_LPI_TIMER``        u32     Tx LPI timeout (in us)
+ ===================================== ====== ==========================
+
+``ETHTOOL_A_EEE_MODES_OURS`` is used either to list link modes to advertise
+EEE for (if there is no mask) or to specify changes to the list (if there is
+a mask). The netlink interface allows specifying the advertisement for all
+link modes, but only the first 32 can be set at the moment, as that is what
+the ``ethtool_ops`` callback supports.
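+
+The 32-mode limit comes from ``struct ethtool_eee``, whose bitmaps are plain
+u32. A hedged sketch of a PHY-backed driver (the ``foo`` name is
+hypothetical):
+
+.. code-block:: c
+
+	static int foo_set_eee(struct net_device *dev,
+			       struct ethtool_eee *eee)
+	{
+		/* eee->advertised is a u32 bitmap, hence the limit. */
+		return phy_ethtool_set_eee(dev->phydev, eee);
+	}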
+
+
+TSINFO_GET
+==========
+
+Gets timestamping information like ``ETHTOOL_GET_TS_INFO`` ioctl request.
+
+Request contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_TSINFO_HEADER`` nested request header
+ ===================================== ====== ==========================
+
+Kernel response contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_TSINFO_HEADER``           nested  reply header
+ ``ETHTOOL_A_TSINFO_TIMESTAMPING`` bitset SO_TIMESTAMPING flags
+ ``ETHTOOL_A_TSINFO_TX_TYPES`` bitset supported Tx types
+ ``ETHTOOL_A_TSINFO_RX_FILTERS`` bitset supported Rx filters
+ ``ETHTOOL_A_TSINFO_PHC_INDEX`` u32 PTP hw clock index
+ ===================================== ====== ==========================
+
+``ETHTOOL_A_TSINFO_PHC_INDEX`` is absent if there is no associated PHC (there
+is no special value for this case). The bitset attributes are omitted if they
+would be empty (no bit set).
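+
+For illustration, a driver with no PHC reports ``phc_index = -1`` from its
+``get_ts_info`` callback and the netlink code then omits
+``ETHTOOL_A_TSINFO_PHC_INDEX`` (the ``foo`` name is hypothetical):
+
+.. code-block:: c
+
+	static int foo_get_ts_info(struct net_device *dev,
+				   struct ethtool_ts_info *info)
+	{
+		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+					SOF_TIMESTAMPING_SOFTWARE;
+		info->phc_index = -1;
+		return 0;
+	}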
+
+
Request translation
===================
@@ -545,37 +980,37 @@ have their netlink replacement yet.
``ETHTOOL_GLINK`` ``ETHTOOL_MSG_LINKSTATE_GET``
``ETHTOOL_GEEPROM`` n/a
``ETHTOOL_SEEPROM`` n/a
- ``ETHTOOL_GCOALESCE`` n/a
- ``ETHTOOL_SCOALESCE`` n/a
- ``ETHTOOL_GRINGPARAM`` n/a
- ``ETHTOOL_SRINGPARAM`` n/a
- ``ETHTOOL_GPAUSEPARAM`` n/a
- ``ETHTOOL_SPAUSEPARAM`` n/a
- ``ETHTOOL_GRXCSUM`` n/a
- ``ETHTOOL_SRXCSUM`` n/a
- ``ETHTOOL_GTXCSUM`` n/a
- ``ETHTOOL_STXCSUM`` n/a
- ``ETHTOOL_GSG`` n/a
- ``ETHTOOL_SSG`` n/a
+ ``ETHTOOL_GCOALESCE`` ``ETHTOOL_MSG_COALESCE_GET``
+ ``ETHTOOL_SCOALESCE`` ``ETHTOOL_MSG_COALESCE_SET``
+ ``ETHTOOL_GRINGPARAM`` ``ETHTOOL_MSG_RINGS_GET``
+ ``ETHTOOL_SRINGPARAM`` ``ETHTOOL_MSG_RINGS_SET``
+ ``ETHTOOL_GPAUSEPARAM`` ``ETHTOOL_MSG_PAUSE_GET``
+ ``ETHTOOL_SPAUSEPARAM`` ``ETHTOOL_MSG_PAUSE_SET``
+ ``ETHTOOL_GRXCSUM`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_SRXCSUM`` ``ETHTOOL_MSG_FEATURES_SET``
+ ``ETHTOOL_GTXCSUM`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_STXCSUM`` ``ETHTOOL_MSG_FEATURES_SET``
+ ``ETHTOOL_GSG`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_SSG`` ``ETHTOOL_MSG_FEATURES_SET``
``ETHTOOL_TEST`` n/a
``ETHTOOL_GSTRINGS`` ``ETHTOOL_MSG_STRSET_GET``
``ETHTOOL_PHYS_ID`` n/a
``ETHTOOL_GSTATS`` n/a
- ``ETHTOOL_GTSO`` n/a
- ``ETHTOOL_STSO`` n/a
+ ``ETHTOOL_GTSO`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_STSO`` ``ETHTOOL_MSG_FEATURES_SET``
``ETHTOOL_GPERMADDR`` rtnetlink ``RTM_GETLINK``
- ``ETHTOOL_GUFO`` n/a
- ``ETHTOOL_SUFO`` n/a
- ``ETHTOOL_GGSO`` n/a
- ``ETHTOOL_SGSO`` n/a
- ``ETHTOOL_GFLAGS`` n/a
- ``ETHTOOL_SFLAGS`` n/a
- ``ETHTOOL_GPFLAGS`` n/a
- ``ETHTOOL_SPFLAGS`` n/a
+ ``ETHTOOL_GUFO`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_SUFO`` ``ETHTOOL_MSG_FEATURES_SET``
+ ``ETHTOOL_GGSO`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_SGSO`` ``ETHTOOL_MSG_FEATURES_SET``
+ ``ETHTOOL_GFLAGS`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_SFLAGS`` ``ETHTOOL_MSG_FEATURES_SET``
+ ``ETHTOOL_GPFLAGS`` ``ETHTOOL_MSG_PRIVFLAGS_GET``
+ ``ETHTOOL_SPFLAGS`` ``ETHTOOL_MSG_PRIVFLAGS_SET``
``ETHTOOL_GRXFH`` n/a
``ETHTOOL_SRXFH`` n/a
- ``ETHTOOL_GGRO`` n/a
- ``ETHTOOL_SGRO`` n/a
+ ``ETHTOOL_GGRO`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_SGRO`` ``ETHTOOL_MSG_FEATURES_SET``
``ETHTOOL_GRXRINGS`` n/a
``ETHTOOL_GRXCLSRLCNT`` n/a
``ETHTOOL_GRXCLSRULE`` n/a
@@ -589,18 +1024,18 @@ have their netlink replacement yet.
``ETHTOOL_GSSET_INFO`` ``ETHTOOL_MSG_STRSET_GET``
``ETHTOOL_GRXFHINDIR`` n/a
``ETHTOOL_SRXFHINDIR`` n/a
- ``ETHTOOL_GFEATURES`` n/a
- ``ETHTOOL_SFEATURES`` n/a
- ``ETHTOOL_GCHANNELS`` n/a
- ``ETHTOOL_SCHANNELS`` n/a
+ ``ETHTOOL_GFEATURES`` ``ETHTOOL_MSG_FEATURES_GET``
+ ``ETHTOOL_SFEATURES`` ``ETHTOOL_MSG_FEATURES_SET``
+ ``ETHTOOL_GCHANNELS`` ``ETHTOOL_MSG_CHANNELS_GET``
+ ``ETHTOOL_SCHANNELS`` ``ETHTOOL_MSG_CHANNELS_SET``
``ETHTOOL_SET_DUMP`` n/a
``ETHTOOL_GET_DUMP_FLAG`` n/a
``ETHTOOL_GET_DUMP_DATA`` n/a
- ``ETHTOOL_GET_TS_INFO`` n/a
+ ``ETHTOOL_GET_TS_INFO`` ``ETHTOOL_MSG_TSINFO_GET``
``ETHTOOL_GMODULEINFO`` n/a
``ETHTOOL_GMODULEEEPROM`` n/a
- ``ETHTOOL_GEEE`` n/a
- ``ETHTOOL_SEEE`` n/a
+ ``ETHTOOL_GEEE`` ``ETHTOOL_MSG_EEE_GET``
+ ``ETHTOOL_SEEE`` ``ETHTOOL_MSG_EEE_SET``
``ETHTOOL_GRSSH`` n/a
``ETHTOOL_SRSSH`` n/a
``ETHTOOL_GTUNABLE`` n/a
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index c4a328f2d57a..2f0f8b17dade 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -606,7 +606,7 @@ before a conversion to the new layout is being done behind the scenes!
Currently, the classic BPF format is being used for JITing on most
32-bit architectures, whereas x86-64, aarch64, s390x, powerpc64,
-sparc64, arm32, riscv (RV64G) perform JIT compilation from eBPF
+sparc64, arm32, riscv64, riscv32 perform JIT compilation from eBPF
instruction set.
Some core changes of the new internal format:
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index d07d9855dcd3..50133d9761c9 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -8,6 +8,7 @@ Contents:
netdev-FAQ
af_xdp
+ bareudp
batman-adv
can
can_ucan_protocol
@@ -33,6 +34,7 @@ Contents:
tls
tls-offload
nfc
+ 6lowpan
.. only:: subproject and html
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 5f53faff4e25..ee961d322d93 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -958,6 +958,15 @@ ip_nonlocal_bind - BOOLEAN
which can be quite useful - but may break some applications.
Default: 0
+ip_autobind_reuse - BOOLEAN
+ By default, bind() does not select the ports automatically even if
+ the new socket and all sockets bound to the port have SO_REUSEADDR.
+ ip_autobind_reuse allows bind() to reuse the port and this is useful
+ when you use bind()+connect(), but may break some applications.
+ The preferred solution is to use IP_BIND_ADDRESS_NO_PORT and this
+ option should only be set by experts.
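+	A minimal sketch of that preferred approach (assuming fd, src and
+	dst are already set up):
+
+		int one = 1;
+
+		setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT,
+			   &one, sizeof(one));
+		bind(fd, (struct sockaddr *)&src, sizeof(src));
+		connect(fd, (struct sockaddr *)&dst, sizeof(dst));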
+ Default: 0
+
ip_dynaddr - BOOLEAN
If set non-zero, enables support for dynamic addresses.
If set to a non-zero value larger than 1, a kernel log
diff --git a/Documentation/networking/page_pool.rst b/Documentation/networking/page_pool.rst
new file mode 100644
index 000000000000..43088ddf95e4
--- /dev/null
+++ b/Documentation/networking/page_pool.rst
@@ -0,0 +1,159 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+Page Pool API
+=============
+
+The page_pool allocator is optimized for the XDP mode that uses one frame
+per page, but it can fall back on the regular page allocator APIs.
+
+Basic use involves replacing alloc_pages() calls with
+page_pool_alloc_pages() calls. Drivers should use page_pool_dev_alloc_pages()
+in place of dev_alloc_pages().
+
+The API keeps track of in-flight pages in order to let the API user know
+when it is safe to free a page_pool object. Thus, API users
+must call page_pool_release_page() when a page leaves the page_pool, or
+call page_pool_put_page() where appropriate, in order to maintain correct
+accounting.
+
+An API user must call page_pool_put_page() exactly once on a page: it
+will either recycle the page or, in case of refcnt > 1, release the
+DMA mapping and in-flight state accounting.
+
+Architecture overview
+=====================
+
+.. code-block:: none
+
+ +------------------+
+ | Driver |
+ +------------------+
+ ^
+ |
+ |
+ |
+ v
+ +--------------------------------------------+
+ | request memory |
+ +--------------------------------------------+
+ ^ ^
+ | |
+ | Pool empty | Pool has entries
+ | |
+ v v
+ +-----------------------+ +------------------------+
+ | alloc (and map) pages | | get page from cache |
+ +-----------------------+ +------------------------+
+ ^ ^
+ | |
+ | cache available | No entries, refill
+ | | from ptr-ring
+ | |
+ v v
+ +-----------------+ +------------------+
+ | Fast cache | | ptr-ring cache |
+ +-----------------+ +------------------+
+
+API interface
+=============
+The number of pools created **must** match the number of hardware queues
+unless hardware restrictions make that impossible. Doing otherwise would
+defeat the purpose of page pool, which is to allocate pages quickly from a
+cache, without locking. The lockless guarantee naturally comes from running
+under a NAPI softirq. The protection doesn't strictly have to be NAPI; any
+guarantee that allocating a page will cause no race conditions is enough.
+A per-queue setup sketch is shown after the function list below.
+
+* page_pool_create(): Create a pool.
+ * flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
+ * order: 2^order pages on allocation
+ * pool_size: size of the ptr_ring
+ * nid: preferred NUMA node for allocation
+ * dev: struct device. Used on DMA operations
+ * dma_dir: DMA direction
+ * max_len: max DMA sync memory size
+ * offset: DMA address offset
+
+* page_pool_put_page(): The outcome of this depends on the page refcnt. If the
+ driver bumps the refcnt > 1 this will unmap the page. If the page refcnt is 1
+ the allocator owns the page and will try to recycle it in one of the pool
+ caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device
+ using dma_sync_single_range_for_device().
+
+* page_pool_put_full_page(): Similar to page_pool_put_page(), but will DMA
+  sync the entire memory area configured in pool->max_len.
+
+* page_pool_recycle_direct(): Similar to page_pool_put_full_page() but caller
+  must guarantee safe context (e.g. NAPI), since it will recycle the page
+  directly into the pool fast cache.
+
+* page_pool_release_page(): Unmap the page (if mapped) and account for it on
+ inflight counters.
+
+* page_pool_dev_alloc_pages(): Get a page from the page allocator or page_pool
+ caches.
+
+* page_pool_get_dma_addr(): Retrieve the stored DMA address.
+
+* page_pool_get_dma_dir(): Retrieve the stored DMA direction.
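+
+A minimal per-queue setup sketch, as promised above (``priv``, ``rxq`` and
+``DESC_NUM`` are driver-specific assumptions):
+
+.. code-block:: c
+
+	int i;
+
+	for (i = 0; i < priv->num_rx_queues; i++) {
+		struct page_pool_params pp = {
+			.flags = PP_FLAG_DMA_MAP,
+			.pool_size = DESC_NUM,
+			.nid = NUMA_NO_NODE,
+			.dev = priv->dev,
+			.dma_dir = DMA_FROM_DEVICE,
+		};
+
+		priv->rxq[i].pool = page_pool_create(&pp);
+		if (IS_ERR(priv->rxq[i].pool))
+			return PTR_ERR(priv->rxq[i].pool);
+	}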
+
+Coding examples
+===============
+
+Registration
+------------
+
+.. code-block:: c
+
+ /* Page pool registration */
+ struct page_pool_params pp_params = { 0 };
+ struct xdp_rxq_info xdp_rxq;
+ int err;
+
+ pp_params.order = 0;
+ /* internal DMA mapping in page_pool */
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = DESC_NUM;
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dev = priv->dev;
+ pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+ page_pool = page_pool_create(&pp_params);
+
+ err = xdp_rxq_info_reg(&xdp_rxq, ndev, 0);
+ if (err)
+ goto err_out;
+
+ err = xdp_rxq_info_reg_mem_model(&xdp_rxq, MEM_TYPE_PAGE_POOL, page_pool);
+ if (err)
+ goto err_out;
+
+NAPI poller
+-----------
+
+
+.. code-block:: c
+
+   /* NAPI Rx poller */
+   enum dma_data_direction dma_dir;
+
+   dma_dir = page_pool_get_dma_dir(dring->page_pool);
+   while (done < budget) {
+	   if (some_error) {
+		   /* Recycle straight into the pool's fast cache. */
+		   page_pool_recycle_direct(page_pool, page);
+		   continue;
+	   }
+	   if (packet_is_xdp) {
+		   if (verdict == XDP_DROP)
+			   page_pool_recycle_direct(page_pool, page);
+	   } else if (packet_is_skb) {
+		   /* The skb path now owns the page. */
+		   page_pool_release_page(page_pool, page);
+		   new_page = page_pool_dev_alloc_pages(page_pool);
+	   }
+	   done++;
+   }
+
+Driver unload
+-------------
+
+.. code-block:: c
+
+ /* Driver unload */
+ page_pool_put_full_page(page_pool, page, false);
+ xdp_rxq_info_unreg(&xdp_rxq);
diff --git a/Documentation/networking/sfp-phylink.rst b/Documentation/networking/sfp-phylink.rst
index d753a309f9d1..5aec7c8857d0 100644
--- a/Documentation/networking/sfp-phylink.rst
+++ b/Documentation/networking/sfp-phylink.rst
@@ -74,10 +74,13 @@ phylib to the sfp/phylink support. Please send patches to improve
this documentation.
1. Optionally split the network driver's phylib update function into
- three parts dealing with link-down, link-up and reconfiguring the
- MAC settings. This can be done as a separate preparation commit.
+ two parts dealing with link-down and link-up. This can be done as
+ a separate preparation commit.
- An example of this preparation can be found in git commit fc548b991fb0.
+ An older example of this preparation can be found in git commit
+ fc548b991fb0, although this was splitting into three parts; the
+ link-up part now includes configuring the MAC for the link settings.
+ Please see :c:func:`mac_link_up` for more information on this.
2. Replace::
@@ -135,27 +138,27 @@ this documentation.
.. code-block:: c
- static int foo_ethtool_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
- {
- struct foo_priv *priv = netdev_priv(dev);
-
- return phylink_ethtool_ksettings_set(priv->phylink, cmd);
- }
-
- static int foo_ethtool_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
- {
- struct foo_priv *priv = netdev_priv(dev);
+ static int foo_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+ {
+ struct foo_priv *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+ }
- return phylink_ethtool_ksettings_get(priv->phylink, cmd);
- }
+ static int foo_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+ {
+ struct foo_priv *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+ }
-7. Replace the call to:
+7. Replace the call to::
phy_dev = of_phy_connect(dev, node, link_func, flags, phy_interface);
- and associated code with a call to:
+ and associated code with a call to::
err = phylink_of_phy_connect(priv->phylink, node, flags);
@@ -207,6 +210,14 @@ this documentation.
using. This is particularly important for in-band negotiation
methods such as 1000base-X and SGMII.
+ The :c:func:`mac_link_up` method is used to inform the MAC that the
+ link has come up. The call includes the negotiation mode and interface
+ for reference only. The finalised link parameters are also supplied
+ (speed, duplex and flow control/pause enablement settings) which
+ should be used to configure the MAC when the MAC and PCS are not
+ tightly integrated, or when the settings are not coming from in-band
+ negotiation.
+
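+   A hedged sketch of such a method (the ``foo_`` name is hypothetical;
+   the signature follows the phylink ``mac_link_up`` prototype):
+
+   .. code-block:: c
+
+	static void foo_mac_link_up(struct phylink_config *config,
+				    struct phy_device *phy,
+				    unsigned int mode,
+				    phy_interface_t interface,
+				    int speed, int duplex,
+				    bool tx_pause, bool rx_pause)
+	{
+		/* Program MAC speed, duplex and pause settings here,
+		 * then enable the transmitter and receiver.
+		 */
+	}
+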
The :c:func:`mac_config` method is used to update the MAC with the
requested state, and must avoid unnecessarily taking the link down
when making changes to the MAC configuration. This means the
diff --git a/MAINTAINERS b/MAINTAINERS
index a2b9bbf6e8b2..195052943574 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -176,7 +176,7 @@ L: linux-wpan@vger.kernel.org
S: Maintained
F: net/6lowpan/
F: include/net/6lowpan.h
-F: Documentation/networking/6lowpan.txt
+F: Documentation/networking/6lowpan.rst
6PACK NETWORK DRIVER FOR AX.25
M: Andreas Koensgen <ajk@comnets.uni-bremen.de>
@@ -3173,6 +3173,8 @@ R: Martin KaFai Lau <kafai@fb.com>
R: Song Liu <songliubraving@fb.com>
R: Yonghong Song <yhs@fb.com>
R: Andrii Nakryiko <andriin@fb.com>
+R: John Fastabend <john.fastabend@gmail.com>
+R: KP Singh <kpsingh@chromium.org>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
@@ -3239,11 +3241,22 @@ L: bpf@vger.kernel.org
S: Maintained
F: arch/powerpc/net/
-BPF JIT for RISC-V (RV64G)
+BPF JIT for RISC-V (32-bit)
+M: Luke Nelson <luke.r.nels@gmail.com>
+M: Xi Wang <xi.wang@gmail.com>
+L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
+S: Maintained
+F: arch/riscv/net/
+X: arch/riscv/net/bpf_jit_comp64.c
+
+BPF JIT for RISC-V (64-bit)
M: Björn Töpel <bjorn.topel@gmail.com>
L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
S: Maintained
F: arch/riscv/net/
+X: arch/riscv/net/bpf_jit_comp32.c
BPF JIT for S390
M: Ilya Leoshkevich <iii@linux.ibm.com>
@@ -9385,6 +9398,8 @@ F: include/net/l3mdev.h
L7 BPF FRAMEWORK
M: John Fastabend <john.fastabend@gmail.com>
M: Daniel Borkmann <daniel@iogearbox.net>
+M: Jakub Sitnicki <jakub@cloudflare.com>
+M: Lorenz Bauer <lmb@cloudflare.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
@@ -9392,6 +9407,7 @@ F: include/linux/skmsg.h
F: net/core/skmsg.c
F: net/core/sock_map.c
F: net/ipv4/tcp_bpf.c
+F: net/ipv4/udp_bpf.c
LANTIQ / INTEL Ethernet drivers
M: Hauke Mehrtens <hauke@hauke-m.de>
@@ -10122,6 +10138,13 @@ M: Nicolas Pitre <nico@fluxnic.net>
S: Odd Fixes
F: drivers/mmc/host/mvsdio.*
+MARVELL USB MDIO CONTROLLER DRIVER
+M: Tobias Waldekranz <tobias@waldekranz.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/phy/mdio-mvusb.c
+F: Documentation/devicetree/bindings/net/marvell,mvusb.yaml
+
MARVELL XENON MMC/SD/SDIO HOST CONTROLLER DRIVER
M: Hu Ziji <huziji@marvell.com>
L: linux-mmc@vger.kernel.org
@@ -11738,6 +11761,7 @@ W: https://github.com/multipath-tcp/mptcp_net-next/wiki
B: https://github.com/multipath-tcp/mptcp_net-next/issues
S: Maintained
F: include/net/mptcp.h
+F: include/uapi/linux/mptcp.h
F: net/mptcp/
F: tools/testing/selftests/net/mptcp/
@@ -13706,6 +13730,12 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/qcom/
+QCOM IPA DRIVER
+M: Alex Elder <elder@kernel.org>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ipa/
+
QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
M: Gabriel Somlo <somlo@cmu.edu>
M: "Michael S. Tsirkin" <mst@redhat.com>
@@ -16175,6 +16205,13 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/synopsys/
+SYNOPSYS DESIGNWARE ETHERNET XPCS DRIVER
+M: Jose Abreu <Jose.Abreu@synopsys.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/phy/mdio-xpcs.c
+F: include/linux/mdio-xpcs.h
+
SYNOPSYS DESIGNWARE I2C DRIVER
M: Jarkko Nikula <jarkko.nikula@linux.intel.com>
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@ -17917,6 +17954,13 @@ S: Supported
F: arch/x86/kernel/cpu/vmware.c
F: arch/x86/include/asm/vmware.h
+VMWARE VIRTUAL PTP CLOCK DRIVER
+M: Vivek Thampi <vithampi@vmware.com>
+M: "VMware, Inc." <pv-drivers@vmware.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/ptp/ptp_vmw.c
+
VMWARE PVRDMA DRIVER
M: Adit Ranadive <aditr@vmware.com>
M: VMware PV-Drivers <pv-drivers@vmware.com>
diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
index 1b5bc6b5e806..347a5edc6927 100644
--- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
@@ -180,7 +180,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ethphy>;
phy-reset-duration = <10>;
phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index 92629cbdc184..cbf97b621931 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -187,4 +187,53 @@
#size-cells = <0>;
};
};
+
+ mcu_cpsw: ethernet@46000000 {
+ compatible = "ti,am654-cpsw-nuss";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ reg = <0x0 0x46000000 0x0 0x200000>;
+ reg-names = "cpsw_nuss";
+ ranges = <0x0 0x0 0x0 0x46000000 0x0 0x200000>;
+ dma-coherent;
+ clocks = <&k3_clks 5 10>;
+ clock-names = "fck";
+ power-domains = <&k3_pds 5 TI_SCI_PD_EXCLUSIVE>;
+
+ dmas = <&mcu_udmap 0xf000>,
+ <&mcu_udmap 0xf001>,
+ <&mcu_udmap 0xf002>,
+ <&mcu_udmap 0xf003>,
+ <&mcu_udmap 0xf004>,
+ <&mcu_udmap 0xf005>,
+ <&mcu_udmap 0xf006>,
+ <&mcu_udmap 0xf007>,
+ <&mcu_udmap 0x7000>;
+ dma-names = "tx0", "tx1", "tx2", "tx3",
+ "tx4", "tx5", "tx6", "tx7",
+ "rx";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpsw_port1: port@1 {
+ reg = <1>;
+ ti,mac-only;
+ label = "port1";
+ ti,syscon-efuse = <&mcu_conf 0x200>;
+ phys = <&phy_gmii_sel 1>;
+ };
+ };
+
+ davinci_mdio: mdio@f00 {
+ compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+ reg = <0x0 0xf00 0x0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&k3_clks 5 10>;
+ clock-names = "fck";
+ bus_freq = <1000000>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-am65.dtsi b/arch/arm64/boot/dts/ti/k3-am65.dtsi
index aea36e29dd32..5be75e430965 100644
--- a/arch/arm64/boot/dts/ti/k3-am65.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65.dtsi
@@ -30,6 +30,7 @@
i2c3 = &main_i2c1;
i2c4 = &main_i2c2;
i2c5 = &main_i2c3;
+ ethernet0 = &cpsw_port1;
};
chosen { };
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index 1700996800eb..2f3d3316a1cf 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -7,6 +7,7 @@
#include "k3-am654.dtsi"
#include <dt-bindings/input/input.h>
+#include <dt-bindings/net/ti-dp83867.h>
/ {
compatible = "ti,am654-evm", "ti,am654";
@@ -95,7 +96,30 @@
wkup_pca554_default: wkup_pca554_default {
pinctrl-single,pins = <
AM65X_WKUP_IOPAD(0x0034, PIN_INPUT, 7) /* (T1) MCU_OSPI1_CLK.WKUP_GPIO0_25 */
+ >;
+ };
+
+ mcu_cpsw_pins_default: mcu_cpsw_pins_default {
+ pinctrl-single,pins = <
+ AM65X_WKUP_IOPAD(0x0058, PIN_OUTPUT, 0) /* (N4) MCU_RGMII1_TX_CTL */
+ AM65X_WKUP_IOPAD(0x005c, PIN_INPUT, 0) /* (N5) MCU_RGMII1_RX_CTL */
+ AM65X_WKUP_IOPAD(0x0060, PIN_OUTPUT, 0) /* (M2) MCU_RGMII1_TD3 */
+ AM65X_WKUP_IOPAD(0x0064, PIN_OUTPUT, 0) /* (M3) MCU_RGMII1_TD2 */
+ AM65X_WKUP_IOPAD(0x0068, PIN_OUTPUT, 0) /* (M4) MCU_RGMII1_TD1 */
+ AM65X_WKUP_IOPAD(0x006c, PIN_OUTPUT, 0) /* (M5) MCU_RGMII1_TD0 */
+ AM65X_WKUP_IOPAD(0x0078, PIN_INPUT, 0) /* (L2) MCU_RGMII1_RD3 */
+ AM65X_WKUP_IOPAD(0x007c, PIN_INPUT, 0) /* (L5) MCU_RGMII1_RD2 */
+ AM65X_WKUP_IOPAD(0x0080, PIN_INPUT, 0) /* (M6) MCU_RGMII1_RD1 */
+ AM65X_WKUP_IOPAD(0x0084, PIN_INPUT, 0) /* (L6) MCU_RGMII1_RD0 */
+ AM65X_WKUP_IOPAD(0x0070, PIN_INPUT, 0) /* (N1) MCU_RGMII1_TXC */
+ AM65X_WKUP_IOPAD(0x0074, PIN_INPUT, 0) /* (M1) MCU_RGMII1_RXC */
+ >;
+ };
+ mcu_mdio_pins_default: mcu_mdio1_pins_default {
+ pinctrl-single,pins = <
+ AM65X_WKUP_IOPAD(0x008c, PIN_OUTPUT, 0) /* (L1) MCU_MDIO0_MDC */
+ AM65X_WKUP_IOPAD(0x0088, PIN_INPUT, 0) /* (L4) MCU_MDIO0_MDIO */
>;
};
};
@@ -419,3 +443,21 @@
data-lanes = <1 2>;
};
};
+
+&mcu_cpsw {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
+};
+
+&davinci_mdio {
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ };
+};
+
+&cpsw_port1 {
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&phy0>;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
index 7a5c3d4adadd..98e5e17e3ff7 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
@@ -8,6 +8,7 @@
#include "k3-j721e-som-p0.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
+#include <dt-bindings/net/ti-dp83867.h>
/ {
chosen {
@@ -128,6 +129,30 @@
J721E_WKUP_IOPAD(0x38, PIN_INPUT, 0) /* (A23) MCU_OSPI1_LBCLKO */
>;
};
+
+ mcu_cpsw_pins_default: mcu_cpsw_pins_default {
+ pinctrl-single,pins = <
+ J721E_WKUP_IOPAD(0x0058, PIN_OUTPUT, 0) /* MCU_RGMII1_TX_CTL */
+ J721E_WKUP_IOPAD(0x005c, PIN_INPUT, 0) /* MCU_RGMII1_RX_CTL */
+ J721E_WKUP_IOPAD(0x0060, PIN_OUTPUT, 0) /* MCU_RGMII1_TD3 */
+ J721E_WKUP_IOPAD(0x0064, PIN_OUTPUT, 0) /* MCU_RGMII1_TD2 */
+ J721E_WKUP_IOPAD(0x0068, PIN_OUTPUT, 0) /* MCU_RGMII1_TD1 */
+ J721E_WKUP_IOPAD(0x006c, PIN_OUTPUT, 0) /* MCU_RGMII1_TD0 */
+ J721E_WKUP_IOPAD(0x0078, PIN_INPUT, 0) /* MCU_RGMII1_RD3 */
+ J721E_WKUP_IOPAD(0x007c, PIN_INPUT, 0) /* MCU_RGMII1_RD2 */
+ J721E_WKUP_IOPAD(0x0080, PIN_INPUT, 0) /* MCU_RGMII1_RD1 */
+ J721E_WKUP_IOPAD(0x0084, PIN_INPUT, 0) /* MCU_RGMII1_RD0 */
+ J721E_WKUP_IOPAD(0x0070, PIN_INPUT, 0) /* MCU_RGMII1_TXC */
+ J721E_WKUP_IOPAD(0x0074, PIN_INPUT, 0) /* MCU_RGMII1_RXC */
+ >;
+ };
+
+ mcu_mdio_pins_default: mcu_mdio1_pins_default {
+ pinctrl-single,pins = <
+ J721E_WKUP_IOPAD(0x008c, PIN_OUTPUT, 0) /* MCU_MDIO0_MDC */
+ J721E_WKUP_IOPAD(0x0088, PIN_INPUT, 0) /* MCU_MDIO0_MDIO */
+ >;
+ };
};
&wkup_uart0 {
@@ -429,3 +454,21 @@
#gpio-cells = <2>;
};
};
+
+&mcu_cpsw {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
+};
+
+&davinci_mdio {
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ };
+};
+
+&cpsw_port1 {
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&phy0>;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index 16c874bfd49a..bfe91f2a52cb 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -270,4 +270,53 @@
ti,sci-rm-range-rflow = <0x00>; /* GP RFLOW */
};
};
+
+ mcu_cpsw: ethernet@46000000 {
+ compatible = "ti,j721e-cpsw-nuss";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ reg = <0x0 0x46000000 0x0 0x200000>;
+ reg-names = "cpsw_nuss";
+ ranges = <0x0 0x0 0x0 0x46000000 0x0 0x200000>;
+ dma-coherent;
+ clocks = <&k3_clks 18 22>;
+ clock-names = "fck";
+ power-domains = <&k3_pds 18 TI_SCI_PD_EXCLUSIVE>;
+
+ dmas = <&mcu_udmap 0xf000>,
+ <&mcu_udmap 0xf001>,
+ <&mcu_udmap 0xf002>,
+ <&mcu_udmap 0xf003>,
+ <&mcu_udmap 0xf004>,
+ <&mcu_udmap 0xf005>,
+ <&mcu_udmap 0xf006>,
+ <&mcu_udmap 0xf007>,
+ <&mcu_udmap 0x7000>;
+ dma-names = "tx0", "tx1", "tx2", "tx3",
+ "tx4", "tx5", "tx6", "tx7",
+ "rx";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpsw_port1: port@1 {
+ reg = <1>;
+ ti,mac-only;
+ label = "port1";
+ ti,syscon-efuse = <&mcu_conf 0x200>;
+ phys = <&phy_gmii_sel 1>;
+ };
+ };
+
+ davinci_mdio: mdio@f00 {
+ compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+ reg = <0x0 0xf00 0x0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&k3_clks 18 22>;
+ clock-names = "fck";
+ bus_freq = <1000000>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e.dtsi b/arch/arm64/boot/dts/ti/k3-j721e.dtsi
index 027bd1febafa..2f9a56d9b114 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e.dtsi
@@ -30,6 +30,7 @@
serial9 = &main_uart7;
serial10 = &main_uart8;
serial11 = &main_uart9;
+ ethernet0 = &cpsw_port1;
};
chosen { };
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index e7573289a66f..a6c0d02d9928 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -284,6 +284,7 @@ CONFIG_SMSC911X=y
CONFIG_SNI_AVE=y
CONFIG_SNI_NETSEC=y
CONFIG_STMMAC_ETH=m
+CONFIG_TI_K3_AM65_CPSW_NUSS=y
CONFIG_MDIO_BUS_MUX_MMIOREG=y
CONFIG_MARVELL_PHY=m
CONFIG_MARVELL_10G_PHY=m
@@ -699,6 +700,8 @@ CONFIG_QCOM_HIDMA_MGMT=y
CONFIG_QCOM_HIDMA=y
CONFIG_RCAR_DMAC=y
CONFIG_RENESAS_USB_DMAC=m
+CONFIG_TI_K3_UDMA=y
+CONFIG_TI_K3_UDMA_GLUE_LAYER=y
CONFIG_VFIO=y
CONFIG_VFIO_PCI=y
CONFIG_VIRTIO_PCI=y
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index a32d478a7f41..b4c89a1acebb 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -303,12 +303,6 @@ SECTIONS
*(.branch_lt)
}
-#ifdef CONFIG_DEBUG_INFO_BTF
- .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {
- *(.BTF)
- }
-#endif
-
.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
__start_opd = .;
KEEP(*(.opd))
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index cd5db57bfd41..8672e77a5b7a 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -55,7 +55,7 @@ config RISCV
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_MMIOWB
select ARCH_HAS_DEBUG_VIRTUAL
- select HAVE_EBPF_JIT if 64BIT
+ select HAVE_EBPF_JIT
select EDAC_SUPPORT
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
diff --git a/arch/riscv/net/Makefile b/arch/riscv/net/Makefile
index ec5b14763316..9a1e5f0a94e5 100644
--- a/arch/riscv/net/Makefile
+++ b/arch/riscv/net/Makefile
@@ -1,2 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
+
+obj-$(CONFIG_BPF_JIT) += bpf_jit_core.o
+
+ifeq ($(CONFIG_ARCH_RV64I),y)
+ obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o
+else
+ obj-$(CONFIG_BPF_JIT) += bpf_jit_comp32.o
+endif
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
new file mode 100644
index 000000000000..20e235d06f66
--- /dev/null
+++ b/arch/riscv/net/bpf_jit.h
@@ -0,0 +1,514 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common functionality for RV32 and RV64 BPF JIT compilers
+ *
+ * Copyright (c) 2019 Björn Töpel <bjorn.topel@gmail.com>
+ *
+ */
+
+#ifndef _BPF_JIT_H
+#define _BPF_JIT_H
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <asm/cacheflush.h>
+
+enum {
+ RV_REG_ZERO = 0, /* The constant value 0 */
+ RV_REG_RA = 1, /* Return address */
+ RV_REG_SP = 2, /* Stack pointer */
+ RV_REG_GP = 3, /* Global pointer */
+ RV_REG_TP = 4, /* Thread pointer */
+ RV_REG_T0 = 5, /* Temporaries */
+ RV_REG_T1 = 6,
+ RV_REG_T2 = 7,
+ RV_REG_FP = 8, /* Saved register/frame pointer */
+ RV_REG_S1 = 9, /* Saved register */
+ RV_REG_A0 = 10, /* Function argument/return values */
+ RV_REG_A1 = 11, /* Function arguments */
+ RV_REG_A2 = 12,
+ RV_REG_A3 = 13,
+ RV_REG_A4 = 14,
+ RV_REG_A5 = 15,
+ RV_REG_A6 = 16,
+ RV_REG_A7 = 17,
+ RV_REG_S2 = 18, /* Saved registers */
+ RV_REG_S3 = 19,
+ RV_REG_S4 = 20,
+ RV_REG_S5 = 21,
+ RV_REG_S6 = 22,
+ RV_REG_S7 = 23,
+ RV_REG_S8 = 24,
+ RV_REG_S9 = 25,
+ RV_REG_S10 = 26,
+ RV_REG_S11 = 27,
+ RV_REG_T3 = 28, /* Temporaries */
+ RV_REG_T4 = 29,
+ RV_REG_T5 = 30,
+ RV_REG_T6 = 31,
+};
+
+struct rv_jit_context {
+ struct bpf_prog *prog;
+ u32 *insns; /* RV insns */
+ int ninsns;
+ int epilogue_offset;
+ int *offset; /* BPF to RV */
+ unsigned long flags;
+ int stack_size;
+};
+
+struct rv_jit_data {
+ struct bpf_binary_header *header;
+ u8 *image;
+ struct rv_jit_context ctx;
+};
+
+static inline void bpf_fill_ill_insns(void *area, unsigned int size)
+{
+ memset(area, 0, size);
+}
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+ flush_icache_range((unsigned long)start, (unsigned long)end);
+}
+
+static inline void emit(const u32 insn, struct rv_jit_context *ctx)
+{
+ if (ctx->insns)
+ ctx->insns[ctx->ninsns] = insn;
+
+ ctx->ninsns++;
+}
+
+static inline int epilogue_offset(struct rv_jit_context *ctx)
+{
+ int to = ctx->epilogue_offset, from = ctx->ninsns;
+
+ return (to - from) << 2;
+}
+
+/* Return -1 or inverted cond. */
+static inline int invert_bpf_cond(u8 cond)
+{
+ switch (cond) {
+ case BPF_JEQ:
+ return BPF_JNE;
+ case BPF_JGT:
+ return BPF_JLE;
+ case BPF_JLT:
+ return BPF_JGE;
+ case BPF_JGE:
+ return BPF_JLT;
+ case BPF_JLE:
+ return BPF_JGT;
+ case BPF_JNE:
+ return BPF_JEQ;
+ case BPF_JSGT:
+ return BPF_JSLE;
+ case BPF_JSLT:
+ return BPF_JSGE;
+ case BPF_JSGE:
+ return BPF_JSLT;
+ case BPF_JSLE:
+ return BPF_JSGT;
+ }
+ return -1;
+}
+
+static inline bool is_12b_int(long val)
+{
+ return -(1L << 11) <= val && val < (1L << 11);
+}
+
+static inline int is_12b_check(int off, int insn)
+{
+ if (!is_12b_int(off)) {
+ pr_err("bpf-jit: insn=%d 12b < offset=%d not supported yet!\n",
+ insn, (int)off);
+ return -1;
+ }
+ return 0;
+}
+
+static inline bool is_13b_int(long val)
+{
+ return -(1L << 12) <= val && val < (1L << 12);
+}
+
+static inline bool is_21b_int(long val)
+{
+ return -(1L << 20) <= val && val < (1L << 20);
+}
+
+static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx)
+{
+ int from, to;
+
+ off++; /* BPF branch is from PC+1, RV is from PC */
+ from = (insn > 0) ? ctx->offset[insn - 1] : 0;
+ to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
+ return (to - from) << 2;
+}
+
+/* Instruction formats. */
+
+static inline u32 rv_r_insn(u8 funct7, u8 rs2, u8 rs1, u8 funct3, u8 rd,
+ u8 opcode)
+{
+ return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
+ (rd << 7) | opcode;
+}
+
+static inline u32 rv_i_insn(u16 imm11_0, u8 rs1, u8 funct3, u8 rd, u8 opcode)
+{
+ return (imm11_0 << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) |
+ opcode;
+}
+
+static inline u32 rv_s_insn(u16 imm11_0, u8 rs2, u8 rs1, u8 funct3, u8 opcode)
+{
+ u8 imm11_5 = imm11_0 >> 5, imm4_0 = imm11_0 & 0x1f;
+
+ return (imm11_5 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
+ (imm4_0 << 7) | opcode;
+}
+
+static inline u32 rv_b_insn(u16 imm12_1, u8 rs2, u8 rs1, u8 funct3, u8 opcode)
+{
+ u8 imm12 = ((imm12_1 & 0x800) >> 5) | ((imm12_1 & 0x3f0) >> 4);
+ u8 imm4_1 = ((imm12_1 & 0xf) << 1) | ((imm12_1 & 0x400) >> 10);
+
+ return (imm12 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
+ (imm4_1 << 7) | opcode;
+}
+
+static inline u32 rv_u_insn(u32 imm31_12, u8 rd, u8 opcode)
+{
+ return (imm31_12 << 12) | (rd << 7) | opcode;
+}
+
+static inline u32 rv_j_insn(u32 imm20_1, u8 rd, u8 opcode)
+{
+ u32 imm;
+
+ imm = (imm20_1 & 0x80000) | ((imm20_1 & 0x3ff) << 9) |
+ ((imm20_1 & 0x400) >> 2) | ((imm20_1 & 0x7f800) >> 11);
+
+ return (imm << 12) | (rd << 7) | opcode;
+}
+
+static inline u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1,
+ u8 funct3, u8 rd, u8 opcode)
+{
+ u8 funct7 = (funct5 << 2) | (aq << 1) | rl;
+
+ return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode);
+}
+
+/* Instructions shared by both RV32 and RV64. */
+
+static inline u32 rv_addi(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 0, rd, 0x13);
+}
+
+static inline u32 rv_andi(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 7, rd, 0x13);
+}
+
+static inline u32 rv_ori(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 6, rd, 0x13);
+}
+
+static inline u32 rv_xori(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 4, rd, 0x13);
+}
+
+static inline u32 rv_slli(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 1, rd, 0x13);
+}
+
+static inline u32 rv_srli(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 5, rd, 0x13);
+}
+
+static inline u32 rv_srai(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x13);
+}
+
+static inline u32 rv_lui(u8 rd, u32 imm31_12)
+{
+ return rv_u_insn(imm31_12, rd, 0x37);
+}
+
+static inline u32 rv_auipc(u8 rd, u32 imm31_12)
+{
+ return rv_u_insn(imm31_12, rd, 0x17);
+}
+
+static inline u32 rv_add(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 0, rd, 0x33);
+}
+
+static inline u32 rv_sub(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x33);
+}
+
+static inline u32 rv_sltu(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 3, rd, 0x33);
+}
+
+static inline u32 rv_and(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 7, rd, 0x33);
+}
+
+static inline u32 rv_or(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 6, rd, 0x33);
+}
+
+static inline u32 rv_xor(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 4, rd, 0x33);
+}
+
+static inline u32 rv_sll(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 1, rd, 0x33);
+}
+
+static inline u32 rv_srl(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 5, rd, 0x33);
+}
+
+static inline u32 rv_sra(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x33);
+}
+
+static inline u32 rv_mul(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 0, rd, 0x33);
+}
+
+static inline u32 rv_mulhu(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 3, rd, 0x33);
+}
+
+static inline u32 rv_divu(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 5, rd, 0x33);
+}
+
+static inline u32 rv_remu(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 7, rd, 0x33);
+}
+
+static inline u32 rv_jal(u8 rd, u32 imm20_1)
+{
+ return rv_j_insn(imm20_1, rd, 0x6f);
+}
+
+static inline u32 rv_jalr(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 0, rd, 0x67);
+}
+
+static inline u32 rv_beq(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_b_insn(imm12_1, rs2, rs1, 0, 0x63);
+}
+
+static inline u32 rv_bne(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_b_insn(imm12_1, rs2, rs1, 1, 0x63);
+}
+
+static inline u32 rv_bltu(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_b_insn(imm12_1, rs2, rs1, 6, 0x63);
+}
+
+static inline u32 rv_bgtu(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_bltu(rs2, rs1, imm12_1);
+}
+
+static inline u32 rv_bgeu(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_b_insn(imm12_1, rs2, rs1, 7, 0x63);
+}
+
+static inline u32 rv_bleu(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_bgeu(rs2, rs1, imm12_1);
+}
+
+static inline u32 rv_blt(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_b_insn(imm12_1, rs2, rs1, 4, 0x63);
+}
+
+static inline u32 rv_bgt(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_blt(rs2, rs1, imm12_1);
+}
+
+static inline u32 rv_bge(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_b_insn(imm12_1, rs2, rs1, 5, 0x63);
+}
+
+static inline u32 rv_ble(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_bge(rs2, rs1, imm12_1);
+}
+
+static inline u32 rv_lw(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 2, rd, 0x03);
+}
+
+static inline u32 rv_lbu(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 4, rd, 0x03);
+}
+
+static inline u32 rv_lhu(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 5, rd, 0x03);
+}
+
+static inline u32 rv_sb(u8 rs1, u16 imm11_0, u8 rs2)
+{
+ return rv_s_insn(imm11_0, rs2, rs1, 0, 0x23);
+}
+
+static inline u32 rv_sh(u8 rs1, u16 imm11_0, u8 rs2)
+{
+ return rv_s_insn(imm11_0, rs2, rs1, 1, 0x23);
+}
+
+static inline u32 rv_sw(u8 rs1, u16 imm11_0, u8 rs2)
+{
+ return rv_s_insn(imm11_0, rs2, rs1, 2, 0x23);
+}
+
+static inline u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+/*
+ * RV64-only instructions.
+ *
+ * These instructions are not available on RV32. Wrap them below a #if to
+ * ensure that the RV32 JIT doesn't emit any of these instructions.
+ */
+
+#if __riscv_xlen == 64
+
+static inline u32 rv_addiw(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 0, rd, 0x1b);
+}
+
+static inline u32 rv_slliw(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 1, rd, 0x1b);
+}
+
+static inline u32 rv_srliw(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 5, rd, 0x1b);
+}
+
+static inline u32 rv_sraiw(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x1b);
+}
+
+static inline u32 rv_addw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 0, rd, 0x3b);
+}
+
+static inline u32 rv_subw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x3b);
+}
+
+static inline u32 rv_sllw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 1, rd, 0x3b);
+}
+
+static inline u32 rv_srlw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 5, rd, 0x3b);
+}
+
+static inline u32 rv_sraw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x3b);
+}
+
+static inline u32 rv_mulw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 0, rd, 0x3b);
+}
+
+static inline u32 rv_divuw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 5, rd, 0x3b);
+}
+
+static inline u32 rv_remuw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 7, rd, 0x3b);
+}
+
+static inline u32 rv_ld(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 3, rd, 0x03);
+}
+
+static inline u32 rv_lwu(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 6, rd, 0x03);
+}
+
+static inline u32 rv_sd(u8 rs1, u16 imm11_0, u8 rs2)
+{
+ return rv_s_insn(imm11_0, rs2, rs1, 3, 0x23);
+}
+
+static inline u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
+#endif /* __riscv_xlen == 64 */
+
+void bpf_jit_build_prologue(struct rv_jit_context *ctx);
+void bpf_jit_build_epilogue(struct rv_jit_context *ctx);
+
+int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
+ bool extra_pass);
+
+#endif /* _BPF_JIT_H */
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
new file mode 100644
index 000000000000..302934177760
--- /dev/null
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -0,0 +1,1310 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BPF JIT compiler for RV32G
+ *
+ * Copyright (c) 2020 Luke Nelson <luke.r.nels@gmail.com>
+ * Copyright (c) 2020 Xi Wang <xi.wang@gmail.com>
+ *
+ * The code is based on the BPF JIT compiler for RV64G by Björn Töpel and
+ * the BPF JIT compiler for 32-bit ARM by Shubham Bansal and Mircea Gherzan.
+ */
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include "bpf_jit.h"
+
+enum {
+ /* Stack layout - these are offsets from (top of stack - 4). */
+ BPF_R6_HI,
+ BPF_R6_LO,
+ BPF_R7_HI,
+ BPF_R7_LO,
+ BPF_R8_HI,
+ BPF_R8_LO,
+ BPF_R9_HI,
+ BPF_R9_LO,
+ BPF_AX_HI,
+ BPF_AX_LO,
+ /* Stack space for BPF_REG_6 through BPF_REG_9 and BPF_REG_AX. */
+ BPF_JIT_SCRATCH_REGS,
+};
+
+#define STACK_OFFSET(k) (-4 - ((k) * 4))
+
+#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
+#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
+
+#define RV_REG_TCC RV_REG_T6
+#define RV_REG_TCC_SAVED RV_REG_S7
+
+static const s8 bpf2rv32[][2] = {
+ /* Return value from in-kernel function, and exit value from eBPF. */
+ [BPF_REG_0] = {RV_REG_S2, RV_REG_S1},
+ /* Arguments from eBPF program to in-kernel function. */
+ [BPF_REG_1] = {RV_REG_A1, RV_REG_A0},
+ [BPF_REG_2] = {RV_REG_A3, RV_REG_A2},
+ [BPF_REG_3] = {RV_REG_A5, RV_REG_A4},
+ [BPF_REG_4] = {RV_REG_A7, RV_REG_A6},
+ [BPF_REG_5] = {RV_REG_S4, RV_REG_S3},
+ /*
+ * Callee-saved registers that in-kernel function will preserve.
+ * Stored on the stack.
+ */
+ [BPF_REG_6] = {STACK_OFFSET(BPF_R6_HI), STACK_OFFSET(BPF_R6_LO)},
+ [BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)},
+ [BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
+ [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
+ /* Read-only frame pointer to access BPF stack. */
+ [BPF_REG_FP] = {RV_REG_S6, RV_REG_S5},
+ /* Temporary register for blinding constants. Stored on the stack. */
+ [BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
+ /*
+ * Temporary registers used by the JIT to operate on registers stored
+ * on the stack. Save t0 and t1 to be used as temporaries in generated
+ * code.
+ */
+ [TMP_REG_1] = {RV_REG_T3, RV_REG_T2},
+ [TMP_REG_2] = {RV_REG_T5, RV_REG_T4},
+};
+
+static s8 hi(const s8 *r)
+{
+ return r[0];
+}
+
+static s8 lo(const s8 *r)
+{
+ return r[1];
+}
+
+static void emit_imm(const s8 rd, s32 imm, struct rv_jit_context *ctx)
+{
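+	/*
+	 * lui loads bits 31:12 and addi then adds a sign-extended 12-bit
+	 * immediate; adding 1 << 11 up front compensates for that sign
+	 * extension whenever bit 11 of imm is set.
+	 */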
+ u32 upper = (imm + (1 << 11)) >> 12;
+ u32 lower = imm & 0xfff;
+
+ if (upper) {
+ emit(rv_lui(rd, upper), ctx);
+ emit(rv_addi(rd, rd, lower), ctx);
+ } else {
+ emit(rv_addi(rd, RV_REG_ZERO, lower), ctx);
+ }
+}
+
+static void emit_imm32(const s8 *rd, s32 imm, struct rv_jit_context *ctx)
+{
+ /* Emit immediate into lower bits. */
+ emit_imm(lo(rd), imm, ctx);
+
+ /* Sign-extend into upper bits. */
+ if (imm >= 0)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ else
+ emit(rv_addi(hi(rd), RV_REG_ZERO, -1), ctx);
+}
+
+static void emit_imm64(const s8 *rd, s32 imm_hi, s32 imm_lo,
+ struct rv_jit_context *ctx)
+{
+ emit_imm(lo(rd), imm_lo, ctx);
+ emit_imm(hi(rd), imm_hi, ctx);
+}
+
+static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
+{
+ int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 4;
+ const s8 *r0 = bpf2rv32[BPF_REG_0];
+
+ store_offset -= 4 * BPF_JIT_SCRATCH_REGS;
+
+ /* Set return value if not tail call. */
+ if (!is_tail_call) {
+ emit(rv_addi(RV_REG_A0, lo(r0), 0), ctx);
+ emit(rv_addi(RV_REG_A1, hi(r0), 0), ctx);
+ }
+
+ /* Restore callee-saved registers. */
+ emit(rv_lw(RV_REG_RA, store_offset - 0, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_FP, store_offset - 4, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S1, store_offset - 8, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S2, store_offset - 12, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S3, store_offset - 16, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S4, store_offset - 20, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S5, store_offset - 24, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S6, store_offset - 28, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S7, store_offset - 32, RV_REG_SP), ctx);
+
+ emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx);
+
+ if (is_tail_call) {
+ /*
+ * goto *(t0 + 4);
+ * Skips first instruction of prologue which initializes tail
+ * call counter. Assumes t0 contains address of target program,
+ * see emit_bpf_tail_call.
+ */
+ emit(rv_jalr(RV_REG_ZERO, RV_REG_T0, 4), ctx);
+ } else {
+ emit(rv_jalr(RV_REG_ZERO, RV_REG_RA, 0), ctx);
+ }
+}
+
+static bool is_stacked(s8 reg)
+{
+ return reg < 0;
+}
+
+static const s8 *bpf_get_reg64(const s8 *reg, const s8 *tmp,
+ struct rv_jit_context *ctx)
+{
+ if (is_stacked(hi(reg))) {
+ emit(rv_lw(hi(tmp), hi(reg), RV_REG_FP), ctx);
+ emit(rv_lw(lo(tmp), lo(reg), RV_REG_FP), ctx);
+ reg = tmp;
+ }
+ return reg;
+}
+
+static void bpf_put_reg64(const s8 *reg, const s8 *src,
+ struct rv_jit_context *ctx)
+{
+ if (is_stacked(hi(reg))) {
+ emit(rv_sw(RV_REG_FP, hi(reg), hi(src)), ctx);
+ emit(rv_sw(RV_REG_FP, lo(reg), lo(src)), ctx);
+ }
+}
+
+static const s8 *bpf_get_reg32(const s8 *reg, const s8 *tmp,
+ struct rv_jit_context *ctx)
+{
+ if (is_stacked(lo(reg))) {
+ emit(rv_lw(lo(tmp), lo(reg), RV_REG_FP), ctx);
+ reg = tmp;
+ }
+ return reg;
+}
+
+static void bpf_put_reg32(const s8 *reg, const s8 *src,
+ struct rv_jit_context *ctx)
+{
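+	/*
+	 * A 32-bit subregister write must also clear the high word of
+	 * the 64-bit BPF register, unless the verifier has already
+	 * guaranteed zero extension (prog->aux->verifier_zext).
+	 */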
+ if (is_stacked(lo(reg))) {
+ emit(rv_sw(RV_REG_FP, lo(reg), lo(src)), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit(rv_sw(RV_REG_FP, hi(reg), RV_REG_ZERO), ctx);
+ } else if (!ctx->prog->aux->verifier_zext) {
+ emit(rv_addi(hi(reg), RV_REG_ZERO, 0), ctx);
+ }
+}
+
+static void emit_jump_and_link(u8 rd, s32 rvoff, bool force_jalr,
+ struct rv_jit_context *ctx)
+{
+ s32 upper, lower;
+
+ if (rvoff && is_21b_int(rvoff) && !force_jalr) {
+ emit(rv_jal(rd, rvoff >> 1), ctx);
+ return;
+ }
+
+ upper = (rvoff + (1 << 11)) >> 12;
+ lower = rvoff & 0xfff;
+ emit(rv_auipc(RV_REG_T1, upper), ctx);
+ emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
+}
+
+static void emit_alu_i64(const s8 *dst, s32 imm,
+ struct rv_jit_context *ctx, const u8 op)
+{
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+
+ switch (op) {
+ case BPF_MOV:
+ emit_imm32(rd, imm, ctx);
+ break;
+ case BPF_AND:
+ if (is_12b_int(imm)) {
+ emit(rv_andi(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_and(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ if (imm >= 0)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ break;
+ case BPF_OR:
+ if (is_12b_int(imm)) {
+ emit(rv_ori(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_or(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ if (imm < 0)
+ emit(rv_ori(hi(rd), RV_REG_ZERO, -1), ctx);
+ break;
+ case BPF_XOR:
+ if (is_12b_int(imm)) {
+ emit(rv_xori(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_xor(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ if (imm < 0)
+ emit(rv_xori(hi(rd), hi(rd), -1), ctx);
+ break;
+ case BPF_LSH:
+ if (imm >= 32) {
+ emit(rv_slli(hi(rd), lo(rd), imm - 32), ctx);
+ emit(rv_addi(lo(rd), RV_REG_ZERO, 0), ctx);
+ } else if (imm == 0) {
+ /* Do nothing. */
+ } else {
+ emit(rv_srli(RV_REG_T0, lo(rd), 32 - imm), ctx);
+ emit(rv_slli(hi(rd), hi(rd), imm), ctx);
+ emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx);
+ emit(rv_slli(lo(rd), lo(rd), imm), ctx);
+ }
+ break;
+ case BPF_RSH:
+ if (imm >= 32) {
+ emit(rv_srli(lo(rd), hi(rd), imm - 32), ctx);
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ } else if (imm == 0) {
+ /* Do nothing. */
+ } else {
+ emit(rv_slli(RV_REG_T0, hi(rd), 32 - imm), ctx);
+ emit(rv_srli(lo(rd), lo(rd), imm), ctx);
+ emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
+ emit(rv_srli(hi(rd), hi(rd), imm), ctx);
+ }
+ break;
+ case BPF_ARSH:
+ if (imm >= 32) {
+ emit(rv_srai(lo(rd), hi(rd), imm - 32), ctx);
+ emit(rv_srai(hi(rd), hi(rd), 31), ctx);
+ } else if (imm == 0) {
+ /* Do nothing. */
+ } else {
+ emit(rv_slli(RV_REG_T0, hi(rd), 32 - imm), ctx);
+ emit(rv_srli(lo(rd), lo(rd), imm), ctx);
+ emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
+ emit(rv_srai(hi(rd), hi(rd), imm), ctx);
+ }
+ break;
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+}
+
+static void emit_alu_i32(const s8 *dst, s32 imm,
+ struct rv_jit_context *ctx, const u8 op)
+{
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *rd = bpf_get_reg32(dst, tmp1, ctx);
+
+ switch (op) {
+ case BPF_MOV:
+ emit_imm(lo(rd), imm, ctx);
+ break;
+ case BPF_ADD:
+ if (is_12b_int(imm)) {
+ emit(rv_addi(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_add(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_SUB:
+ if (is_12b_int(-imm)) {
+ emit(rv_addi(lo(rd), lo(rd), -imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_sub(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_AND:
+ if (is_12b_int(imm)) {
+ emit(rv_andi(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_and(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_OR:
+ if (is_12b_int(imm)) {
+ emit(rv_ori(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_or(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_XOR:
+ if (is_12b_int(imm)) {
+ emit(rv_xori(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_xor(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_LSH:
+ if (is_12b_int(imm)) {
+ emit(rv_slli(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_sll(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_RSH:
+ if (is_12b_int(imm)) {
+ emit(rv_srli(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_srl(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_ARSH:
+ if (is_12b_int(imm)) {
+ emit(rv_srai(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_sra(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ }
+
+ bpf_put_reg32(dst, rd, ctx);
+}
+
+static void emit_alu_r64(const s8 *dst, const s8 *src,
+ struct rv_jit_context *ctx, const u8 op)
+{
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *tmp2 = bpf2rv32[TMP_REG_2];
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+ const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
+
+ switch (op) {
+ case BPF_MOV:
+ emit(rv_addi(lo(rd), lo(rs), 0), ctx);
+ emit(rv_addi(hi(rd), hi(rs), 0), ctx);
+ break;
+ case BPF_ADD:
+ if (rd == rs) {
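+			/*
+			 * rd == rs: the sltu carry test below would read
+			 * the already-updated low word, so compute
+			 * 2 * rd with a 1-bit left shift instead.
+			 */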
+ emit(rv_srli(RV_REG_T0, lo(rd), 31), ctx);
+ emit(rv_slli(hi(rd), hi(rd), 1), ctx);
+ emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx);
+ emit(rv_slli(lo(rd), lo(rd), 1), ctx);
+ } else {
+ emit(rv_add(lo(rd), lo(rd), lo(rs)), ctx);
+ emit(rv_sltu(RV_REG_T0, lo(rd), lo(rs)), ctx);
+ emit(rv_add(hi(rd), hi(rd), hi(rs)), ctx);
+ emit(rv_add(hi(rd), hi(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_SUB:
+ emit(rv_sub(RV_REG_T1, hi(rd), hi(rs)), ctx);
+ emit(rv_sltu(RV_REG_T0, lo(rd), lo(rs)), ctx);
+ emit(rv_sub(hi(rd), RV_REG_T1, RV_REG_T0), ctx);
+ emit(rv_sub(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_AND:
+ emit(rv_and(lo(rd), lo(rd), lo(rs)), ctx);
+ emit(rv_and(hi(rd), hi(rd), hi(rs)), ctx);
+ break;
+ case BPF_OR:
+ emit(rv_or(lo(rd), lo(rd), lo(rs)), ctx);
+ emit(rv_or(hi(rd), hi(rd), hi(rs)), ctx);
+ break;
+ case BPF_XOR:
+ emit(rv_xor(lo(rd), lo(rd), lo(rs)), ctx);
+ emit(rv_xor(hi(rd), hi(rd), hi(rs)), ctx);
+ break;
+ case BPF_MUL:
+ emit(rv_mul(RV_REG_T0, hi(rs), lo(rd)), ctx);
+ emit(rv_mul(hi(rd), hi(rd), lo(rs)), ctx);
+ emit(rv_mulhu(RV_REG_T1, lo(rd), lo(rs)), ctx);
+ emit(rv_add(hi(rd), hi(rd), RV_REG_T0), ctx);
+ emit(rv_mul(lo(rd), lo(rd), lo(rs)), ctx);
+ emit(rv_add(hi(rd), hi(rd), RV_REG_T1), ctx);
+ break;
+ case BPF_LSH:
+ emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx);
+ emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx);
+ emit(rv_sll(hi(rd), lo(rd), RV_REG_T0), ctx);
+ emit(rv_addi(lo(rd), RV_REG_ZERO, 0), ctx);
+ emit(rv_jal(RV_REG_ZERO, 16), ctx);
+ emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx);
+ emit(rv_srli(RV_REG_T0, lo(rd), 1), ctx);
+ emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx);
+ emit(rv_srl(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx);
+ emit(rv_sll(hi(rd), hi(rd), lo(rs)), ctx);
+ emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx);
+ emit(rv_sll(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_RSH:
+ emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx);
+ emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx);
+ emit(rv_srl(lo(rd), hi(rd), RV_REG_T0), ctx);
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ emit(rv_jal(RV_REG_ZERO, 16), ctx);
+ emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx);
+ emit(rv_slli(RV_REG_T0, hi(rd), 1), ctx);
+ emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx);
+ emit(rv_sll(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx);
+ emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx);
+ emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
+ emit(rv_srl(hi(rd), hi(rd), lo(rs)), ctx);
+ break;
+ case BPF_ARSH:
+ emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx);
+ emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx);
+ emit(rv_sra(lo(rd), hi(rd), RV_REG_T0), ctx);
+ emit(rv_srai(hi(rd), hi(rd), 31), ctx);
+ emit(rv_jal(RV_REG_ZERO, 16), ctx);
+ emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx);
+ emit(rv_slli(RV_REG_T0, hi(rd), 1), ctx);
+ emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx);
+ emit(rv_sll(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx);
+ emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx);
+ emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
+ emit(rv_sra(hi(rd), hi(rd), lo(rs)), ctx);
+ break;
+ case BPF_NEG:
+ emit(rv_sub(lo(rd), RV_REG_ZERO, lo(rd)), ctx);
+ emit(rv_sltu(RV_REG_T0, RV_REG_ZERO, lo(rd)), ctx);
+ emit(rv_sub(hi(rd), RV_REG_ZERO, hi(rd)), ctx);
+ emit(rv_sub(hi(rd), hi(rd), RV_REG_T0), ctx);
+ break;
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+}
+
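+/*
+ * Host-side sketch of the BPF_ADD carry trick above (hypothetical
+ * helper, not part of this patch): after the 32-bit add, an unsigned
+ * wraparound occurred iff the result is below one of the operands,
+ * which is exactly what sltu tests.
+ */
+static inline u64 __maybe_unused add64_via_32(u32 a_hi, u32 a_lo,
+ u32 b_hi, u32 b_lo)
+{
+ u32 lo = a_lo + b_lo;
+ u32 carry = lo < b_lo; /* mirrors: sltu t0, lo(rd), lo(rs) */
+ u32 hi = a_hi + b_hi + carry;
+
+ return ((u64)hi << 32) | lo;
+}
+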
+static void emit_alu_r32(const s8 *dst, const s8 *src,
+ struct rv_jit_context *ctx, const u8 op)
+{
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *tmp2 = bpf2rv32[TMP_REG_2];
+ const s8 *rd = bpf_get_reg32(dst, tmp1, ctx);
+ const s8 *rs = bpf_get_reg32(src, tmp2, ctx);
+
+ switch (op) {
+ case BPF_MOV:
+ emit(rv_addi(lo(rd), lo(rs), 0), ctx);
+ break;
+ case BPF_ADD:
+ emit(rv_add(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_SUB:
+ emit(rv_sub(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_AND:
+ emit(rv_and(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_OR:
+ emit(rv_or(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_XOR:
+ emit(rv_xor(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_MUL:
+ emit(rv_mul(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_DIV:
+ emit(rv_divu(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_MOD:
+ emit(rv_remu(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_LSH:
+ emit(rv_sll(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_RSH:
+ emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_ARSH:
+ emit(rv_sra(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_NEG:
+ emit(rv_sub(lo(rd), RV_REG_ZERO, lo(rd)), ctx);
+ break;
+ }
+
+ bpf_put_reg32(dst, rd, ctx);
+}
+
+static int emit_branch_r64(const s8 *src1, const s8 *src2, s32 rvoff,
+ struct rv_jit_context *ctx, const u8 op)
+{
+ int e, s = ctx->ninsns;
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *tmp2 = bpf2rv32[TMP_REG_2];
+
+ const s8 *rs1 = bpf_get_reg64(src1, tmp1, ctx);
+ const s8 *rs2 = bpf_get_reg64(src2, tmp2, ctx);
+
+ /*
+ * NO_JUMP skips over the rest of the instructions and the
+ * emit_jump_and_link, meaning the BPF branch is not taken.
+ * JUMP skips directly to the emit_jump_and_link, meaning
+ * the BPF branch is taken.
+ *
+ * The fallthrough case results in the BPF branch being taken.
+ */
+#define NO_JUMP(idx) (6 + (2 * (idx)))
+#define JUMP(idx) (2 + (2 * (idx)))
+
+ switch (op) {
+ case BPF_JEQ:
+ emit(rv_bne(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bne(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JGT:
+ emit(rv_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JLT:
+ emit(rv_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JGE:
+ emit(rv_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JLE:
+ emit(rv_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JNE:
+ emit(rv_bne(hi(rs1), hi(rs2), JUMP(1)), ctx);
+ emit(rv_beq(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSGT:
+ emit(rv_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSLT:
+ emit(rv_blt(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSGE:
+ emit(rv_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSLE:
+ emit(rv_blt(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSET:
+ emit(rv_and(RV_REG_T0, hi(rs1), hi(rs2)), ctx);
+ emit(rv_bne(RV_REG_T0, RV_REG_ZERO, JUMP(2)), ctx);
+ emit(rv_and(RV_REG_T0, lo(rs1), lo(rs2)), ctx);
+ emit(rv_beq(RV_REG_T0, RV_REG_ZERO, NO_JUMP(0)), ctx);
+ break;
+ }
+
+#undef NO_JUMP
+#undef JUMP
+
+ e = ctx->ninsns;
+ /* Adjust for extra insns. */
+ rvoff -= (e - s) << 2;
+ emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
+ return 0;
+}
+
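+/*
+ * Worked example of the offsets above (branch immediates count 16-bit
+ * halfwords, so one 4-byte RV insn is 2). For BPF_JGT, assuming the
+ * forced two-insn emit_jump_and_link form, the emitted layout is:
+ *
+ * hw 0: bgtu hi(rs1), hi(rs2), +6 JUMP(2) -> hw 6, taken
+ * hw 2: bltu hi(rs1), hi(rs2), +8 NO_JUMP(1) -> hw 10, not taken
+ * hw 4: bleu lo(rs1), lo(rs2), +6 NO_JUMP(0) -> hw 10, not taken
+ * hw 6: auipc t1, ... emit_jump_and_link
+ * hw 8: jalr zero, t1, ...
+ * hw 10: first insn past the jump
+ */
+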
+static int emit_bcc(u8 op, u8 rd, u8 rs, int rvoff, struct rv_jit_context *ctx)
+{
+ int e, s = ctx->ninsns;
+ bool far = false;
+ int off;
+
+ if (op == BPF_JSET) {
+ /*
+ * BPF_JSET is a special case: it has no inverse so we always
+ * treat it as a far branch.
+ */
+ far = true;
+ } else if (!is_13b_int(rvoff)) {
+ op = invert_bpf_cond(op);
+ far = true;
+ }
+
+ /*
+ * For a far branch, the condition is negated and we jump over the
+ * branch itself, and the two instructions from emit_jump_and_link.
+ * For a near branch, just use rvoff (converted to halfword units).
+ */
+ off = far ? 6 : (rvoff >> 1);
+
+ switch (op) {
+ case BPF_JEQ:
+ emit(rv_beq(rd, rs, off), ctx);
+ break;
+ case BPF_JGT:
+ emit(rv_bgtu(rd, rs, off), ctx);
+ break;
+ case BPF_JLT:
+ emit(rv_bltu(rd, rs, off), ctx);
+ break;
+ case BPF_JGE:
+ emit(rv_bgeu(rd, rs, off), ctx);
+ break;
+ case BPF_JLE:
+ emit(rv_bleu(rd, rs, off), ctx);
+ break;
+ case BPF_JNE:
+ emit(rv_bne(rd, rs, off), ctx);
+ break;
+ case BPF_JSGT:
+ emit(rv_bgt(rd, rs, off), ctx);
+ break;
+ case BPF_JSLT:
+ emit(rv_blt(rd, rs, off), ctx);
+ break;
+ case BPF_JSGE:
+ emit(rv_bge(rd, rs, off), ctx);
+ break;
+ case BPF_JSLE:
+ emit(rv_ble(rd, rs, off), ctx);
+ break;
+ case BPF_JSET:
+ emit(rv_and(RV_REG_T0, rd, rs), ctx);
+ emit(rv_beq(RV_REG_T0, RV_REG_ZERO, off), ctx);
+ break;
+ }
+
+ if (far) {
+ e = ctx->ninsns;
+ /* Adjust for extra insns. */
+ rvoff -= (e - s) << 2;
+ emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
+ }
+ return 0;
+}
+
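+/*
+ * Far-branch example for the scheme above: an out-of-range BPF_JEQ is
+ * inverted to BPF_JNE, which hops over the two-insn jump (off = 6
+ * halfwords = 3 insns, counted from the branch itself):
+ *
+ * hw 0: bne rd, rs, +6 inverted test skips the far jump
+ * hw 2: auipc t1, hi20(rvoff)
+ * hw 4: jalr zero, t1, lo12(rvoff)
+ * hw 6: original fallthrough
+ */
+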
+static int emit_branch_r32(const s8 *src1, const s8 *src2, s32 rvoff,
+ struct rv_jit_context *ctx, const u8 op)
+{
+ int e, s = ctx->ninsns;
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *tmp2 = bpf2rv32[TMP_REG_2];
+
+ const s8 *rs1 = bpf_get_reg32(src1, tmp1, ctx);
+ const s8 *rs2 = bpf_get_reg32(src2, tmp2, ctx);
+
+ e = ctx->ninsns;
+ /* Adjust for extra insns. */
+ rvoff -= (e - s) << 2;
+
+ if (emit_bcc(op, lo(rs1), lo(rs2), rvoff, ctx))
+ return -1;
+
+ return 0;
+}
+
+static void emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
+{
+ const s8 *r0 = bpf2rv32[BPF_REG_0];
+ const s8 *r5 = bpf2rv32[BPF_REG_5];
+ u32 upper = ((u32)addr + (1 << 11)) >> 12;
+ u32 lower = addr & 0xfff;
+
+ /* R1-R4 already in correct registers---need to push R5 to stack. */
+ emit(rv_addi(RV_REG_SP, RV_REG_SP, -16), ctx);
+ emit(rv_sw(RV_REG_SP, 0, lo(r5)), ctx);
+ emit(rv_sw(RV_REG_SP, 4, hi(r5)), ctx);
+
+ /* Backup TCC. */
+ emit(rv_addi(RV_REG_TCC_SAVED, RV_REG_TCC, 0), ctx);
+
+ /*
+ * Use lui/jalr pair to jump to absolute address. Don't use emit_imm as
+ * the number of emitted instructions should not depend on the value of
+ * addr.
+ */
+ emit(rv_lui(RV_REG_T1, upper), ctx);
+ emit(rv_jalr(RV_REG_RA, RV_REG_T1, lower), ctx);
+
+ /* Restore TCC. */
+ emit(rv_addi(RV_REG_TCC, RV_REG_TCC_SAVED, 0), ctx);
+
+ /* Set return value and restore stack. */
+ emit(rv_addi(lo(r0), RV_REG_A0, 0), ctx);
+ emit(rv_addi(hi(r0), RV_REG_A1, 0), ctx);
+ emit(rv_addi(RV_REG_SP, RV_REG_SP, 16), ctx);
+}
+
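+/*
+ * Host-side sketch of the lui/jalr address split above (hypothetical
+ * helper, not part of this patch): jalr sign-extends its 12-bit
+ * immediate, so the upper part is rounded up by (1 << 11) whenever
+ * the low 12 bits would go negative; the pair then reconstructs addr
+ * exactly.
+ */
+static inline void __maybe_unused split_abs_addr(u32 addr, u32 *upper,
+ s32 *lower)
+{
+ *upper = (addr + (1 << 11)) >> 12; /* lui loads upper << 12 */
+ *lower = (s32)(addr << 20) >> 20; /* sign-extended low 12 bits */
+ /* invariant: (*upper << 12) + *lower == addr */
+}
+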
+static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
+{
+ /*
+ * R1 -> &ctx
+ * R2 -> &array
+ * R3 -> index
+ */
+ int tc_ninsn, off, start_insn = ctx->ninsns;
+ const s8 *arr_reg = bpf2rv32[BPF_REG_2];
+ const s8 *idx_reg = bpf2rv32[BPF_REG_3];
+
+ tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] :
+ ctx->offset[0];
+
+ /* max_entries = array->map.max_entries; */
+ off = offsetof(struct bpf_array, map.max_entries);
+ if (is_12b_check(off, insn))
+ return -1;
+ emit(rv_lw(RV_REG_T1, off, lo(arr_reg)), ctx);
+
+ /*
+ * if (index >= max_entries)
+ * goto out;
+ */
+ off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+ emit_bcc(BPF_JGE, lo(idx_reg), RV_REG_T1, off, ctx);
+
+ /*
+ * if ((temp_tcc = tcc - 1) < 0)
+ * goto out;
+ */
+ emit(rv_addi(RV_REG_T1, RV_REG_TCC, -1), ctx);
+ off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+ emit_bcc(BPF_JSLT, RV_REG_T1, RV_REG_ZERO, off, ctx);
+
+ /*
+ * prog = array->ptrs[index];
+ * if (!prog)
+ * goto out;
+ */
+ emit(rv_slli(RV_REG_T0, lo(idx_reg), 2), ctx);
+ emit(rv_add(RV_REG_T0, RV_REG_T0, lo(arr_reg)), ctx);
+ off = offsetof(struct bpf_array, ptrs);
+ if (is_12b_check(off, insn))
+ return -1;
+ emit(rv_lw(RV_REG_T0, off, RV_REG_T0), ctx);
+ off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+ emit_bcc(BPF_JEQ, RV_REG_T0, RV_REG_ZERO, off, ctx);
+
+ /*
+ * tcc = temp_tcc;
+ * goto *(prog->bpf_func + 4);
+ */
+ off = offsetof(struct bpf_prog, bpf_func);
+ if (is_12b_check(off, insn))
+ return -1;
+ emit(rv_lw(RV_REG_T0, off, RV_REG_T0), ctx);
+ emit(rv_addi(RV_REG_TCC, RV_REG_T1, 0), ctx);
+ /* Epilogue jumps to *(t0 + 4). */
+ __build_epilogue(true, ctx);
+ return 0;
+}
+
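+/*
+ * Pulling the comments above together, the emitted tail call is this
+ * C sketch; the "+ 4" skips the first prologue insn, which
+ * (re)initializes the tail-call counter:
+ *
+ * if (index >= array->map.max_entries)
+ * goto out;
+ * if (tcc - 1 < 0)
+ * goto out;
+ * prog = array->ptrs[index];
+ * if (!prog)
+ * goto out;
+ * tcc = tcc - 1;
+ * goto *(prog->bpf_func + 4);
+ */
+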
+static int emit_load_r64(const s8 *dst, const s8 *src, s16 off,
+ struct rv_jit_context *ctx, const u8 size)
+{
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *tmp2 = bpf2rv32[TMP_REG_2];
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+ const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
+
+ emit_imm(RV_REG_T0, off, ctx);
+ emit(rv_add(RV_REG_T0, RV_REG_T0, lo(rs)), ctx);
+
+ switch (size) {
+ case BPF_B:
+ emit(rv_lbu(lo(rd), 0, RV_REG_T0), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ break;
+ case BPF_H:
+ emit(rv_lhu(lo(rd), 0, RV_REG_T0), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ break;
+ case BPF_W:
+ emit(rv_lw(lo(rd), 0, RV_REG_T0), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ break;
+ case BPF_DW:
+ emit(rv_lw(lo(rd), 0, RV_REG_T0), ctx);
+ emit(rv_lw(hi(rd), 4, RV_REG_T0), ctx);
+ break;
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+ return 0;
+}
+
+static int emit_store_r64(const s8 *dst, const s8 *src, s16 off,
+ struct rv_jit_context *ctx, const u8 size,
+ const u8 mode)
+{
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *tmp2 = bpf2rv32[TMP_REG_2];
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+ const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
+
+ if (mode == BPF_XADD && size != BPF_W)
+ return -1;
+
+ emit_imm(RV_REG_T0, off, ctx);
+ emit(rv_add(RV_REG_T0, RV_REG_T0, lo(rd)), ctx);
+
+ switch (size) {
+ case BPF_B:
+ emit(rv_sb(RV_REG_T0, 0, lo(rs)), ctx);
+ break;
+ case BPF_H:
+ emit(rv_sh(RV_REG_T0, 0, lo(rs)), ctx);
+ break;
+ case BPF_W:
+ switch (mode) {
+ case BPF_MEM:
+ emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
+ break;
+ case BPF_XADD:
+ emit(rv_amoadd_w(RV_REG_ZERO, lo(rs), RV_REG_T0, 0, 0),
+ ctx);
+ break;
+ }
+ break;
+ case BPF_DW:
+ emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
+ emit(rv_sw(RV_REG_T0, 4, hi(rs)), ctx);
+ break;
+ }
+
+ return 0;
+}
+
+static void emit_rev16(const s8 rd, struct rv_jit_context *ctx)
+{
+ emit(rv_slli(rd, rd, 16), ctx);
+ emit(rv_slli(RV_REG_T1, rd, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+ emit(rv_add(RV_REG_T1, rd, RV_REG_T1), ctx);
+ emit(rv_srli(rd, RV_REG_T1, 16), ctx);
+}
+
+static void emit_rev32(const s8 rd, struct rv_jit_context *ctx)
+{
+ emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 0), ctx);
+ emit(rv_andi(RV_REG_T0, rd, 255), ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
+ emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+ emit(rv_andi(RV_REG_T0, rd, 255), ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
+ emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+ emit(rv_andi(RV_REG_T0, rd, 255), ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
+ emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+ emit(rv_andi(RV_REG_T0, rd, 255), ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
+ emit(rv_addi(rd, RV_REG_T1, 0), ctx);
+}
+
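+/*
+ * Host-side model of the reversal above (hypothetical helper, not
+ * part of this patch): emit_rev32 accumulates the source a byte at a
+ * time, shifting the partial result left between bytes.
+ */
+static inline u32 __maybe_unused rev32_model(u32 x)
+{
+ u32 out = 0;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ out = (out << 8) | (x & 0xff); /* append next lowest byte */
+ x >>= 8;
+ }
+ return out;
+}
+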
+static void emit_zext64(const s8 *dst, struct rv_jit_context *ctx)
+{
+ const s8 *rd;
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+
+ rd = bpf_get_reg64(dst, tmp1, ctx);
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ bpf_put_reg64(dst, rd, ctx);
+}
+
+int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
+ bool extra_pass)
+{
+ bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
+ BPF_CLASS(insn->code) == BPF_JMP;
+ int s, e, rvoff, i = insn - ctx->prog->insnsi;
+ u8 code = insn->code;
+ s16 off = insn->off;
+ s32 imm = insn->imm;
+
+ const s8 *dst = bpf2rv32[insn->dst_reg];
+ const s8 *src = bpf2rv32[insn->src_reg];
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *tmp2 = bpf2rv32[TMP_REG_2];
+
+ switch (code) {
+ case BPF_ALU64 | BPF_MOV | BPF_X:
+
+ case BPF_ALU64 | BPF_ADD | BPF_X:
+ case BPF_ALU64 | BPF_ADD | BPF_K:
+
+ case BPF_ALU64 | BPF_SUB | BPF_X:
+ case BPF_ALU64 | BPF_SUB | BPF_K:
+
+ case BPF_ALU64 | BPF_AND | BPF_X:
+ case BPF_ALU64 | BPF_OR | BPF_X:
+ case BPF_ALU64 | BPF_XOR | BPF_X:
+
+ case BPF_ALU64 | BPF_MUL | BPF_X:
+ case BPF_ALU64 | BPF_MUL | BPF_K:
+
+ case BPF_ALU64 | BPF_LSH | BPF_X:
+ case BPF_ALU64 | BPF_RSH | BPF_X:
+ case BPF_ALU64 | BPF_ARSH | BPF_X:
+ if (BPF_SRC(code) == BPF_K) {
+ emit_imm32(tmp2, imm, ctx);
+ src = tmp2;
+ }
+ emit_alu_r64(dst, src, ctx, BPF_OP(code));
+ break;
+
+ case BPF_ALU64 | BPF_NEG:
+ emit_alu_r64(dst, tmp2, ctx, BPF_OP(code));
+ break;
+
+ case BPF_ALU64 | BPF_DIV | BPF_X:
+ case BPF_ALU64 | BPF_DIV | BPF_K:
+ case BPF_ALU64 | BPF_MOD | BPF_X:
+ case BPF_ALU64 | BPF_MOD | BPF_K:
+ goto notsupported;
+
+ case BPF_ALU64 | BPF_MOV | BPF_K:
+ case BPF_ALU64 | BPF_AND | BPF_K:
+ case BPF_ALU64 | BPF_OR | BPF_K:
+ case BPF_ALU64 | BPF_XOR | BPF_K:
+ case BPF_ALU64 | BPF_LSH | BPF_K:
+ case BPF_ALU64 | BPF_RSH | BPF_K:
+ case BPF_ALU64 | BPF_ARSH | BPF_K:
+ emit_alu_i64(dst, imm, ctx, BPF_OP(code));
+ break;
+
+ case BPF_ALU | BPF_MOV | BPF_X:
+ if (imm == 1) {
+ /* Special mov32 for zext. */
+ emit_zext64(dst, ctx);
+ break;
+ }
+ /* Fallthrough. */
+
+ case BPF_ALU | BPF_ADD | BPF_X:
+ case BPF_ALU | BPF_SUB | BPF_X:
+ case BPF_ALU | BPF_AND | BPF_X:
+ case BPF_ALU | BPF_OR | BPF_X:
+ case BPF_ALU | BPF_XOR | BPF_X:
+
+ case BPF_ALU | BPF_MUL | BPF_X:
+ case BPF_ALU | BPF_MUL | BPF_K:
+
+ case BPF_ALU | BPF_DIV | BPF_X:
+ case BPF_ALU | BPF_DIV | BPF_K:
+
+ case BPF_ALU | BPF_MOD | BPF_X:
+ case BPF_ALU | BPF_MOD | BPF_K:
+
+ case BPF_ALU | BPF_LSH | BPF_X:
+ case BPF_ALU | BPF_RSH | BPF_X:
+ case BPF_ALU | BPF_ARSH | BPF_X:
+ if (BPF_SRC(code) == BPF_K) {
+ emit_imm32(tmp2, imm, ctx);
+ src = tmp2;
+ }
+ emit_alu_r32(dst, src, ctx, BPF_OP(code));
+ break;
+
+ case BPF_ALU | BPF_MOV | BPF_K:
+ case BPF_ALU | BPF_ADD | BPF_K:
+ case BPF_ALU | BPF_SUB | BPF_K:
+ case BPF_ALU | BPF_AND | BPF_K:
+ case BPF_ALU | BPF_OR | BPF_K:
+ case BPF_ALU | BPF_XOR | BPF_K:
+ case BPF_ALU | BPF_LSH | BPF_K:
+ case BPF_ALU | BPF_RSH | BPF_K:
+ case BPF_ALU | BPF_ARSH | BPF_K:
+ /*
+ * mul,div,mod are handled in the BPF_X case since there are
+ * no RISC-V I-type equivalents.
+ */
+ emit_alu_i32(dst, imm, ctx, BPF_OP(code));
+ break;
+
+ case BPF_ALU | BPF_NEG:
+ /*
+ * src is ignored---choose tmp2 as a dummy register since it
+ * is not on the stack.
+ */
+ emit_alu_r32(dst, tmp2, ctx, BPF_OP(code));
+ break;
+
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ {
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+
+ switch (imm) {
+ case 16:
+ emit(rv_slli(lo(rd), lo(rd), 16), ctx);
+ emit(rv_srli(lo(rd), lo(rd), 16), ctx);
+ /* Fallthrough. */
+ case 32:
+ if (!ctx->prog->aux->verifier_zext)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ break;
+ case 64:
+ /* Do nothing. */
+ break;
+ default:
+ pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
+ return -1;
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+ break;
+ }
+
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ {
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+
+ switch (imm) {
+ case 16:
+ emit_rev16(lo(rd), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ break;
+ case 32:
+ emit_rev32(lo(rd), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ break;
+ case 64:
+ /* Swap upper and lower halves. */
+ emit(rv_addi(RV_REG_T0, lo(rd), 0), ctx);
+ emit(rv_addi(lo(rd), hi(rd), 0), ctx);
+ emit(rv_addi(hi(rd), RV_REG_T0, 0), ctx);
+
+ /* Swap each half. */
+ emit_rev32(lo(rd), ctx);
+ emit_rev32(hi(rd), ctx);
+ break;
+ default:
+ pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
+ return -1;
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+ break;
+ }
+
+ case BPF_JMP | BPF_JA:
+ rvoff = rv_offset(i, off, ctx);
+ emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+ break;
+
+ case BPF_JMP | BPF_CALL:
+ {
+ bool fixed;
+ int ret;
+ u64 addr;
+
+ ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
+ &fixed);
+ if (ret < 0)
+ return ret;
+ emit_call(fixed, addr, ctx);
+ break;
+ }
+
+ case BPF_JMP | BPF_TAIL_CALL:
+ if (emit_bpf_tail_call(i, ctx))
+ return -1;
+ break;
+
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+
+ case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ rvoff = rv_offset(i, off, ctx);
+ if (BPF_SRC(code) == BPF_K) {
+ s = ctx->ninsns;
+ emit_imm32(tmp2, imm, ctx);
+ src = tmp2;
+ e = ctx->ninsns;
+ rvoff -= (e - s) << 2;
+ }
+
+ if (is64)
+ emit_branch_r64(dst, src, rvoff, ctx, BPF_OP(code));
+ else
+ emit_branch_r32(dst, src, rvoff, ctx, BPF_OP(code));
+ break;
+
+ case BPF_JMP | BPF_EXIT:
+ if (i == ctx->prog->len - 1)
+ break;
+
+ rvoff = epilogue_offset(ctx);
+ emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+ break;
+
+ case BPF_LD | BPF_IMM | BPF_DW:
+ {
+ struct bpf_insn insn1 = insn[1];
+ s32 imm_lo = imm;
+ s32 imm_hi = insn1.imm;
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+
+ emit_imm64(rd, imm_hi, imm_lo, ctx);
+ bpf_put_reg64(dst, rd, ctx);
+ return 1;
+ }
+
+ case BPF_LDX | BPF_MEM | BPF_B:
+ case BPF_LDX | BPF_MEM | BPF_H:
+ case BPF_LDX | BPF_MEM | BPF_W:
+ case BPF_LDX | BPF_MEM | BPF_DW:
+ if (emit_load_r64(dst, src, off, ctx, BPF_SIZE(code)))
+ return -1;
+ break;
+
+ case BPF_ST | BPF_MEM | BPF_B:
+ case BPF_ST | BPF_MEM | BPF_H:
+ case BPF_ST | BPF_MEM | BPF_W:
+ case BPF_ST | BPF_MEM | BPF_DW:
+
+ case BPF_STX | BPF_MEM | BPF_B:
+ case BPF_STX | BPF_MEM | BPF_H:
+ case BPF_STX | BPF_MEM | BPF_W:
+ case BPF_STX | BPF_MEM | BPF_DW:
+ case BPF_STX | BPF_XADD | BPF_W:
+ if (BPF_CLASS(code) == BPF_ST) {
+ emit_imm32(tmp2, imm, ctx);
+ src = tmp2;
+ }
+
+ if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
+ BPF_MODE(code)))
+ return -1;
+ break;
+
+ /* No hardware support for 8-byte atomics in RV32. */
+ case BPF_STX | BPF_XADD | BPF_DW:
+ /* Fallthrough. */
+
+notsupported:
+ pr_info_once("bpf-jit: not supported: opcode %02x ***\n", code);
+ return -EFAULT;
+
+ default:
+ pr_err("bpf-jit: unknown opcode %02x\n", code);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void bpf_jit_build_prologue(struct rv_jit_context *ctx)
+{
+ /* Make space to save 9 registers: ra, fp, s1--s7. */
+ int stack_adjust = 9 * sizeof(u32), store_offset, bpf_stack_adjust;
+ const s8 *fp = bpf2rv32[BPF_REG_FP];
+ const s8 *r1 = bpf2rv32[BPF_REG_1];
+
+ bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
+ stack_adjust += bpf_stack_adjust;
+
+ store_offset = stack_adjust - 4;
+
+ stack_adjust += 4 * BPF_JIT_SCRATCH_REGS;
+
+ /*
+ * The first instruction sets the tail-call-counter (TCC) register.
+ * This instruction is skipped by tail calls.
+ */
+ emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);
+
+ emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx);
+
+ /* Save callee-save registers. */
+ emit(rv_sw(RV_REG_SP, store_offset - 0, RV_REG_RA), ctx);
+ emit(rv_sw(RV_REG_SP, store_offset - 4, RV_REG_FP), ctx);
+ emit(rv_sw(RV_REG_SP, store_offset - 8, RV_REG_S1), ctx);
+ emit(rv_sw(RV_REG_SP, store_offset - 12, RV_REG_S2), ctx);
+ emit(rv_sw(RV_REG_SP, store_offset - 16, RV_REG_S3), ctx);
+ emit(rv_sw(RV_REG_SP, store_offset - 20, RV_REG_S4), ctx);
+ emit(rv_sw(RV_REG_SP, store_offset - 24, RV_REG_S5), ctx);
+ emit(rv_sw(RV_REG_SP, store_offset - 28, RV_REG_S6), ctx);
+ emit(rv_sw(RV_REG_SP, store_offset - 32, RV_REG_S7), ctx);
+
+ /* Set fp: used as the base address for stacked BPF registers. */
+ emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx);
+
+ /* Set up BPF stack pointer. */
+ emit(rv_addi(lo(fp), RV_REG_SP, bpf_stack_adjust), ctx);
+ emit(rv_addi(hi(fp), RV_REG_ZERO, 0), ctx);
+
+ /* Set up context pointer. */
+ emit(rv_addi(lo(r1), RV_REG_A0, 0), ctx);
+ emit(rv_addi(hi(r1), RV_REG_ZERO, 0), ctx);
+
+ ctx->stack_size = stack_adjust;
+}
+
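+/*
+ * Frame layout implied by the offsets above, high to low addresses
+ * (illustrative only):
+ *
+ * RV FP -> +---------------------------+
+ * | stacked BPF registers | 4 * BPF_JIT_SCRATCH_REGS
+ * +---------------------------+
+ * | saved ra, fp, s1--s7 | 9 * 4
+ * BPF FP -> +---------------------------+
+ * | BPF program stack | round_up(stack_depth, 16)
+ * RV SP -> +---------------------------+
+ */
+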
+void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
+{
+ __build_epilogue(false, ctx);
+}
diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp64.c
index 483f4ad7f4dc..cc1985d8750a 100644
--- a/arch/riscv/net/bpf_jit_comp.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -7,42 +7,7 @@
#include <linux/bpf.h>
#include <linux/filter.h>
-#include <asm/cacheflush.h>
-
-enum {
- RV_REG_ZERO = 0, /* The constant value 0 */
- RV_REG_RA = 1, /* Return address */
- RV_REG_SP = 2, /* Stack pointer */
- RV_REG_GP = 3, /* Global pointer */
- RV_REG_TP = 4, /* Thread pointer */
- RV_REG_T0 = 5, /* Temporaries */
- RV_REG_T1 = 6,
- RV_REG_T2 = 7,
- RV_REG_FP = 8,
- RV_REG_S1 = 9, /* Saved registers */
- RV_REG_A0 = 10, /* Function argument/return values */
- RV_REG_A1 = 11, /* Function arguments */
- RV_REG_A2 = 12,
- RV_REG_A3 = 13,
- RV_REG_A4 = 14,
- RV_REG_A5 = 15,
- RV_REG_A6 = 16,
- RV_REG_A7 = 17,
- RV_REG_S2 = 18, /* Saved registers */
- RV_REG_S3 = 19,
- RV_REG_S4 = 20,
- RV_REG_S5 = 21,
- RV_REG_S6 = 22,
- RV_REG_S7 = 23,
- RV_REG_S8 = 24,
- RV_REG_S9 = 25,
- RV_REG_S10 = 26,
- RV_REG_S11 = 27,
- RV_REG_T3 = 28, /* Temporaries */
- RV_REG_T4 = 29,
- RV_REG_T5 = 30,
- RV_REG_T6 = 31,
-};
+#include "bpf_jit.h"
#define RV_REG_TCC RV_REG_A6
#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */
@@ -73,22 +38,6 @@ enum {
RV_CTX_F_SEEN_S6 = RV_REG_S6,
};
-struct rv_jit_context {
- struct bpf_prog *prog;
- u32 *insns; /* RV insns */
- int ninsns;
- int epilogue_offset;
- int *offset; /* BPF to RV */
- unsigned long flags;
- int stack_size;
-};
-
-struct rv_jit_data {
- struct bpf_binary_header *header;
- u8 *image;
- struct rv_jit_context ctx;
-};
-
static u8 bpf_to_rv_reg(int bpf_reg, struct rv_jit_context *ctx)
{
u8 reg = regmap[bpf_reg];
@@ -156,346 +105,11 @@ static u8 rv_tail_call_reg(struct rv_jit_context *ctx)
return RV_REG_A6;
}
-static void emit(const u32 insn, struct rv_jit_context *ctx)
-{
- if (ctx->insns)
- ctx->insns[ctx->ninsns] = insn;
-
- ctx->ninsns++;
-}
-
-static u32 rv_r_insn(u8 funct7, u8 rs2, u8 rs1, u8 funct3, u8 rd, u8 opcode)
-{
- return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
- (rd << 7) | opcode;
-}
-
-static u32 rv_i_insn(u16 imm11_0, u8 rs1, u8 funct3, u8 rd, u8 opcode)
-{
- return (imm11_0 << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) |
- opcode;
-}
-
-static u32 rv_s_insn(u16 imm11_0, u8 rs2, u8 rs1, u8 funct3, u8 opcode)
-{
- u8 imm11_5 = imm11_0 >> 5, imm4_0 = imm11_0 & 0x1f;
-
- return (imm11_5 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
- (imm4_0 << 7) | opcode;
-}
-
-static u32 rv_sb_insn(u16 imm12_1, u8 rs2, u8 rs1, u8 funct3, u8 opcode)
-{
- u8 imm12 = ((imm12_1 & 0x800) >> 5) | ((imm12_1 & 0x3f0) >> 4);
- u8 imm4_1 = ((imm12_1 & 0xf) << 1) | ((imm12_1 & 0x400) >> 10);
-
- return (imm12 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
- (imm4_1 << 7) | opcode;
-}
-
-static u32 rv_u_insn(u32 imm31_12, u8 rd, u8 opcode)
-{
- return (imm31_12 << 12) | (rd << 7) | opcode;
-}
-
-static u32 rv_uj_insn(u32 imm20_1, u8 rd, u8 opcode)
-{
- u32 imm;
-
- imm = (imm20_1 & 0x80000) | ((imm20_1 & 0x3ff) << 9) |
- ((imm20_1 & 0x400) >> 2) | ((imm20_1 & 0x7f800) >> 11);
-
- return (imm << 12) | (rd << 7) | opcode;
-}
-
-static u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1,
- u8 funct3, u8 rd, u8 opcode)
-{
- u8 funct7 = (funct5 << 2) | (aq << 1) | rl;
-
- return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode);
-}
-
-static u32 rv_addiw(u8 rd, u8 rs1, u16 imm11_0)
-{
- return rv_i_insn(imm11_0, rs1, 0, rd, 0x1b);
-}
-
-static u32 rv_addi(u8 rd, u8 rs1, u16 imm11_0)
-{
- return rv_i_insn(imm11_0, rs1, 0, rd, 0x13);
-}
-
-static u32 rv_addw(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(0, rs2, rs1, 0, rd, 0x3b);
-}
-
-static u32 rv_add(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(0, rs2, rs1, 0, rd, 0x33);
-}
-
-static u32 rv_subw(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x3b);
-}
-
-static u32 rv_sub(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x33);
-}
-
-static u32 rv_and(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(0, rs2, rs1, 7, rd, 0x33);
-}
-
-static u32 rv_or(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(0, rs2, rs1, 6, rd, 0x33);
-}
-
-static u32 rv_xor(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(0, rs2, rs1, 4, rd, 0x33);
-}
-
-static u32 rv_mulw(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(1, rs2, rs1, 0, rd, 0x3b);
-}
-
-static u32 rv_mul(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(1, rs2, rs1, 0, rd, 0x33);
-}
-
-static u32 rv_divuw(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(1, rs2, rs1, 5, rd, 0x3b);
-}
-
-static u32 rv_divu(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(1, rs2, rs1, 5, rd, 0x33);
-}
-
-static u32 rv_remuw(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(1, rs2, rs1, 7, rd, 0x3b);
-}
-
-static u32 rv_remu(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(1, rs2, rs1, 7, rd, 0x33);
-}
-
-static u32 rv_sllw(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(0, rs2, rs1, 1, rd, 0x3b);
-}
-
-static u32 rv_sll(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(0, rs2, rs1, 1, rd, 0x33);
-}
-
-static u32 rv_srlw(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(0, rs2, rs1, 5, rd, 0x3b);
-}
-
-static u32 rv_srl(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(0, rs2, rs1, 5, rd, 0x33);
-}
-
-static u32 rv_sraw(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x3b);
-}
-
-static u32 rv_sra(u8 rd, u8 rs1, u8 rs2)
-{
- return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x33);
-}
-
-static u32 rv_lui(u8 rd, u32 imm31_12)
-{
- return rv_u_insn(imm31_12, rd, 0x37);
-}
-
-static u32 rv_slli(u8 rd, u8 rs1, u16 imm11_0)
-{
- return rv_i_insn(imm11_0, rs1, 1, rd, 0x13);
-}
-
-static u32 rv_andi(u8 rd, u8 rs1, u16 imm11_0)
-{
- return rv_i_insn(imm11_0, rs1, 7, rd, 0x13);
-}
-
-static u32 rv_ori(u8 rd, u8 rs1, u16 imm11_0)
-{
- return rv_i_insn(imm11_0, rs1, 6, rd, 0x13);
-}
-
-static u32 rv_xori(u8 rd, u8 rs1, u16 imm11_0)
-{
- return rv_i_insn(imm11_0, rs1, 4, rd, 0x13);
-}
-
-static u32 rv_slliw(u8 rd, u8 rs1, u16 imm11_0)
-{
- return rv_i_insn(imm11_0, rs1, 1, rd, 0x1b);
-}
-
-static u32 rv_srliw(u8 rd, u8 rs1, u16 imm11_0)
-{
- return rv_i_insn(imm11_0, rs1, 5, rd, 0x1b);
-}
-
-static u32 rv_srli(u8 rd, u8 rs1, u16 imm11_0)
-{
- return rv_i_insn(imm11_0, rs1, 5, rd, 0x13);
-}
-
-static u32 rv_sraiw(u8 rd, u8 rs1, u16 imm11_0)
-{
- return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x1b);
-}
-
-static u32 rv_srai(u8 rd, u8 rs1, u16 imm11_0)
-{
- return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x13);
-}
-
-static u32 rv_jal(u8 rd, u32 imm20_1)
-{
- return rv_uj_insn(imm20_1, rd, 0x6f);
-}
-
-static u32 rv_jalr(u8 rd, u8 rs1, u16 imm11_0)
-{
- return rv_i_insn(imm11_0, rs1, 0, rd, 0x67);
-}
-
-static u32 rv_beq(u8 rs1, u8 rs2, u16 imm12_1)
-{
- return rv_sb_insn(imm12_1, rs2, rs1, 0, 0x63);
-}
-
-static u32 rv_bltu(u8 rs1, u8 rs2, u16 imm12_1)
-{
- return rv_sb_insn(imm12_1, rs2, rs1, 6, 0x63);
-}
-
-static u32 rv_bgeu(u8 rs1, u8 rs2, u16 imm12_1)
-{
- return rv_sb_insn(imm12_1, rs2, rs1, 7, 0x63);
-}
-
-static u32 rv_bne(u8 rs1, u8 rs2, u16 imm12_1)
-{
- return rv_sb_insn(imm12_1, rs2, rs1, 1, 0x63);
-}
-
-static u32 rv_blt(u8 rs1, u8 rs2, u16 imm12_1)
-{
- return rv_sb_insn(imm12_1, rs2, rs1, 4, 0x63);
-}
-
-static u32 rv_bge(u8 rs1, u8 rs2, u16 imm12_1)
-{
- return rv_sb_insn(imm12_1, rs2, rs1, 5, 0x63);
-}
-
-static u32 rv_sb(u8 rs1, u16 imm11_0, u8 rs2)
-{
- return rv_s_insn(imm11_0, rs2, rs1, 0, 0x23);
-}
-
-static u32 rv_sh(u8 rs1, u16 imm11_0, u8 rs2)
-{
- return rv_s_insn(imm11_0, rs2, rs1, 1, 0x23);
-}
-
-static u32 rv_sw(u8 rs1, u16 imm11_0, u8 rs2)
-{
- return rv_s_insn(imm11_0, rs2, rs1, 2, 0x23);
-}
-
-static u32 rv_sd(u8 rs1, u16 imm11_0, u8 rs2)
-{
- return rv_s_insn(imm11_0, rs2, rs1, 3, 0x23);
-}
-
-static u32 rv_lbu(u8 rd, u16 imm11_0, u8 rs1)
-{
- return rv_i_insn(imm11_0, rs1, 4, rd, 0x03);
-}
-
-static u32 rv_lhu(u8 rd, u16 imm11_0, u8 rs1)
-{
- return rv_i_insn(imm11_0, rs1, 5, rd, 0x03);
-}
-
-static u32 rv_lwu(u8 rd, u16 imm11_0, u8 rs1)
-{
- return rv_i_insn(imm11_0, rs1, 6, rd, 0x03);
-}
-
-static u32 rv_ld(u8 rd, u16 imm11_0, u8 rs1)
-{
- return rv_i_insn(imm11_0, rs1, 3, rd, 0x03);
-}
-
-static u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
-{
- return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f);
-}
-
-static u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
-{
- return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f);
-}
-
-static u32 rv_auipc(u8 rd, u32 imm31_12)
-{
- return rv_u_insn(imm31_12, rd, 0x17);
-}
-
-static bool is_12b_int(s64 val)
-{
- return -(1 << 11) <= val && val < (1 << 11);
-}
-
-static bool is_13b_int(s64 val)
-{
- return -(1 << 12) <= val && val < (1 << 12);
-}
-
-static bool is_21b_int(s64 val)
-{
- return -(1L << 20) <= val && val < (1L << 20);
-}
-
static bool is_32b_int(s64 val)
{
return -(1L << 31) <= val && val < (1L << 31);
}
-static int is_12b_check(int off, int insn)
-{
- if (!is_12b_int(off)) {
- pr_err("bpf-jit: insn=%d 12b < offset=%d not supported yet!\n",
- insn, (int)off);
- return -1;
- }
- return 0;
-}
-
static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
{
/* Note that the immediate from the add is sign-extended,
@@ -535,23 +149,6 @@ static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
emit(rv_addi(rd, rd, lower), ctx);
}
-static int rv_offset(int insn, int off, struct rv_jit_context *ctx)
-{
- int from, to;
-
- off++; /* BPF branch is from PC+1, RV is from PC */
- from = (insn > 0) ? ctx->offset[insn - 1] : 0;
- to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
- return (to - from) << 2;
-}
-
-static int epilogue_offset(struct rv_jit_context *ctx)
-{
- int to = ctx->epilogue_offset, from = ctx->ninsns;
-
- return (to - from) << 2;
-}
-
static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
{
int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8;
@@ -596,34 +193,6 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
ctx);
}
-/* return -1 or inverted cond */
-static int invert_bpf_cond(u8 cond)
-{
- switch (cond) {
- case BPF_JEQ:
- return BPF_JNE;
- case BPF_JGT:
- return BPF_JLE;
- case BPF_JLT:
- return BPF_JGE;
- case BPF_JGE:
- return BPF_JLT;
- case BPF_JLE:
- return BPF_JGT;
- case BPF_JNE:
- return BPF_JEQ;
- case BPF_JSGT:
- return BPF_JSLE;
- case BPF_JSLT:
- return BPF_JSGE;
- case BPF_JSGE:
- return BPF_JSLT;
- case BPF_JSLE:
- return BPF_JSGT;
- }
- return -1;
-}
-
static void emit_bcc(u8 cond, u8 rd, u8 rs, int rvoff,
struct rv_jit_context *ctx)
{
@@ -855,8 +424,8 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
return 0;
}
-static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
- bool extra_pass)
+int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
+ bool extra_pass)
{
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
BPF_CLASS(insn->code) == BPF_JMP;
@@ -1434,7 +1003,7 @@ out_be:
return 0;
}
-static void build_prologue(struct rv_jit_context *ctx)
+void bpf_jit_build_prologue(struct rv_jit_context *ctx)
{
int stack_adjust = 0, store_offset, bpf_stack_adjust;
@@ -1515,175 +1084,11 @@ static void build_prologue(struct rv_jit_context *ctx)
ctx->stack_size = stack_adjust;
}
-static void build_epilogue(struct rv_jit_context *ctx)
+void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
{
__build_epilogue(false, ctx);
}
-static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset)
-{
- const struct bpf_prog *prog = ctx->prog;
- int i;
-
- for (i = 0; i < prog->len; i++) {
- const struct bpf_insn *insn = &prog->insnsi[i];
- int ret;
-
- ret = emit_insn(insn, ctx, extra_pass);
- if (ret > 0) {
- i++;
- if (offset)
- offset[i] = ctx->ninsns;
- continue;
- }
- if (offset)
- offset[i] = ctx->ninsns;
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static void bpf_fill_ill_insns(void *area, unsigned int size)
-{
- memset(area, 0, size);
-}
-
-static void bpf_flush_icache(void *start, void *end)
-{
- flush_icache_range((unsigned long)start, (unsigned long)end);
-}
-
-bool bpf_jit_needs_zext(void)
-{
- return true;
-}
-
-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
-{
- bool tmp_blinded = false, extra_pass = false;
- struct bpf_prog *tmp, *orig_prog = prog;
- int pass = 0, prev_ninsns = 0, i;
- struct rv_jit_data *jit_data;
- unsigned int image_size = 0;
- struct rv_jit_context *ctx;
-
- if (!prog->jit_requested)
- return orig_prog;
-
- tmp = bpf_jit_blind_constants(prog);
- if (IS_ERR(tmp))
- return orig_prog;
- if (tmp != prog) {
- tmp_blinded = true;
- prog = tmp;
- }
-
- jit_data = prog->aux->jit_data;
- if (!jit_data) {
- jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
- if (!jit_data) {
- prog = orig_prog;
- goto out;
- }
- prog->aux->jit_data = jit_data;
- }
-
- ctx = &jit_data->ctx;
-
- if (ctx->offset) {
- extra_pass = true;
- image_size = sizeof(u32) * ctx->ninsns;
- goto skip_init_ctx;
- }
-
- ctx->prog = prog;
- ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
- if (!ctx->offset) {
- prog = orig_prog;
- goto out_offset;
- }
- for (i = 0; i < prog->len; i++) {
- prev_ninsns += 32;
- ctx->offset[i] = prev_ninsns;
- }
-
- for (i = 0; i < 16; i++) {
- pass++;
- ctx->ninsns = 0;
- if (build_body(ctx, extra_pass, ctx->offset)) {
- prog = orig_prog;
- goto out_offset;
- }
- build_prologue(ctx);
- ctx->epilogue_offset = ctx->ninsns;
- build_epilogue(ctx);
-
- if (ctx->ninsns == prev_ninsns) {
- if (jit_data->header)
- break;
-
- image_size = sizeof(u32) * ctx->ninsns;
- jit_data->header =
- bpf_jit_binary_alloc(image_size,
- &jit_data->image,
- sizeof(u32),
- bpf_fill_ill_insns);
- if (!jit_data->header) {
- prog = orig_prog;
- goto out_offset;
- }
-
- ctx->insns = (u32 *)jit_data->image;
- /* Now, when the image is allocated, the image
- * can potentially shrink more (auipc/jalr ->
- * jal).
- */
- }
- prev_ninsns = ctx->ninsns;
- }
-
- if (i == 16) {
- pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
- bpf_jit_binary_free(jit_data->header);
- prog = orig_prog;
- goto out_offset;
- }
-
-skip_init_ctx:
- pass++;
- ctx->ninsns = 0;
-
- build_prologue(ctx);
- if (build_body(ctx, extra_pass, NULL)) {
- bpf_jit_binary_free(jit_data->header);
- prog = orig_prog;
- goto out_offset;
- }
- build_epilogue(ctx);
-
- if (bpf_jit_enable > 1)
- bpf_jit_dump(prog->len, image_size, pass, ctx->insns);
-
- prog->bpf_func = (void *)ctx->insns;
- prog->jited = 1;
- prog->jited_len = image_size;
-
- bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
-
- if (!prog->is_func || extra_pass) {
-out_offset:
- kfree(ctx->offset);
- kfree(jit_data);
- prog->aux->jit_data = NULL;
- }
-out:
- if (tmp_blinded)
- bpf_jit_prog_release_other(prog, prog == orig_prog ?
- tmp : orig_prog);
- return prog;
-}
-
void *bpf_jit_alloc_exec(unsigned long size)
{
return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
new file mode 100644
index 000000000000..709b94ece3ed
--- /dev/null
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common functionality for RV32 and RV64 BPF JIT compilers
+ *
+ * Copyright (c) 2019 Björn Töpel <bjorn.topel@gmail.com>
+ *
+ */
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include "bpf_jit.h"
+
+/* Number of iterations to try until offsets converge. */
+#define NR_JIT_ITERATIONS 16
+
+static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset)
+{
+ const struct bpf_prog *prog = ctx->prog;
+ int i;
+
+ for (i = 0; i < prog->len; i++) {
+ const struct bpf_insn *insn = &prog->insnsi[i];
+ int ret;
+
+ ret = bpf_jit_emit_insn(insn, ctx, extra_pass);
+ /* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */
+ if (ret > 0)
+ i++;
+ if (offset)
+ offset[i] = ctx->ninsns;
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+bool bpf_jit_needs_zext(void)
+{
+ return true;
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+ bool tmp_blinded = false, extra_pass = false;
+ struct bpf_prog *tmp, *orig_prog = prog;
+ int pass = 0, prev_ninsns = 0, i;
+ struct rv_jit_data *jit_data;
+ struct rv_jit_context *ctx;
+ unsigned int image_size = 0;
+
+ if (!prog->jit_requested)
+ return orig_prog;
+
+ tmp = bpf_jit_blind_constants(prog);
+ if (IS_ERR(tmp))
+ return orig_prog;
+ if (tmp != prog) {
+ tmp_blinded = true;
+ prog = tmp;
+ }
+
+ jit_data = prog->aux->jit_data;
+ if (!jit_data) {
+ jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+ if (!jit_data) {
+ prog = orig_prog;
+ goto out;
+ }
+ prog->aux->jit_data = jit_data;
+ }
+
+ ctx = &jit_data->ctx;
+
+ if (ctx->offset) {
+ extra_pass = true;
+ image_size = sizeof(u32) * ctx->ninsns;
+ goto skip_init_ctx;
+ }
+
+ ctx->prog = prog;
+ ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+ if (!ctx->offset) {
+ prog = orig_prog;
+ goto out_offset;
+ }
+ for (i = 0; i < prog->len; i++) {
+ prev_ninsns += 32;
+ ctx->offset[i] = prev_ninsns;
+ }
+
+ for (i = 0; i < NR_JIT_ITERATIONS; i++) {
+ pass++;
+ ctx->ninsns = 0;
+ if (build_body(ctx, extra_pass, ctx->offset)) {
+ prog = orig_prog;
+ goto out_offset;
+ }
+ bpf_jit_build_prologue(ctx);
+ ctx->epilogue_offset = ctx->ninsns;
+ bpf_jit_build_epilogue(ctx);
+
+ if (ctx->ninsns == prev_ninsns) {
+ if (jit_data->header)
+ break;
+
+ image_size = sizeof(u32) * ctx->ninsns;
+ jit_data->header =
+ bpf_jit_binary_alloc(image_size,
+ &jit_data->image,
+ sizeof(u32),
+ bpf_fill_ill_insns);
+ if (!jit_data->header) {
+ prog = orig_prog;
+ goto out_offset;
+ }
+
+ ctx->insns = (u32 *)jit_data->image;
+ /*
+ * Now that the image is allocated, it can
+ * potentially shrink further (auipc/jalr -> jal).
+ */
+ }
+ prev_ninsns = ctx->ninsns;
+ }
+
+ if (i == NR_JIT_ITERATIONS) {
+ pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
+ bpf_jit_binary_free(jit_data->header);
+ prog = orig_prog;
+ goto out_offset;
+ }
+
+skip_init_ctx:
+ pass++;
+ ctx->ninsns = 0;
+
+ bpf_jit_build_prologue(ctx);
+ if (build_body(ctx, extra_pass, NULL)) {
+ bpf_jit_binary_free(jit_data->header);
+ prog = orig_prog;
+ goto out_offset;
+ }
+ bpf_jit_build_epilogue(ctx);
+
+ if (bpf_jit_enable > 1)
+ bpf_jit_dump(prog->len, image_size, pass, ctx->insns);
+
+ prog->bpf_func = (void *)ctx->insns;
+ prog->jited = 1;
+ prog->jited_len = image_size;
+
+ bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
+
+ if (!prog->is_func || extra_pass) {
+out_offset:
+ kfree(ctx->offset);
+ kfree(jit_data);
+ prog->aux->jit_data = NULL;
+ }
+out:
+
+ if (tmp_blinded)
+ bpf_jit_prog_release_other(prog, prog == orig_prog ?
+ tmp : orig_prog);
+ return prog;
+}
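+
+/*
+ * Convergence sketch: ctx->offset starts pessimistically at 32 RV
+ * insns per BPF insn, so every branch is initially assumed far. Each
+ * pass re-emits against the previous pass's offsets, which can only
+ * stay equal or shrink, until ctx->ninsns stops changing; a final
+ * pass then writes the instructions into the allocated image.
+ */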
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 1e3517b0518b..e577f8533009 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -338,7 +338,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
* @no_output_qs: number of output queues
* @input_handler: handler to be called for input queues
* @output_handler: handler to be called for output queues
- * @queue_start_poll_array: polling handlers (one per input queue or NULL)
+ * @irq_poll: Data IRQ polling handler (NULL when not supported)
* @scan_threshold: # of in-use buffers that triggers scan on output queue
* @int_parm: interruption parameter
* @input_sbal_addr_array: address of no_input_qs * 128 pointers
@@ -359,8 +359,7 @@ struct qdio_initialize {
unsigned int no_output_qs;
qdio_handler_t *input_handler;
qdio_handler_t *output_handler;
- void (**queue_start_poll_array) (struct ccw_device *, int,
- unsigned long);
+ void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
unsigned int scan_threshold;
unsigned long int_parm;
struct qdio_buffer **input_sbal_addr_array;
@@ -415,8 +414,8 @@ extern int qdio_activate(struct ccw_device *);
extern void qdio_release_aob(struct qaob *);
extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
unsigned int);
-extern int qdio_start_irq(struct ccw_device *, int);
-extern int qdio_stop_irq(struct ccw_device *, int);
+extern int qdio_start_irq(struct ccw_device *cdev);
+extern int qdio_stop_irq(struct ccw_device *cdev);
extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
bool is_input, unsigned int *bufnr,
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 0ff86391f77d..e98304d0219e 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1508,6 +1508,7 @@ static int vector_set_coalesce(struct net_device *netdev,
}
static const struct ethtool_ops vector_net_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS,
.get_drvinfo = vector_net_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8ae0272c1c51..de73992b8432 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -238,7 +238,11 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
}
}
-static inline int is_kernel_text(unsigned long addr)
+/*
+ * <linux/kallsyms.h> already defines is_kernel_text; use a '__'
+ * prefix here to avoid the conflict.
+ */
+static inline int __is_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
return 1;
@@ -328,8 +332,8 @@ repeat:
addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
PAGE_OFFSET + PAGE_SIZE-1;
- if (is_kernel_text(addr) ||
- is_kernel_text(addr2))
+ if (__is_kernel_text(addr) ||
+ __is_kernel_text(addr2))
prot = PAGE_KERNEL_LARGE_EXEC;
pages_2m++;
@@ -354,7 +358,7 @@ repeat:
*/
pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
- if (is_kernel_text(addr))
+ if (__is_kernel_text(addr))
prot = PAGE_KERNEL_EXEC;
pages_4k++;
@@ -843,7 +847,7 @@ static void mark_nxdata_nx(void)
*/
unsigned long start = PFN_ALIGN(_etext);
/*
- * This comes from is_kernel_text upper limit. Also HPAGE where used:
+ * This comes from the __is_kernel_text() upper limit, rounded up to HPAGE where huge pages are used:
*/
unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 9ba08e9abc09..5ea7c2cf7ab4 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1361,37 +1361,140 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
-(stack_size - i * 8));
}
+static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+ struct bpf_prog *p, int stack_size, bool mod_ret)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ if (emit_call(&prog, __bpf_prog_enter, prog))
+ return -EINVAL;
+ /* remember prog start time returned by __bpf_prog_enter */
+ emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
+
+ /* arg1: lea rdi, [rbp - stack_size] */
+ EMIT4(0x48, 0x8D, 0x7D, -stack_size);
+ /* arg2: progs[i]->insnsi for interpreter */
+ if (!p->jited)
+ emit_mov_imm64(&prog, BPF_REG_2,
+ (long) p->insnsi >> 32,
+ (u32) (long) p->insnsi);
+ /* call JITed bpf program or interpreter */
+ if (emit_call(&prog, p->bpf_func, prog))
+ return -EINVAL;
+
+ /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
+ * value of the previous call, which is then passed on the
+ * stack to the next BPF program.
+ */
+ if (mod_ret)
+ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+
+ /* arg1: mov rdi, progs[i] */
+ emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
+ (u32) (long) p);
+ /* arg2: mov rsi, rbx <- start time in nsec */
+ emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
+ if (emit_call(&prog, __bpf_prog_exit, prog))
+ return -EINVAL;
+
+ *pprog = prog;
+ return 0;
+}
+
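+/*
+ * As a C sketch, the fragment emitted above runs, per program:
+ *
+ * start = __bpf_prog_enter();
+ * ret = p->bpf_func(&args_on_stack, p->insnsi);
+ * if (mod_ret)
+ * *(u64 *)(rbp - 8) = ret; (passed to the next program)
+ * __bpf_prog_exit(p, start);
+ */
+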
+static void emit_nops(u8 **pprog, unsigned int len)
+{
+ unsigned int i, noplen;
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ while (len > 0) {
+ noplen = len;
+
+ if (noplen > ASM_NOP_MAX)
+ noplen = ASM_NOP_MAX;
+
+ for (i = 0; i < noplen; i++)
+ EMIT1(ideal_nops[noplen][i]);
+ len -= noplen;
+ }
+
+ *pprog = prog;
+}
+
+static void emit_align(u8 **pprog, u32 align)
+{
+ u8 *target, *prog = *pprog;
+
+ target = PTR_ALIGN(prog, align);
+ if (target != prog)
+ emit_nops(&prog, target - prog);
+
+ *pprog = prog;
+}
+
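+/*
+ * The pad emitted by emit_align() is the PTR_ALIGN distance; for a
+ * power-of-two align that is (hypothetical helper, not part of this
+ * patch):
+ */
+static inline unsigned int __maybe_unused align_pad(unsigned long addr,
+ unsigned int align)
+{
+ return (align - (addr & (align - 1))) & (align - 1);
+}
+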
+static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+ s64 offset;
+
+ offset = func - (ip + 2 + 4);
+ if (!is_simm32(offset)) {
+ pr_err("Target %p is out of range\n", func);
+ return -EINVAL;
+ }
+ EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
+ *pprog = prog;
+ return 0;
+}
+
static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
- struct bpf_prog **progs, int prog_cnt, int stack_size)
+ struct bpf_tramp_progs *tp, int stack_size)
{
+ int i;
u8 *prog = *pprog;
- int cnt = 0, i;
- for (i = 0; i < prog_cnt; i++) {
- if (emit_call(&prog, __bpf_prog_enter, prog))
- return -EINVAL;
- /* remember prog start time returned by __bpf_prog_enter */
- emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
-
- /* arg1: lea rdi, [rbp - stack_size] */
- EMIT4(0x48, 0x8D, 0x7D, -stack_size);
- /* arg2: progs[i]->insnsi for interpreter */
- if (!progs[i]->jited)
- emit_mov_imm64(&prog, BPF_REG_2,
- (long) progs[i]->insnsi >> 32,
- (u32) (long) progs[i]->insnsi);
- /* call JITed bpf program or interpreter */
- if (emit_call(&prog, progs[i]->bpf_func, prog))
+ for (i = 0; i < tp->nr_progs; i++) {
+ if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
return -EINVAL;
+ }
+ *pprog = prog;
+ return 0;
+}
+
+static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
+ struct bpf_tramp_progs *tp, int stack_size,
+ u8 **branches)
+{
+ u8 *prog = *pprog;
+ int i, cnt = 0;
- /* arg1: mov rdi, progs[i] */
- emit_mov_imm64(&prog, BPF_REG_1, (long) progs[i] >> 32,
- (u32) (long) progs[i]);
- /* arg2: mov rsi, rbx <- start time in nsec */
- emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
- if (emit_call(&prog, __bpf_prog_exit, prog))
+ /* The first fmod_ret program will receive a garbage return value.
+ * Set this to 0 to avoid confusing the program.
+ */
+ emit_mov_imm32(&prog, false, BPF_REG_0, 0);
+ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+ for (i = 0; i < tp->nr_progs; i++) {
+ if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
return -EINVAL;
+
+ /* mod_ret prog stored return value into [rbp - 8]. Emit:
+ * if (*(u64 *)(rbp - 8) != 0)
+ * goto do_fexit;
+ */
+ /* cmp QWORD PTR [rbp - 0x8], 0x0 */
+ EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
+
+ /* Save the location of the branch, and generate 6 nops
+ * (4 bytes for an offset and 2 bytes for the jump). These nops
+ * are replaced with a conditional jump once do_fexit (i.e. the
+ * start of the fexit invocation) is finalized.
+ */
+ branches[i] = prog;
+ emit_nops(&prog, 4 + 2);
}
+
*pprog = prog;
return 0;
}
@@ -1458,12 +1561,15 @@ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
*/
int arch_prepare_bpf_trampoline(void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
- struct bpf_prog **fentry_progs, int fentry_cnt,
- struct bpf_prog **fexit_progs, int fexit_cnt,
+ struct bpf_tramp_progs *tprogs,
void *orig_call)
{
- int cnt = 0, nr_args = m->nr_args;
+ int ret, i, cnt = 0, nr_args = m->nr_args;
int stack_size = nr_args * 8;
+ struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
+ struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
+ struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
+ u8 **branches = NULL;
u8 *prog;
/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
@@ -1492,28 +1598,64 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
save_regs(m, &prog, nr_args, stack_size);
- if (fentry_cnt)
- if (invoke_bpf(m, &prog, fentry_progs, fentry_cnt, stack_size))
+ if (fentry->nr_progs)
+ if (invoke_bpf(m, &prog, fentry, stack_size))
return -EINVAL;
+ if (fmod_ret->nr_progs) {
+ branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
+ GFP_KERNEL);
+ if (!branches)
+ return -ENOMEM;
+
+ if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
+ branches)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- if (fentry_cnt)
+ if (fentry->nr_progs || fmod_ret->nr_progs)
restore_regs(m, &prog, nr_args, stack_size);
/* call original function */
- if (emit_call(&prog, orig_call, prog))
- return -EINVAL;
+ if (emit_call(&prog, orig_call, prog)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
/* remember return value in a stack for bpf prog to access */
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
}
- if (fexit_cnt)
- if (invoke_bpf(m, &prog, fexit_progs, fexit_cnt, stack_size))
- return -EINVAL;
+ if (fmod_ret->nr_progs) {
+ /* From Intel 64 and IA-32 Architectures Optimization
+ * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
+ * Coding Rule 11: All branch targets should be 16-byte
+ * aligned.
+ */
+ emit_align(&prog, 16);
+ /* Update the branches saved in invoke_bpf_mod_ret with the
+ * aligned address of do_fexit.
+ */
+ for (i = 0; i < fmod_ret->nr_progs; i++)
+ emit_cond_near_jump(&branches[i], prog, branches[i],
+ X86_JNE);
+ }
+
+ if (fexit->nr_progs)
+ if (invoke_bpf(m, &prog, fexit, stack_size)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
if (flags & BPF_TRAMP_F_RESTORE_REGS)
restore_regs(m, &prog, nr_args, stack_size);
+ /* This needs to be done regardless. If there were fmod_ret programs,
+ * the return value is only updated on the stack and still needs to be
+ * restored to R0.
+ */
if (flags & BPF_TRAMP_F_CALL_ORIG)
/* restore original return value back into RAX */
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
@@ -1525,45 +1667,15 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
EMIT1(0xC3); /* ret */
/* Make sure the trampoline generation logic doesn't overflow */
- if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY))
- return -EFAULT;
- return prog - (u8 *)image;
-}
-
-static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
-{
- u8 *prog = *pprog;
- int cnt = 0;
- s64 offset;
-
- offset = func - (ip + 2 + 4);
- if (!is_simm32(offset)) {
- pr_err("Target %p is out of range\n", func);
- return -EINVAL;
- }
- EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
- *pprog = prog;
- return 0;
-}
-
-static void emit_nops(u8 **pprog, unsigned int len)
-{
- unsigned int i, noplen;
- u8 *prog = *pprog;
- int cnt = 0;
-
- while (len > 0) {
- noplen = len;
-
- if (noplen > ASM_NOP_MAX)
- noplen = ASM_NOP_MAX;
-
- for (i = 0; i < noplen; i++)
- EMIT1(ideal_nops[noplen][i]);
- len -= noplen;
+ if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
+ ret = -EFAULT;
+ goto cleanup;
}
+ ret = prog - (u8 *)image;
- *pprog = prog;
+cleanup:
+ kfree(branches);
+ return ret;
}
static int emit_fallback_jump(u8 **pprog)
@@ -1588,7 +1700,7 @@ static int emit_fallback_jump(u8 **pprog)
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
- u8 *jg_reloc, *jg_target, *prog = *pprog;
+ u8 *jg_reloc, *prog = *pprog;
int pivot, err, jg_bytes = 1, cnt = 0;
s64 jg_offset;
@@ -1643,9 +1755,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
* Coding Rule 11: All branch targets should be 16-byte
* aligned.
*/
- jg_target = PTR_ALIGN(prog, 16);
- if (jg_target != prog)
- emit_nops(&prog, jg_target - prog);
+ emit_align(&prog, 16);
jg_offset = prog - jg_reloc;
emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 5e3cc1651c78..139cdf7e7327 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -3504,6 +3504,126 @@ out:
}
EXPORT_SYMBOL_GPL(device_move);
+static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ struct kobject *kobj = &dev->kobj;
+ struct class *class = dev->class;
+ const struct device_type *type = dev->type;
+ int error;
+
+ if (class) {
+ /*
+ * Change the device groups of the device class for @dev to
+ * @kuid/@kgid.
+ */
+ error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
+ kgid);
+ if (error)
+ return error;
+ }
+
+ if (type) {
+ /*
+ * Change the device groups of the device type for @dev to
+ * @kuid/@kgid.
+ */
+ error = sysfs_groups_change_owner(kobj, type->groups, kuid,
+ kgid);
+ if (error)
+ return error;
+ }
+
+ /* Change the device groups of @dev to @kuid/@kgid. */
+ error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
+ if (error)
+ return error;
+
+ if (device_supports_offline(dev) && !dev->offline_disabled) {
+ /* Change online device attributes of @dev to @kuid/@kgid. */
+ error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
+ kuid, kgid);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * device_change_owner - change the owner of an existing device.
+ * @dev: device.
+ * @kuid: new owner's kuid
+ * @kgid: new owner's kgid
+ *
+ * This changes the owner of @dev and its corresponding sysfs entries to
+ * @kuid/@kgid. This function closely mirrors how @dev was added via driver
+ * core.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
+{
+ int error;
+ struct kobject *kobj = &dev->kobj;
+
+ dev = get_device(dev);
+ if (!dev)
+ return -EINVAL;
+
+ /*
+ * Change the kobject and the default attributes and groups of the
+ * ktype associated with it to @kuid/@kgid.
+ */
+ error = sysfs_change_owner(kobj, kuid, kgid);
+ if (error)
+ goto out;
+
+ /*
+ * Change the uevent file for @dev to the new owner. The uevent file
+ * was created in a separate step when @dev got added and we mirror
+ * that step here.
+ */
+ error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
+ kgid);
+ if (error)
+ goto out;
+
+ /*
+ * Change the device groups, the device groups associated with the
+ * device class, and the groups associated with the device type of @dev
+ * to @kuid/@kgid.
+ */
+ error = device_attrs_change_owner(dev, kuid, kgid);
+ if (error)
+ goto out;
+
+ error = dpm_sysfs_change_owner(dev, kuid, kgid);
+ if (error)
+ goto out;
+
+#ifdef CONFIG_BLOCK
+ if (sysfs_deprecated && dev->class == &block_class)
+ goto out;
+#endif
+
+ /*
+ * Change the owner of the symlink located in the class directory of
+ * the device class associated with @dev which points to the actual
+ * directory entry for @dev to @kuid/@kgid. This ensures that the
+ * symlink shows the same permissions as its target.
+ */
+ error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
+ dev_name(dev), kuid, kgid);
+ if (error)
+ goto out;
+
+out:
+ put_device(dev);
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_change_owner);
+
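device_change_owner() is exported so that other kernel code (for instance, code re-parenting a device's sysfs entries into a user-namespace-owned context) can re-own an already-registered device. A hedged sketch of a hypothetical caller; make_kuid()/make_kgid() and the validity checks are the standard uidgid helpers, and the namespace argument is illustrative:

/* Hypothetical caller: hand @dev's sysfs entries to the root user of a
 * user namespace @ns.
 */
static int example_reown_device(struct device *dev, struct user_namespace *ns)
{
	kuid_t kuid = make_kuid(ns, 0);
	kgid_t kgid = make_kgid(ns, 0);

	if (!uid_valid(kuid) || !gid_valid(kgid))
		return -EINVAL;

	return device_change_owner(dev, kuid, kgid);
}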
/**
* device_shutdown - call ->shutdown() on each device to shutdown.
*/
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 444f5c169a0b..54292cdd7808 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -74,6 +74,7 @@ extern int pm_qos_sysfs_add_flags(struct device *dev);
extern void pm_qos_sysfs_remove_flags(struct device *dev);
extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev);
extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);
+extern int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
#else /* CONFIG_PM */
@@ -88,6 +89,8 @@ static inline void pm_runtime_remove(struct device *dev) {}
static inline int dpm_sysfs_add(struct device *dev) { return 0; }
static inline void dpm_sysfs_remove(struct device *dev) {}
+static inline int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid) { return 0; }
#endif
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index d7d82db2e4bc..2b99fe1eb207 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -480,6 +480,14 @@ static ssize_t wakeup_last_time_ms_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ if (dev->power.wakeup && dev->power.wakeup->dev)
+ return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
+ return 0;
+}
+
static DEVICE_ATTR_RO(wakeup_last_time_ms);
#ifdef CONFIG_PM_AUTOSLEEP
@@ -501,7 +509,13 @@ static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
-#endif /* CONFIG_PM_SLEEP */
+#else /* CONFIG_PM_SLEEP */
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ return 0;
+}
+#endif
#ifdef CONFIG_PM_ADVANCED_DEBUG
static ssize_t runtime_usage_show(struct device *dev,
@@ -684,6 +698,45 @@ int dpm_sysfs_add(struct device *dev)
return rc;
}
+int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
+{
+ int rc;
+
+ if (device_pm_not_required(dev))
+ return 0;
+
+ rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid);
+ if (rc)
+ return rc;
+
+ if (pm_runtime_callbacks_present(dev)) {
+ rc = sysfs_group_change_owner(
+ &dev->kobj, &pm_runtime_attr_group, kuid, kgid);
+ if (rc)
+ return rc;
+ }
+
+ if (device_can_wakeup(dev)) {
+ rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group,
+ kuid, kgid);
+ if (rc)
+ return rc;
+
+ rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid);
+ if (rc)
+ return rc;
+ }
+
+ if (dev->power.set_latency_tolerance) {
+ rc = sysfs_group_change_owner(
+ &dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid,
+ kgid);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
int wakeup_sysfs_add(struct device *dev)
{
return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
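For orientation, the ownership change cascades through the PM layer and back into the driver core; the chain below is inferred from the hunks above:

/*
 * device_change_owner(dev, kuid, kgid)
 *   -> dpm_sysfs_change_owner(dev, ...)
 *        -> dpm_sysfs_wakeup_change_owner(dev, ...)
 *             -> device_change_owner(dev->power.wakeup->dev, ...)
 *
 * The virtual wakeup device has no wakeup source of its own, so the
 * recursion terminates after one level.
 */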
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index f7aa2dc1ff85..4e73a531b377 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -211,12 +211,12 @@ config BT_HCIUART_RTL
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
depends on GPIOLIB
- depends on ACPI
+ depends on (ACPI || SERIAL_DEV_CTRL_TTYPORT)
select BT_HCIUART_3WIRE
select BT_RTL
help
The Realtek protocol support enables Bluetooth HCI over 3-Wire
- serial port internface for Realtek Bluetooth controllers.
+ serial port interface for Realtek Bluetooth controllers.
Say Y here to compile support for Realtek protocol.
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 0e5954cac98e..5a321b4076aa 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -133,8 +133,8 @@ static int bfusb_send_bulk(struct bfusb_data *data, struct sk_buff *skb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
- BT_ERR("%s bulk tx submit failed urb %p err %d",
- data->hdev->name, urb, err);
+ bt_dev_err(data->hdev, "bulk tx submit failed urb %p err %d",
+ urb, err);
skb_unlink(skb, &data->pending_q);
usb_free_urb(urb);
} else
@@ -232,8 +232,8 @@ static int bfusb_rx_submit(struct bfusb_data *data, struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
- BT_ERR("%s bulk rx submit failed urb %p err %d",
- data->hdev->name, urb, err);
+ bt_dev_err(data->hdev, "bulk rx submit failed urb %p err %d",
+ urb, err);
skb_unlink(skb, &data->pending_q);
kfree_skb(skb);
usb_free_urb(urb);
@@ -247,7 +247,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
BT_DBG("bfusb %p hdr 0x%02x data %p len %d", data, hdr, buf, len);
if (hdr & 0x10) {
- BT_ERR("%s error in block", data->hdev->name);
+ bt_dev_err(data->hdev, "error in block");
kfree_skb(data->reassembly);
data->reassembly = NULL;
return -EIO;
@@ -259,13 +259,13 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
int pkt_len = 0;
if (data->reassembly) {
- BT_ERR("%s unexpected start block", data->hdev->name);
+ bt_dev_err(data->hdev, "unexpected start block");
kfree_skb(data->reassembly);
data->reassembly = NULL;
}
if (len < 1) {
- BT_ERR("%s no packet type found", data->hdev->name);
+ bt_dev_err(data->hdev, "no packet type found");
return -EPROTO;
}
@@ -277,7 +277,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
struct hci_event_hdr *hdr = (struct hci_event_hdr *) buf;
pkt_len = HCI_EVENT_HDR_SIZE + hdr->plen;
} else {
- BT_ERR("%s event block is too short", data->hdev->name);
+ bt_dev_err(data->hdev, "event block is too short");
return -EILSEQ;
}
break;
@@ -287,7 +287,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
struct hci_acl_hdr *hdr = (struct hci_acl_hdr *) buf;
pkt_len = HCI_ACL_HDR_SIZE + __le16_to_cpu(hdr->dlen);
} else {
- BT_ERR("%s data block is too short", data->hdev->name);
+ bt_dev_err(data->hdev, "data block is too short");
return -EILSEQ;
}
break;
@@ -297,7 +297,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
struct hci_sco_hdr *hdr = (struct hci_sco_hdr *) buf;
pkt_len = HCI_SCO_HDR_SIZE + hdr->dlen;
} else {
- BT_ERR("%s audio block is too short", data->hdev->name);
+ bt_dev_err(data->hdev, "audio block is too short");
return -EILSEQ;
}
break;
@@ -305,7 +305,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
skb = bt_skb_alloc(pkt_len, GFP_ATOMIC);
if (!skb) {
- BT_ERR("%s no memory for the packet", data->hdev->name);
+ bt_dev_err(data->hdev, "no memory for the packet");
return -ENOMEM;
}
@@ -314,7 +314,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
data->reassembly = skb;
} else {
if (!data->reassembly) {
- BT_ERR("%s unexpected continuation block", data->hdev->name);
+ bt_dev_err(data->hdev, "unexpected continuation block");
return -EIO;
}
}
@@ -366,8 +366,7 @@ static void bfusb_rx_complete(struct urb *urb)
}
if (count < len) {
- BT_ERR("%s block extends over URB buffer ranges",
- data->hdev->name);
+ bt_dev_err(data->hdev, "block extends over URB buffer ranges");
}
if ((hdr & 0xe1) == 0xc1)
@@ -391,8 +390,8 @@ resubmit:
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
- BT_ERR("%s bulk resubmit failed urb %p err %d",
- data->hdev->name, urb, err);
+ bt_dev_err(data->hdev, "bulk resubmit failed urb %p err %d",
+ urb, err);
}
unlock:
@@ -477,7 +476,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
/* Max HCI frame size seems to be 1511 + 1 */
nskb = bt_skb_alloc(count + 32, GFP_KERNEL);
if (!nskb) {
- BT_ERR("Can't allocate memory for new packet");
+ bt_dev_err(hdev, "Can't allocate memory for new packet");
return -ENOMEM;
}
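The bfusb changes above are a mechanical conversion from BT_ERR() to bt_dev_err(), which takes the hci_dev and prefixes the device name itself. A minimal before/after sketch with illustrative arguments:

static void example_log(struct hci_dev *hdev, struct urb *urb, int err)
{
	/* old style: the caller passes the name by hand */
	BT_ERR("%s bulk tx submit failed urb %p err %d",
	       hdev->name, urb, err);

	/* new style: the name is derived from hdev */
	bt_dev_err(hdev, "bulk tx submit failed urb %p err %d", urb, err);
}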
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 62e781a18bf0..6a0e2c5a8beb 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -376,13 +376,13 @@ struct ibt_cp_reg_access {
__le32 addr;
__u8 mode;
__u8 len;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct ibt_rp_reg_access {
__u8 status;
__le32 addr;
- __u8 data[0];
+ __u8 data[];
} __packed;
static int regmap_ibt_read(void *context, const void *addr, size_t reg_size,
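The data[0] to data[] conversions in this patch (and the btqca/btrtl/hci_ag6xx/hci_intel ones below) replace the old GNU zero-length-array idiom with a C99 flexible array member. Allocation size math then pairs naturally with struct_size() from <linux/overflow.h>; a hedged sketch reusing the ibt_cp_reg_access layout above:

static struct ibt_cp_reg_access *example_alloc(size_t data_len)
{
	struct ibt_cp_reg_access *cp;

	/* struct_size() = sizeof(*cp) + data_len * sizeof(cp->data[0]),
	 * with overflow checking that a data[0] member would defeat.
	 */
	cp = kzalloc(struct_size(cp, data, data_len), GFP_KERNEL);
	return cp;
}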
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index ec69e5dd7bd3..a16845c0751d 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -139,7 +139,7 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
static void qca_tlv_check_data(struct qca_fw_config *config,
- const struct firmware *fw)
+ const struct firmware *fw, enum qca_btsoc_type soc_type)
{
const u8 *data;
u32 type_len;
@@ -148,6 +148,7 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
struct tlv_type_hdr *tlv;
struct tlv_type_patch *tlv_patch;
struct tlv_type_nvm *tlv_nvm;
+ uint8_t nvm_baud_rate = config->user_baud_rate;
tlv = (struct tlv_type_hdr *)fw->data;
@@ -216,7 +217,10 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
tlv_nvm->data[0] |= 0x80;
/* UART Baud Rate */
- tlv_nvm->data[2] = config->user_baud_rate;
+ if (soc_type == QCA_WCN3991)
+ tlv_nvm->data[1] = nvm_baud_rate;
+ else
+ tlv_nvm->data[2] = nvm_baud_rate;
break;
@@ -354,7 +358,7 @@ static int qca_download_firmware(struct hci_dev *hdev,
return ret;
}
- qca_tlv_check_data(config, fw);
+ qca_tlv_check_data(config, fw, soc_type);
segment = fw->data;
remain = fw->size;
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index f5795b1a3779..e16a4d650597 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -79,7 +79,7 @@ struct qca_fw_config {
struct edl_event_hdr {
__u8 cresp;
__u8 rtype;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct qca_btsoc_version {
@@ -112,12 +112,12 @@ struct tlv_type_nvm {
__le16 tag_len;
__le32 reserve1;
__le32 reserve2;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct tlv_type_hdr {
__le32 type_len;
- __u8 data[0];
+ __u8 data[];
} __packed;
enum qca_btsoc_type {
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 577cfa3329db..67f4bc21e7c5 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -136,6 +136,18 @@ static const struct id_table ic_id_table[] = {
.fw_name = "rtl_bt/rtl8761a_fw.bin",
.cfg_name = "rtl_bt/rtl8761a_config" },
+ /* 8822C with UART interface */
+ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV |
+ IC_MATCH_FL_HCIBUS,
+ .lmp_subver = RTL_ROM_LMP_8822B,
+ .hci_rev = 0x000c,
+ .hci_ver = 0x0a,
+ .hci_bus = HCI_UART,
+ .config_needed = true,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8822cs_fw.bin",
+ .cfg_name = "rtl_bt/rtl8822cs_config" },
+
/* 8822C with USB interface */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc),
.config_needed = false,
diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
index 10ad40c3e42c..2a582682136d 100644
--- a/drivers/bluetooth/btrtl.h
+++ b/drivers/bluetooth/btrtl.h
@@ -38,13 +38,13 @@ struct rtl_epatch_header {
struct rtl_vendor_config_entry {
__le16 offset;
__u8 len;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct rtl_vendor_config {
__le32 signature;
__le16 total_len;
- struct rtl_vendor_config_entry entry[0];
+ struct rtl_vendor_config_entry entry[];
} __packed;
#if IS_ENABLED(CONFIG_BT_RTL)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f5924f3e8b8d..3bdec42c9612 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -57,6 +57,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_IFNUM_2 0x80000
#define BTUSB_CW6622 0x100000
#define BTUSB_MEDIATEK 0x200000
+#define BTUSB_WIDEBAND_SPEECH 0x400000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -333,15 +334,21 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x1286, 0x204e), .driver_info = BTUSB_MARVELL },
/* Intel Bluetooth devices */
- { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
- { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
- { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW },
+ { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
- { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
- { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL },
- { USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW },
+ { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW |
+ BTUSB_WIDEBAND_SPEECH },
/* Other Intel Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
@@ -387,6 +394,7 @@ static const struct usb_device_id blacklist_table[] = {
/* Additional Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3548), .driver_info = BTUSB_REALTEK },
/* Silicon Wave based devices */
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
@@ -1930,7 +1938,14 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (err)
return err;
- bt_dev_info(hdev, "Intel firmware patch completed and activated");
+ /* The build number of the downloaded fw patch is needed on
+ * every power-on boot
+ */

+ err = btintel_read_version(hdev, &ver);
+ if (err)
+ return err;
+ bt_dev_info(hdev, "Intel BT fw patch 0x%02x completed & activated",
+ ver.fw_patch_num);
goto complete;
@@ -3859,6 +3874,9 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_BROKEN_ISOC)
data->isoc = NULL;
+ if (id->driver_info & BTUSB_WIDEBAND_SPEECH)
+ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+
if (id->driver_info & BTUSB_DIGIANSWER) {
data->cmdreq_type = USB_TYPE_VENDOR;
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c
index 8bafa650b5b0..1f55df93e4ce 100644
--- a/drivers/bluetooth/hci_ag6xx.c
+++ b/drivers/bluetooth/hci_ag6xx.c
@@ -27,7 +27,7 @@ struct ag6xx_data {
struct pbn_entry {
__le32 addr;
__le32 plen;
- __u8 data[0];
+ __u8 data[];
} __packed;
static int ag6xx_open(struct hci_uart *hu)
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 6dc1fbeb564b..4b3b14a34794 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -71,8 +71,6 @@ static int h4_close(struct hci_uart *hu)
{
struct h4_struct *h4 = hu->priv;
- hu->priv = NULL;
-
BT_DBG("hu %p", hu);
skb_queue_purge(&h4->txq);
@@ -85,7 +83,7 @@ static int h4_close(struct hci_uart *hu)
return 0;
}
-/* Enqueue frame for transmittion (padding, crc, etc) */
+/* Enqueue frame for transmission (padding, crc, etc) */
static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
struct h4_struct *h4 = hu->priv;
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 0b14547482a7..106c110efe56 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -11,6 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>
@@ -177,7 +178,7 @@ static void h5_peer_reset(struct hci_uart *hu)
{
struct h5 *h5 = hu->priv;
- BT_ERR("Peer device has reset");
+ bt_dev_err(hu->hdev, "Peer device has reset");
h5->state = H5_UNINITIALIZED;
@@ -437,21 +438,21 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
H5_HDR_LEN(hdr));
if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
- BT_ERR("Invalid header checksum");
+ bt_dev_err(hu->hdev, "Invalid header checksum");
h5_reset_rx(h5);
return 0;
}
if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
- BT_ERR("Out-of-order packet arrived (%u != %u)",
- H5_HDR_SEQ(hdr), h5->tx_ack);
+ bt_dev_err(hu->hdev, "Out-of-order packet arrived (%u != %u)",
+ H5_HDR_SEQ(hdr), h5->tx_ack);
h5_reset_rx(h5);
return 0;
}
if (h5->state != H5_ACTIVE &&
H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
- BT_ERR("Non-link packet received in non-active state");
+ bt_dev_err(hu->hdev, "Non-link packet received in non-active state");
h5_reset_rx(h5);
return 0;
}
@@ -474,7 +475,7 @@ static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
if (!h5->rx_skb) {
- BT_ERR("Can't allocate mem for new packet");
+ bt_dev_err(hu->hdev, "Can't allocate mem for new packet");
h5_reset_rx(h5);
return -ENOMEM;
}
@@ -550,7 +551,7 @@ static int h5_recv(struct hci_uart *hu, const void *data, int count)
if (h5->rx_pending > 0) {
if (*ptr == SLIP_DELIMITER) {
- BT_ERR("Too short H5 packet");
+ bt_dev_err(hu->hdev, "Too short H5 packet");
h5_reset_rx(h5);
continue;
}
@@ -577,13 +578,13 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
struct h5 *h5 = hu->priv;
if (skb->len > 0xfff) {
- BT_ERR("Packet too long (%u bytes)", skb->len);
+ bt_dev_err(hu->hdev, "Packet too long (%u bytes)", skb->len);
kfree_skb(skb);
return 0;
}
if (h5->state != H5_ACTIVE) {
- BT_ERR("Ignoring HCI data in non-active state");
+ bt_dev_err(hu->hdev, "Ignoring HCI data in non-active state");
kfree_skb(skb);
return 0;
}
@@ -600,7 +601,7 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
break;
default:
- BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
+ bt_dev_err(hu->hdev, "Unknown packet type %u", hci_skb_pkt_type(skb));
kfree_skb(skb);
break;
}
@@ -656,7 +657,7 @@ static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
int i;
if (!valid_packet_type(pkt_type)) {
- BT_ERR("Unknown packet type %u", pkt_type);
+ bt_dev_err(hu->hdev, "Unknown packet type %u", pkt_type);
return NULL;
}
@@ -733,7 +734,7 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
}
skb_queue_head(&h5->unrel, skb);
- BT_ERR("Could not dequeue pkt because alloc_skb failed");
+ bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed");
}
spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
@@ -753,7 +754,7 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
}
skb_queue_head(&h5->rel, skb);
- BT_ERR("Could not dequeue pkt because alloc_skb failed");
+ bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed");
}
unlock:
@@ -785,7 +786,6 @@ static const struct hci_uart_proto h5p = {
static int h5_serdev_probe(struct serdev_device *serdev)
{
- const struct acpi_device_id *match;
struct device *dev = &serdev->dev;
struct h5 *h5;
@@ -800,6 +800,8 @@ static int h5_serdev_probe(struct serdev_device *serdev)
serdev_device_set_drvdata(serdev, h5);
if (has_acpi_companion(dev)) {
+ const struct acpi_device_id *match;
+
match = acpi_match_device(dev->driver->acpi_match_table, dev);
if (!match)
return -ENODEV;
@@ -810,8 +812,17 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (h5->vnd->acpi_gpio_map)
devm_acpi_dev_add_driver_gpios(dev,
h5->vnd->acpi_gpio_map);
+ } else {
+ const void *data;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ h5->vnd = (const struct h5_vnd *)data;
}
+
h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(h5->enable_gpio))
return PTR_ERR(h5->enable_gpio);
@@ -1003,6 +1014,15 @@ static const struct dev_pm_ops h5_serdev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(h5_serdev_suspend, h5_serdev_resume)
};
+static const struct of_device_id rtl_bluetooth_of_match[] = {
+#ifdef CONFIG_BT_HCIUART_RTL
+ { .compatible = "realtek,rtl8822cs-bt",
+ .data = (const void *)&rtl_vnd },
+#endif
+ { },
+};
+MODULE_DEVICE_TABLE(of, rtl_bluetooth_of_match);
+
static struct serdev_device_driver h5_serdev_driver = {
.probe = h5_serdev_probe,
.remove = h5_serdev_remove,
@@ -1010,6 +1030,7 @@ static struct serdev_device_driver h5_serdev_driver = {
.name = "hci_uart_h5",
.acpi_match_table = ACPI_PTR(h5_acpi_match),
.pm = &h5_serdev_pm_ops,
+ .of_match_table = rtl_bluetooth_of_match,
},
};
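The probe path above branches on has_acpi_companion() and falls back to of_device_get_match_data(). A more compact alternative that existed at the time is device_get_match_data() from <linux/property.h>, which resolves the match data for either firmware type; a hedged sketch (it covers only the .data/.driver_data lookup, not the ACPI id string the real probe also records):

static int example_probe(struct serdev_device *serdev)
{
	const struct h5_vnd *vnd;

	/* resolves .driver_data/.data for ACPI and DT matches alike */
	vnd = device_get_match_data(&serdev->dev);
	if (!vnd)
		return -ENODEV;

	/* ... continue probing with vnd ... */
	return 0;
}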
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 31f25153087d..f1299da6eed8 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -49,7 +49,7 @@
struct hci_lpm_pkt {
__u8 opcode;
__u8 dlen;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct intel_device {
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index d6e0c99ee5eb..439392b1c043 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
+#include <linux/mutex.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -69,7 +70,8 @@ enum qca_flags {
QCA_IBS_ENABLED,
QCA_DROP_VENDOR_EVENT,
QCA_SUSPENDING,
- QCA_MEMDUMP_COLLECTION
+ QCA_MEMDUMP_COLLECTION,
+ QCA_HW_ERROR_EVENT
};
@@ -138,18 +140,19 @@ struct qca_data {
u32 tx_idle_delay;
struct timer_list wake_retrans_timer;
u32 wake_retrans;
- struct timer_list memdump_timer;
struct workqueue_struct *workqueue;
struct work_struct ws_awake_rx;
struct work_struct ws_awake_device;
struct work_struct ws_rx_vote_off;
struct work_struct ws_tx_vote_off;
struct work_struct ctrl_memdump_evt;
+ struct delayed_work ctrl_memdump_timeout;
struct qca_memdump_data *qca_memdump;
unsigned long flags;
struct completion drop_ev_comp;
wait_queue_head_t suspend_wait_q;
enum qca_memdump_states memdump_state;
+ struct mutex hci_memdump_lock;
/* For debugging purpose */
u64 ibs_sent_wacks;
@@ -522,23 +525,28 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
hci_uart_tx_wakeup(hu);
}
-static void hci_memdump_timeout(struct timer_list *t)
+
+static void qca_controller_memdump_timeout(struct work_struct *work)
{
- struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
+ struct qca_data *qca = container_of(work, struct qca_data,
+ ctrl_memdump_timeout.work);
struct hci_uart *hu = qca->hu;
- struct qca_memdump_data *qca_memdump = qca->qca_memdump;
- char *memdump_buf = qca_memdump->memdump_buf_tail;
-
- bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
- /* Inject hw error event to reset the device and driver. */
- hci_reset_dev(hu->hdev);
- vfree(memdump_buf);
- kfree(qca_memdump);
- qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
- del_timer(&qca->memdump_timer);
- cancel_work_sync(&qca->ctrl_memdump_evt);
+
+ mutex_lock(&qca->hci_memdump_lock);
+ if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
+ qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
+ if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
+ /* Inject hw error event to reset the device
+ * and driver.
+ */
+ hci_reset_dev(hu->hdev);
+ }
+ }
+
+ mutex_unlock(&qca->hci_memdump_lock);
}
+
/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
@@ -558,6 +566,7 @@ static int qca_open(struct hci_uart *hu)
skb_queue_head_init(&qca->tx_wait_q);
skb_queue_head_init(&qca->rx_memdump_q);
spin_lock_init(&qca->hci_ibs_lock);
+ mutex_init(&qca->hci_memdump_lock);
qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
if (!qca->workqueue) {
BT_ERR("QCA Workqueue not initialized properly");
@@ -570,6 +579,8 @@ static int qca_open(struct hci_uart *hu)
INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
+ INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout,
+ qca_controller_memdump_timeout);
init_waitqueue_head(&qca->suspend_wait_q);
qca->hu = hu;
@@ -596,7 +607,6 @@ static int qca_open(struct hci_uart *hu)
timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
- timer_setup(&qca->memdump_timer, hci_memdump_timeout, 0);
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
qca->tx_idle_delay, qca->wake_retrans);
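The memdump timeout moves from a timer_list to a delayed_work because the new handler takes hci_memdump_lock, a mutex: timer callbacks run in atomic context and may not sleep, while work items run in process context. A minimal sketch of the conversion pattern, with hypothetical names:

struct foo {
	struct delayed_work timeout_work;
	struct mutex lock;
};

static void foo_timeout(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, timeout_work.work);

	mutex_lock(&f->lock);	/* legal here, not in a timer callback */
	/* ... handle the timeout ... */
	mutex_unlock(&f->lock);
}

/* setup:  INIT_DELAYED_WORK(&f->timeout_work, foo_timeout);
 * arming: queue_delayed_work(wq, &f->timeout_work, msecs_to_jiffies(ms));
 * cancel: cancel_delayed_work(&f->timeout_work);
 */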
@@ -677,7 +687,6 @@ static int qca_close(struct hci_uart *hu)
skb_queue_purge(&qca->rx_memdump_q);
del_timer(&qca->tx_idle_timer);
del_timer(&qca->wake_retrans_timer);
- del_timer(&qca->memdump_timer);
destroy_workqueue(qca->workqueue);
qca->hu = NULL;
@@ -963,11 +972,20 @@ static void qca_controller_memdump(struct work_struct *work)
while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
+ mutex_lock(&qca->hci_memdump_lock);
+ /* Skip processing the received packets if timeout detected. */
+ if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT) {
+ mutex_unlock(&qca->hci_memdump_lock);
+ return;
+ }
+
if (!qca_memdump) {
qca_memdump = kzalloc(sizeof(struct qca_memdump_data),
GFP_ATOMIC);
- if (!qca_memdump)
+ if (!qca_memdump) {
+ mutex_unlock(&qca->hci_memdump_lock);
return;
+ }
qca->qca_memdump = qca_memdump;
}
@@ -992,13 +1010,15 @@ static void qca_controller_memdump(struct work_struct *work)
if (!(dump_size)) {
bt_dev_err(hu->hdev, "Rx invalid memdump size");
kfree_skb(skb);
+ mutex_unlock(&qca->hci_memdump_lock);
return;
}
bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
dump_size);
- mod_timer(&qca->memdump_timer, (jiffies +
- msecs_to_jiffies(MEMDUMP_TIMEOUT_MS)));
+ queue_delayed_work(qca->workqueue,
+ &qca->ctrl_memdump_timeout,
+ msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));
skb_pull(skb, sizeof(dump_size));
memdump_buf = vmalloc(dump_size);
@@ -1016,6 +1036,7 @@ static void qca_controller_memdump(struct work_struct *work)
kfree(qca_memdump);
kfree_skb(skb);
qca->qca_memdump = NULL;
+ mutex_unlock(&qca->hci_memdump_lock);
return;
}
@@ -1046,16 +1067,20 @@ static void qca_controller_memdump(struct work_struct *work)
memdump_buf = qca_memdump->memdump_buf_head;
dev_coredumpv(&hu->serdev->dev, memdump_buf,
qca_memdump->received_dump, GFP_KERNEL);
- del_timer(&qca->memdump_timer);
+ cancel_delayed_work(&qca->ctrl_memdump_timeout);
kfree(qca->qca_memdump);
qca->qca_memdump = NULL;
qca->memdump_state = QCA_MEMDUMP_COLLECTED;
+ clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
}
+
+ mutex_unlock(&qca->hci_memdump_lock);
}
}
-int qca_controller_memdump_event(struct hci_dev *hdev, struct sk_buff *skb)
+static int qca_controller_memdump_event(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
@@ -1406,30 +1431,21 @@ static void qca_wait_for_dump_collection(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
- struct qca_memdump_data *qca_memdump = qca->qca_memdump;
- char *memdump_buf = NULL;
wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS);
clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
- if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
- bt_dev_err(hu->hdev, "Clearing the buffers due to timeout");
- if (qca_memdump)
- memdump_buf = qca_memdump->memdump_buf_tail;
- vfree(memdump_buf);
- kfree(qca_memdump);
- qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
- del_timer(&qca->memdump_timer);
- cancel_work_sync(&qca->ctrl_memdump_evt);
- }
}
static void qca_hw_error(struct hci_dev *hdev, u8 code)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
+ struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+ char *memdump_buf = NULL;
+ set_bit(QCA_HW_ERROR_EVENT, &qca->flags);
bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
@@ -1449,6 +1465,23 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
bt_dev_info(hdev, "waiting for dump to complete");
qca_wait_for_dump_collection(hdev);
}
+
+ if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
+ bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
+ mutex_lock(&qca->hci_memdump_lock);
+ if (qca_memdump)
+ memdump_buf = qca_memdump->memdump_buf_head;
+ vfree(memdump_buf);
+ kfree(qca_memdump);
+ qca->qca_memdump = NULL;
+ qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
+ cancel_delayed_work(&qca->ctrl_memdump_timeout);
+ skb_queue_purge(&qca->rx_memdump_q);
+ mutex_unlock(&qca->hci_memdump_lock);
+ cancel_work_sync(&qca->ctrl_memdump_evt);
+ }
+
+ clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
}
static void qca_cmd_timeout(struct hci_dev *hdev)
@@ -1529,9 +1562,11 @@ static int qca_power_on(struct hci_dev *hdev)
ret = qca_wcn3990_init(hu);
} else {
qcadev = serdev_device_get_drvdata(hu->serdev);
- gpiod_set_value_cansleep(qcadev->bt_en, 1);
- /* Controller needs time to bootup. */
- msleep(150);
+ if (qcadev->bt_en) {
+ gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ /* Controller needs time to bootup. */
+ msleep(150);
+ }
}
return ret;
@@ -1717,7 +1752,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
host_set_baudrate(hu, 2400);
qca_send_power_pulse(hu, false);
qca_regulator_disable(qcadev);
- } else {
+ } else if (qcadev->bt_en) {
gpiod_set_value_cansleep(qcadev->bt_en, 0);
}
}
@@ -1726,9 +1761,11 @@ static int qca_power_off(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
/* Stop sending shutdown command if soc crashes. */
- if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
+ if (qca_is_wcn399x(soc_type)
+ && qca->memdump_state == QCA_MEMDUMP_IDLE) {
qca_send_pre_shutdown_cmd(hdev);
usleep_range(8000, 10000);
}
@@ -1755,7 +1792,11 @@ static int qca_regulator_enable(struct qca_serdev *qcadev)
power->vregs_on = true;
- return 0;
+ ret = clk_prepare_enable(qcadev->susclk);
+ if (ret)
+ qca_regulator_disable(qcadev);
+
+ return ret;
}
static void qca_regulator_disable(struct qca_serdev *qcadev)
@@ -1773,6 +1814,8 @@ static void qca_regulator_disable(struct qca_serdev *qcadev)
regulator_bulk_disable(power->num_vregs, power->vreg_bulk);
power->vregs_on = false;
+
+ clk_disable_unprepare(qcadev->susclk);
}
static int qca_init_regulators(struct qca_power *qca,
@@ -1811,6 +1854,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
struct hci_dev *hdev;
const struct qca_vreg_data *data;
int err;
+ bool power_ctrl_enabled = true;
qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
if (!qcadev)
@@ -1839,6 +1883,12 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_power->vregs_on = false;
+ qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
+ if (IS_ERR(qcadev->susclk)) {
+ dev_err(&serdev->dev, "failed to acquire clk\n");
+ return PTR_ERR(qcadev->susclk);
+ }
+
device_property_read_u32(&serdev->dev, "max-speed",
&qcadev->oper_speed);
if (!qcadev->oper_speed)
@@ -1851,38 +1901,40 @@ static int qca_serdev_probe(struct serdev_device *serdev)
}
} else {
qcadev->btsoc_type = QCA_ROME;
- qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
+ qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(qcadev->bt_en)) {
- dev_err(&serdev->dev, "failed to acquire enable gpio\n");
- return PTR_ERR(qcadev->bt_en);
+ if (!qcadev->bt_en) {
+ dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
+ power_ctrl_enabled = false;
}
- qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
- if (IS_ERR(qcadev->susclk)) {
- dev_err(&serdev->dev, "failed to acquire clk\n");
- return PTR_ERR(qcadev->susclk);
- }
-
- err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
- if (err)
- return err;
+ qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
+ if (!qcadev->susclk) {
+ dev_warn(&serdev->dev, "failed to acquire clk\n");
+ } else {
+ err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
+ if (err)
+ return err;
- err = clk_prepare_enable(qcadev->susclk);
- if (err)
- return err;
+ err = clk_prepare_enable(qcadev->susclk);
+ if (err)
+ return err;
+ }
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
BT_ERR("Rome serdev registration failed");
- clk_disable_unprepare(qcadev->susclk);
+ if (qcadev->susclk)
+ clk_disable_unprepare(qcadev->susclk);
return err;
}
}
- hdev = qcadev->serdev_hu.hdev;
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
- hdev->shutdown = qca_power_off;
+ if (power_ctrl_enabled) {
+ hdev = qcadev->serdev_hu.hdev;
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hdev->shutdown = qca_power_off;
+ }
return 0;
}
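The probe rework above switches to the *_optional acquisition helpers, under which a missing "enable" GPIO or susclk yields NULL rather than an error and the driver degrades to running without power control. A condensed sketch of the idiom, names illustrative:

static int example_get_resources(struct device *dev, struct gpio_desc **en,
				 struct clk **clk)
{
	/* NULL (not an error) when the property is simply absent */
	*en = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(*en))
		return PTR_ERR(*en);

	*clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(*clk))
		return PTR_ERR(*clk);

	/* callers then guard use: if (*en) gpiod_set_value_cansleep(...) */
	return 0;
}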
@@ -1893,7 +1945,7 @@ static void qca_serdev_remove(struct serdev_device *serdev)
if (qca_is_wcn399x(qcadev->btsoc_type))
qca_power_shutdown(&qcadev->serdev_hu);
- else
+ else if (qcadev->susclk)
clk_disable_unprepare(qcadev->susclk);
hci_uart_unregister_device(&qcadev->serdev_hu);
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index f078b2686418..f2756836093f 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -42,3 +42,14 @@ config CRYPTO_DEV_CHELSIO_TLS
To compile this driver as a module, choose M here: the module
will be called chtls.
+
+config CHELSIO_TLS_DEVICE
+ bool "Chelsio Inline KTLS Offload"
+ depends on CHELSIO_T4
+ depends on TLS_DEVICE
+ select CRYPTO_DEV_CHELSIO
+ default y
+ help
+ This flag enables support for kernel TLS offload over the Chelsio T6
+ crypto accelerator. CONFIG_CHELSIO_TLS_DEVICE can be enabled
+ only if CONFIG_TLS and CONFIG_TLS_DEVICE are enabled.
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
index a3c05e2f4562..0e9d035927e9 100644
--- a/drivers/crypto/chelsio/Makefile
+++ b/drivers/crypto/chelsio/Makefile
@@ -3,5 +3,8 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs := chcr_core.o chcr_algo.o
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+chcr-objs += chcr_ktls.o
+#endif
chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o
obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls/
diff --git a/drivers/crypto/chelsio/chcr_common.h b/drivers/crypto/chelsio/chcr_common.h
new file mode 100644
index 000000000000..33f589cbfba1
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_common.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
+
+#ifndef __CHCR_COMMON_H__
+#define __CHCR_COMMON_H__
+
+#include "cxgb4.h"
+
+#define CHCR_MAX_SALT 4
+#define CHCR_KEYCTX_MAC_KEY_SIZE_128 0
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_128 0
+#define CHCR_SCMD_CIPHER_MODE_AES_GCM 2
+#define CHCR_SCMD_CIPHER_MODE_AES_CTR 3
+#define CHCR_CPL_TX_SEC_PDU_LEN_64BIT 2
+#define CHCR_SCMD_SEQ_NO_CTRL_64BIT 3
+#define CHCR_SCMD_PROTO_VERSION_TLS 0
+#define CHCR_SCMD_PROTO_VERSION_GENERIC 4
+#define CHCR_SCMD_AUTH_MODE_GHASH 4
+#define AES_BLOCK_LEN 16
+
+enum chcr_state {
+ CHCR_INIT = 0,
+ CHCR_ATTACH,
+ CHCR_DETACH,
+};
+
+struct chcr_dev {
+ spinlock_t lock_chcr_dev; /* chcr dev structure lock */
+ enum chcr_state state;
+ atomic_t inflight;
+ int wqretry;
+ struct delayed_work detach_work;
+ struct completion detach_comp;
+ unsigned char tx_channel_id;
+};
+
+struct uld_ctx {
+ struct list_head entry;
+ struct cxgb4_lld_info lldi;
+ struct chcr_dev dev;
+};
+
+struct ktls_key_ctx {
+ __be32 ctx_hdr;
+ u8 salt[CHCR_MAX_SALT];
+ __be64 iv_to_auth;
+ unsigned char key[TLS_CIPHER_AES_GCM_128_KEY_SIZE +
+ TLS_CIPHER_AES_GCM_256_TAG_SIZE];
+};
+
+/* Crypto key context */
+#define KEY_CONTEXT_CTX_LEN_S 24
+#define KEY_CONTEXT_CTX_LEN_V(x) ((x) << KEY_CONTEXT_CTX_LEN_S)
+
+#define KEY_CONTEXT_SALT_PRESENT_S 10
+#define KEY_CONTEXT_SALT_PRESENT_V(x) ((x) << KEY_CONTEXT_SALT_PRESENT_S)
+#define KEY_CONTEXT_SALT_PRESENT_F KEY_CONTEXT_SALT_PRESENT_V(1U)
+
+#define KEY_CONTEXT_VALID_S 0
+#define KEY_CONTEXT_VALID_V(x) ((x) << KEY_CONTEXT_VALID_S)
+#define KEY_CONTEXT_VALID_F KEY_CONTEXT_VALID_V(1U)
+
+#define KEY_CONTEXT_CK_SIZE_S 6
+#define KEY_CONTEXT_CK_SIZE_V(x) ((x) << KEY_CONTEXT_CK_SIZE_S)
+
+#define KEY_CONTEXT_MK_SIZE_S 2
+#define KEY_CONTEXT_MK_SIZE_V(x) ((x) << KEY_CONTEXT_MK_SIZE_S)
+
+#define KEY_CONTEXT_OPAD_PRESENT_S 11
+#define KEY_CONTEXT_OPAD_PRESENT_V(x) ((x) << KEY_CONTEXT_OPAD_PRESENT_S)
+#define KEY_CONTEXT_OPAD_PRESENT_F KEY_CONTEXT_OPAD_PRESENT_V(1U)
+
+#define FILL_KEY_CTX_HDR(ck_size, mk_size, ctx_len) \
+ htonl(KEY_CONTEXT_MK_SIZE_V(mk_size) | \
+ KEY_CONTEXT_CK_SIZE_V(ck_size) | \
+ KEY_CONTEXT_VALID_F | \
+ KEY_CONTEXT_SALT_PRESENT_F | \
+ KEY_CONTEXT_CTX_LEN_V((ctx_len)))
+
+struct uld_ctx *assign_chcr_device(void);
+
+static inline void *chcr_copy_to_txd(const void *src, const struct sge_txq *q,
+ void *pos, int length)
+{
+ int left = (void *)q->stat - pos;
+ u64 *p;
+
+ if (likely(length <= left)) {
+ memcpy(pos, src, length);
+ pos += length;
+ } else {
+ memcpy(pos, src, left);
+ memcpy(q->desc, src + left, length - left);
+ pos = (void *)q->desc + (length - left);
+ }
+ /* 0-pad to multiple of 16 */
+ p = PTR_ALIGN(pos, 8);
+ if ((uintptr_t)p & 8) {
+ *p = 0;
+ return p + 1;
+ }
+ return p;
+}
+
+static inline unsigned int chcr_txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+static inline void chcr_txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+static inline void chcr_eth_txq_stop(struct sge_eth_txq *q)
+{
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+}
+
+static inline unsigned int chcr_sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
+
+static inline unsigned int chcr_flits_to_desc(unsigned int n)
+{
+ WARN_ON(n > SGE_MAX_WR_LEN / 8);
+ return DIV_ROUND_UP(n, 8);
+}
+#endif /* __CHCR_COMMON_H__ */
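chcr_copy_to_txd() above copies into the circular hardware descriptor ring: q->stat marks the end of the mapped ring, and anything past it wraps back to q->desc. A hedged usage sketch laying down a header and payload back to back:

static void *example_write_wr(struct sge_txq *q, void *pos,
			      const void *hdr, int hdr_len,
			      const void *payload, int payload_len)
{
	/* each call advances pos and transparently wraps at q->stat */
	pos = chcr_copy_to_txd(hdr, q, pos, hdr_len);
	return chcr_copy_to_txd(payload, q, pos, payload_len);
}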
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index e937605670ac..dfb53e746e51 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -28,13 +28,21 @@
static struct chcr_driver_data drv_data;
-typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
-static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
+typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
+static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+static void update_netdev_features(void);
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_FW6_PLD] = cpl_fw6_pld_handler,
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ [CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
+ [CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
+#endif
};
static struct cxgb4_uld_info chcr_uld_info = {
@@ -45,9 +53,9 @@ static struct cxgb4_uld_info chcr_uld_info = {
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
.rx_handler = chcr_uld_rx_handler,
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
.tx_handler = chcr_uld_tx_handler,
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
};
static void detach_work_fn(struct work_struct *work)
@@ -125,8 +133,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx)
atomic_set(&dev->inflight, 0);
mutex_lock(&drv_data.drv_mutex);
list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
- if (!drv_data.last_dev)
- drv_data.last_dev = u_ctx;
mutex_unlock(&drv_data.drv_mutex);
}
@@ -150,14 +156,13 @@ static int chcr_dev_move(struct uld_ctx *u_ctx)
return 0;
}
-static int cpl_fw6_pld_handler(struct chcr_dev *dev,
+static int cpl_fw6_pld_handler(struct adapter *adap,
unsigned char *input)
{
struct crypto_async_request *req;
struct cpl_fw6_pld *fw6_pld;
u32 ack_err_status = 0;
int error_status = 0;
- struct adapter *adap = padap(dev);
fw6_pld = (struct cpl_fw6_pld *)input;
req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
@@ -201,10 +206,11 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
}
u_ctx->lldi = *lld;
chcr_dev_init(u_ctx);
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
- if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
- chcr_add_xfrmops(lld);
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (lld->ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
+ chcr_enable_ktls(padap(&u_ctx->dev));
+#endif
out:
return u_ctx;
}
@@ -214,26 +220,37 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
{
struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
struct chcr_dev *dev = &u_ctx->dev;
+ struct adapter *adap = padap(dev);
const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;
- if (rpl->opcode != CPL_FW6_PLD) {
- pr_err("Unsupported opcode\n");
+ if (!work_handlers[rpl->opcode]) {
+ pr_err("Unsupported opcode %d received\n", rpl->opcode);
return 0;
}
if (!pgl)
- work_handlers[rpl->opcode](dev, (unsigned char *)&rsp[1]);
+ work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
else
- work_handlers[rpl->opcode](dev, pgl->va);
+ work_handlers[rpl->opcode](adap, pgl->va);
return 0;
}
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
+ /* If the skb's decrypted bit is set, it's a NIC TLS packet; otherwise
+ * it's an ipsec packet.
+ */
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (skb->decrypted)
+ return chcr_ktls_xmit(skb, dev);
+#endif
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
return chcr_ipsec_xmit(skb, dev);
+#endif
+ return 0;
}
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
static void chcr_detach_device(struct uld_ctx *u_ctx)
{
@@ -280,6 +297,24 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
return ret;
}
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+static void update_netdev_features(void)
+{
+ struct uld_ctx *u_ctx, *tmp;
+
+ mutex_lock(&drv_data.drv_mutex);
+ list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+ if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
+ chcr_add_xfrmops(&u_ctx->lldi);
+ }
+ list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+ if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
+ chcr_add_xfrmops(&u_ctx->lldi);
+ }
+ mutex_unlock(&drv_data.drv_mutex);
+}
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
static int __init chcr_crypto_init(void)
{
INIT_LIST_HEAD(&drv_data.act_dev);
@@ -289,6 +324,12 @@ static int __init chcr_crypto_init(void)
drv_data.last_dev = NULL;
cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
+ #ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ rtnl_lock();
+ update_netdev_features();
+ rtnl_unlock();
+ #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
return 0;
}
@@ -304,12 +345,20 @@ static void __exit chcr_crypto_exit(void)
list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
adap = padap(&u_ctx->dev);
memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
+ chcr_disable_ktls(adap);
+#endif
list_del(&u_ctx->entry);
kfree(u_ctx);
}
list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
adap = padap(&u_ctx->dev);
memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
+ chcr_disable_ktls(adap);
+#endif
list_del(&u_ctx->entry);
kfree(u_ctx);
}
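The rx path now dispatches through the opcode-indexed work_handlers[] table instead of hard-coding CPL_FW6_PLD, so new CPL replies (the two KTLS handlers above) register by filling a slot. A generic sketch of the pattern, with hypothetical names:

typedef int (*cpl_handler_t)(struct adapter *adap, unsigned char *input);

static cpl_handler_t handlers[NUM_CPL_CMDS];	/* sparse; NULL = unsupported */

static int example_dispatch(struct adapter *adap, u8 opcode,
			    unsigned char *input)
{
	if (!handlers[opcode]) {
		pr_err("Unsupported opcode %d received\n", opcode);
		return 0;
	}
	return handlers[opcode](adap, input);
}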
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index ad874d548aa5..b5b371b8d343 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -222,4 +222,11 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+void chcr_enable_ktls(struct adapter *adap);
+void chcr_disable_ktls(struct adapter *adap);
+int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
+int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
+int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
+#endif
#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 9da0f93a330b..9fd3b9d1ec2f 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -99,9 +99,7 @@ void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
netdev->hw_enc_features |= NETIF_F_HW_ESP;
netdev->features |= NETIF_F_HW_ESP;
- rtnl_lock();
netdev_change_features(netdev);
- rtnl_unlock();
}
}
diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c
new file mode 100644
index 000000000000..73658b71d4a3
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_ktls.c
@@ -0,0 +1,2020 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
+
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#include "chcr_ktls.h"
+#include "clip_tbl.h"
+
+static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
+/*
+ * chcr_ktls_save_keys: calculate and save crypto keys.
+ * @tx_info - driver specific tls info.
+ * @crypto_info - tls crypto information.
+ * @direction - TX/RX direction.
+ * return - SUCCESS/FAILURE.
+ */
+static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
+ struct tls_crypto_info *crypto_info,
+ enum tls_offload_ctx_dir direction)
+{
+ int ck_size, key_ctx_size, mac_key_size, keylen, ghash_size, ret;
+ unsigned char ghash_h[TLS_CIPHER_AES_GCM_256_TAG_SIZE];
+ struct tls12_crypto_info_aes_gcm_128 *info_128_gcm;
+ struct ktls_key_ctx *kctx = &tx_info->key_ctx;
+ struct crypto_cipher *cipher;
+ unsigned char *key, *salt;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ info_128_gcm =
+ (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ tx_info->salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
+ mac_key_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+ tx_info->iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
+ tx_info->iv = be64_to_cpu(*(__be64 *)info_128_gcm->iv);
+
+ ghash_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+ key = info_128_gcm->key;
+ salt = info_128_gcm->salt;
+ tx_info->record_no = *(u64 *)info_128_gcm->rec_seq;
+
+ /* The SCMD fields used when encrypting a full TLS
+ * record. It's a one-time calculation for the
+ * lifetime of the connection.
+ */
+ tx_info->scmd0_seqno_numivs =
+ SCMD_SEQ_NO_CTRL_V(CHCR_SCMD_SEQ_NO_CTRL_64BIT) |
+ SCMD_CIPH_AUTH_SEQ_CTRL_F |
+ SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_TLS) |
+ SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
+ SCMD_AUTH_MODE_V(CHCR_SCMD_AUTH_MODE_GHASH) |
+ SCMD_IV_SIZE_V(TLS_CIPHER_AES_GCM_128_IV_SIZE >> 1) |
+ SCMD_NUM_IVS_V(1);
+
+ /* keys will be sent inline. */
+ tx_info->scmd0_ivgen_hdrlen = SCMD_KEY_CTX_INLINE_F;
+
+ /* The SCMD fields used when encrypting a partial TLS
+ * record (no trailer and possibly a truncated payload).
+ */
+ tx_info->scmd0_short_seqno_numivs =
+ SCMD_CIPH_AUTH_SEQ_CTRL_F |
+ SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) |
+ SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_CTR) |
+ SCMD_IV_SIZE_V(AES_BLOCK_LEN >> 1);
+
+ tx_info->scmd0_short_ivgen_hdrlen =
+ tx_info->scmd0_ivgen_hdrlen | SCMD_AADIVDROP_F;
+
+ break;
+
+ default:
+ pr_err("GCM: cipher type 0x%x not supported\n",
+ crypto_info->cipher_type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ key_ctx_size = CHCR_KTLS_KEY_CTX_LEN +
+ roundup(keylen, 16) + ghash_size;
+ /* Calculate the H = CIPH(K, 0 repeated 16 times).
+ * It will go in the key context.
+ */
+ cipher = crypto_alloc_cipher("aes", 0, 0);
+ if (IS_ERR(cipher)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = crypto_cipher_setkey(cipher, key, keylen);
+ if (ret)
+ goto out1;
+
+ memset(ghash_h, 0, ghash_size);
+ crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+
+ /* fill the Key context */
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
+ kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ mac_key_size,
+ key_ctx_size >> 4);
+ } else {
+ ret = -EINVAL;
+ goto out1;
+ }
+
+ memcpy(kctx->salt, salt, tx_info->salt_size);
+ memcpy(kctx->key, key, keylen);
+ memcpy(kctx->key + keylen, ghash_h, ghash_size);
+ tx_info->key_ctx_len = key_ctx_size;
+
+out1:
+ crypto_free_cipher(cipher);
+out:
+ return ret;
+}
+
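The crypto step above derives the GHASH hash subkey H = AES_K(0^16) required by GCM by encrypting one all-zero block with the bare "aes" cipher. Isolated as a hedged helper (error handling as in the function above):

static int example_compute_ghash_h(const u8 *key, unsigned int keylen, u8 *h)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, keylen);
	if (!ret) {
		memset(h, 0, AES_BLOCK_LEN);
		crypto_cipher_encrypt_one(tfm, h, h);	/* H = E_K(0^16) */
	}

	crypto_free_cipher(tfm);
	return ret;
}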
+static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
+ int new_state)
+{
+ unsigned long flags;
+
+ /* This function can be called from both rx (interrupt context) and tx
+ * queue contexts.
+ */
+ spin_lock_irqsave(&tx_info->lock, flags);
+ switch (tx_info->connection_state) {
+ case KTLS_CONN_CLOSED:
+ tx_info->connection_state = new_state;
+ break;
+
+ case KTLS_CONN_ACT_OPEN_REQ:
+ /* only go forward if state is greater than current state. */
+ if (new_state <= tx_info->connection_state)
+ break;
+ /* update to the next state and also initialize TCB */
+ tx_info->connection_state = new_state;
+ /* FALLTHRU */
+ case KTLS_CONN_ACT_OPEN_RPL:
+ /* if we are stuck in this state, it means the tcb init might
+ * not have been received by HW; try sending it again.
+ */
+ if (!chcr_init_tcb_fields(tx_info))
+ tx_info->connection_state = KTLS_CONN_SET_TCB_REQ;
+ break;
+
+ case KTLS_CONN_SET_TCB_REQ:
+ /* only go forward if state is greater than current state. */
+ if (new_state <= tx_info->connection_state)
+ break;
+ /* update to the next state and check if l2t_state is valid */
+ tx_info->connection_state = new_state;
+ /* FALLTHRU */
+ case KTLS_CONN_SET_TCB_RPL:
+ /* Check if l2t state is valid, then move to ready state. */
+ if (cxgb4_check_l2t_valid(tx_info->l2te)) {
+ tx_info->connection_state = KTLS_CONN_TX_READY;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ctx);
+ }
+ break;
+
+ case KTLS_CONN_TX_READY:
+ /* nothing to be done here */
+ break;
+
+ default:
+ pr_err("unknown KTLS connection state\n");
+ break;
+ }
+ spin_unlock_irqrestore(&tx_info->lock, flags);
+
+ return tx_info->connection_state;
+}
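For reference, the progression the switch above implements (state names as introduced by this patch):

/*
 * KTLS_CONN_CLOSED
 *   -> KTLS_CONN_ACT_OPEN_REQ  (act-open sent)
 *   -> KTLS_CONN_ACT_OPEN_RPL  (tid granted; TCB init (re)sent)
 *   -> KTLS_CONN_SET_TCB_REQ
 *   -> KTLS_CONN_SET_TCB_RPL   (TX_READY once the l2t entry is valid)
 *   -> KTLS_CONN_TX_READY
 */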
+/*
+ * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection.
+ * @sk - tcp socket.
+ * @tx_info - driver specific tls info.
+ * @atid - connection active tid.
+ * return - send success/failure.
+ */
+static int chcr_ktls_act_open_req(struct sock *sk,
+ struct chcr_ktls_info *tx_info,
+ int atid)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct cpl_t6_act_open_req *cpl6;
+ struct cpl_act_open_req *cpl;
+ struct sk_buff *skb;
+ unsigned int len;
+ int qid_atid;
+ u64 options;
+
+ len = sizeof(*cpl6);
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (unlikely(!skb))
+ return -ENOMEM;
+ /* mark it a control pkt */
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
+
+ cpl6 = __skb_put_zero(skb, len);
+ cpl = (struct cpl_act_open_req *)cpl6;
+ INIT_TP_WR(cpl6, 0);
+ qid_atid = TID_QID_V(tx_info->rx_qid) |
+ TID_TID_V(atid);
+ OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid));
+ cpl->local_port = inet->inet_sport;
+ cpl->peer_port = inet->inet_dport;
+ cpl->local_ip = inet->inet_rcv_saddr;
+ cpl->peer_ip = inet->inet_daddr;
+
+ /* fill first 64 bit option field. */
+ options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
+ SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
+ cpl->opt0 = cpu_to_be64(options);
+
+ /* next 64 bit option field. */
+ options =
+ TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
+ cpl->opt2 = htonl(options);
+
+ return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
+}
+
+/*
+ * chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection.
+ * @sk - tcp socket.
+ * @tx_info - driver specific tls info.
+ * @atid - connection active tid.
+ * return - send success/failure.
+ */
+static int chcr_ktls_act_open_req6(struct sock *sk,
+ struct chcr_ktls_info *tx_info,
+ int atid)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct cpl_t6_act_open_req6 *cpl6;
+ struct cpl_act_open_req6 *cpl;
+ struct sk_buff *skb;
+ unsigned int len;
+ int qid_atid;
+ u64 options;
+
+ len = sizeof(*cpl6);
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (unlikely(!skb))
+ return -ENOMEM;
+ /* mark it a control pkt */
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
+
+ cpl6 = __skb_put_zero(skb, len);
+ cpl = (struct cpl_act_open_req6 *)cpl6;
+ INIT_TP_WR(cpl6, 0);
+ qid_atid = TID_QID_V(tx_info->rx_qid) | TID_TID_V(atid);
+ OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid));
+ cpl->local_port = inet->inet_sport;
+ cpl->peer_port = inet->inet_dport;
+ cpl->local_ip_hi = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[0];
+ cpl->local_ip_lo = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[8];
+ cpl->peer_ip_hi = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[0];
+ cpl->peer_ip_lo = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[8];
+
+ /* first 64 bit option field. */
+ options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
+ SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
+ cpl->opt0 = cpu_to_be64(options);
+ /* next 64 bit option field. */
+ options =
+ TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
+ cpl->opt2 = htonl(options);
+
+ return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
+}
+
+/*
+ * chcr_setup_connection: create a TCB entry so that TP will form tcp packets.
+ * @sk - tcp socket.
+ * @tx_info - driver specific tls info.
+ * return: NET_TX_OK/NET_XMIT_DROP
+ */
+static int chcr_setup_connection(struct sock *sk,
+ struct chcr_ktls_info *tx_info)
+{
+ struct tid_info *t = &tx_info->adap->tids;
+ int atid, ret = 0;
+
+ atid = cxgb4_alloc_atid(t, tx_info);
+ if (atid == -1)
+ return -EINVAL;
+
+ tx_info->atid = atid;
+ tx_info->ip_family = sk->sk_family;
+
+ if (sk->sk_family == AF_INET ||
+ (sk->sk_family == AF_INET6 && !sk->sk_ipv6only &&
+ ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)) {
+ tx_info->ip_family = AF_INET;
+ ret = chcr_ktls_act_open_req(sk, tx_info, atid);
+ } else {
+ tx_info->ip_family = AF_INET6;
+ ret =
+ cxgb4_clip_get(tx_info->netdev,
+ (const u32 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8,
+ 1);
+ if (ret)
+ goto out;
+ ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
+ }
+
+ /* if the return type is NET_XMIT_CN, the msg will be sent but delayed;
+ * mark ret as success. For any other return type, clear the atid and
+ * return that failure.
+ */
+ if (ret) {
+ if (ret == NET_XMIT_CN)
+ ret = 0;
+ else
+ cxgb4_free_atid(t, atid);
+ goto out;
+ }
+
+ /* update the connection state */
+ chcr_ktls_update_connection_state(tx_info, KTLS_CONN_ACT_OPEN_REQ);
+out:
+ return ret;
+}
+
+/*
+ * chcr_set_tcb_field: update tcb fields.
+ * @tx_info - driver specific tls info.
+ * @word - TCB word.
+ * @mask - TCB word related mask.
+ * @val - TCB word related value.
+ * @no_reply - set 1 if not looking for TP response.
+ */
+static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
+ u64 mask, u64 val, int no_reply)
+{
+ struct cpl_set_tcb_field *req;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
+ INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, tx_info->tid);
+ req->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
+ NO_REPLY_V(no_reply));
+ req->word_cookie = htons(TCB_WORD_V(word));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
+ return cxgb4_ofld_send(tx_info->netdev, skb);
+}
+
+/*
+ * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
+ * @tx_info - driver specific tls info.
+ * return: NET_TX_OK/NET_XMIT_DROP.
+ */
+static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
+{
+ return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
+ TCB_T_STATE_V(TCB_T_STATE_M),
+ CHCR_TCB_STATE_CLOSED, 1);
+}
+
+/*
+ * chcr_ktls_dev_del: callback for tls_dev_del.
+ * Remove the tid and l2t entry and close the connection.
+ * Called on a per-connection basis.
+ * @netdev - net device.
+ * @tls_ctx - tls context.
+ * @direction - TX/RX crypto direction
+ */
+static void chcr_ktls_dev_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx =
+ chcr_get_ktls_tx_context(tls_ctx);
+ struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
+ struct sock *sk;
+
+ if (!tx_info)
+ return;
+ sk = tx_info->sk;
+
+ spin_lock(&tx_info->lock);
+ tx_info->connection_state = KTLS_CONN_CLOSED;
+ spin_unlock(&tx_info->lock);
+
+ /* clear l2t entry */
+ if (tx_info->l2te)
+ cxgb4_l2t_release(tx_info->l2te);
+
+ /* clear clip entry */
+ if (tx_info->ip_family == AF_INET6)
+ cxgb4_clip_release(netdev,
+ (const u32 *)&sk->sk_v6_daddr.in6_u.u6_addr8,
+ 1);
+
+ /* clear tid */
+ if (tx_info->tid != -1) {
+ /* clear tcb state and then release tid */
+ chcr_ktls_mark_tcb_close(tx_info);
+ cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ tx_info->tid, tx_info->ip_family);
+ }
+
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_connection_close);
+ kvfree(tx_info);
+ tx_ctx->chcr_info = NULL;
+}
+
+/*
+ * chcr_ktls_dev_add: callback for tls_dev_add.
+ * Create a tcb entry for TP and add an l2t entry for the connection; then
+ * derive the keys and save them locally.
+ * @netdev - net device.
+ * @sk - tcp socket.
+ * @direction - TX/RX crypto direction
+ * @crypto_info - tls crypto information.
+ * @start_offload_tcp_sn - tcp sequence number from which offload starts.
+ * return: SUCCESS/FAILURE.
+ */
+static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_info *tx_info;
+ struct dst_entry *dst;
+ struct adapter *adap;
+ struct port_info *pi;
+ struct neighbour *n;
+ u8 daaddr[16];
+ int ret = -1;
+
+ tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+
+ pi = netdev_priv(netdev);
+ adap = pi->adapter;
+ if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
+ pr_err("not expecting for RX direction\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (tx_ctx->chcr_info) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
+ if (!tx_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock_init(&tx_info->lock);
+
+ /* clear connection state */
+ spin_lock(&tx_info->lock);
+ tx_info->connection_state = KTLS_CONN_CLOSED;
+ spin_unlock(&tx_info->lock);
+
+ tx_info->sk = sk;
+ /* initialize tid and atid to -1; 0 is also a valid id. */
+ tx_info->tid = -1;
+ tx_info->atid = -1;
+
+ tx_info->adap = adap;
+ tx_info->netdev = netdev;
+ tx_info->first_qset = pi->first_qset;
+ tx_info->tx_chan = pi->tx_chan;
+ tx_info->smt_idx = pi->smt_idx;
+ tx_info->port_id = pi->port_id;
+
+ tx_info->rx_qid = chcr_get_first_rx_qid(adap);
+ if (unlikely(tx_info->rx_qid < 0))
+ goto out2;
+
+ tx_info->prev_seq = start_offload_tcp_sn;
+ tx_info->tcp_start_seq_number = start_offload_tcp_sn;
+
+ /* save crypto keys */
+ ret = chcr_ktls_save_keys(tx_info, crypto_info, direction);
+ if (ret < 0)
+ goto out2;
+
+ /* get peer ip */
+ if (sk->sk_family == AF_INET ||
+ (sk->sk_family == AF_INET6 && !sk->sk_ipv6only &&
+ ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)) {
+ memcpy(daaddr, &sk->sk_daddr, 4);
+ } else {
+ memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
+ }
+
+ /* get the l2t index */
+ dst = sk_dst_get(sk);
+ if (!dst) {
+ pr_err("DST entry not found\n");
+ goto out2;
+ }
+ n = dst_neigh_lookup(dst, daaddr);
+ if (!n || !n->dev) {
+ pr_err("neighbour not found\n");
+ dst_release(dst);
+ goto out2;
+ }
+ tx_info->l2te = cxgb4_l2t_get(adap->l2t, n, n->dev, 0);
+
+ neigh_release(n);
+ dst_release(dst);
+
+ if (!tx_info->l2te) {
+ pr_err("l2t entry not found\n");
+ goto out2;
+ }
+
+ tx_ctx->chcr_info = tx_info;
+
+ /* create a filter and call cxgb4_l2t_send to send the packet out, which
+ * will take care of updating the l2t entry in hw if not already done.
+ */
+ ret = chcr_setup_connection(sk, tx_info);
+ if (ret)
+ goto out2;
+
+ atomic64_inc(&adap->chcr_stats.ktls_tx_connection_open);
+ return 0;
+out2:
+ kvfree(tx_info);
+out:
+ atomic64_inc(&adap->chcr_stats.ktls_tx_connection_fail);
+ return ret;
+}
+
+static const struct tlsdev_ops chcr_ktls_ops = {
+ .tls_dev_add = chcr_ktls_dev_add,
+ .tls_dev_del = chcr_ktls_dev_del,
+};
+
+/*
+ * chcr_enable_ktls: add NETIF_F_HW_TLS_TX flag in all the ports.
+ */
+void chcr_enable_ktls(struct adapter *adap)
+{
+ struct net_device *netdev;
+ int i;
+
+ for_each_port(adap, i) {
+ netdev = adap->port[i];
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->tlsdev_ops = &chcr_ktls_ops;
+ }
+}
+
+/*
+ * chcr_disable_ktls: remove NETIF_F_HW_TLS_TX flag from all the ports.
+ */
+void chcr_disable_ktls(struct adapter *adap)
+{
+ struct net_device *netdev;
+ int i;
+
+ for_each_port(adap, i) {
+ netdev = adap->port[i];
+ netdev->features &= ~NETIF_F_HW_TLS_TX;
+ netdev->hw_features &= ~NETIF_F_HW_TLS_TX;
+ netdev->tlsdev_ops = NULL;
+ }
+}
+
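Once chcr_enable_ktls() advertises NETIF_F_HW_TLS_TX and installs
chcr_ktls_ops, the TLS stack calls tls_dev_add (chcr_ktls_dev_add above)
when userspace enables kernel TLS TX on a connected socket egressing one of
these ports. A hedged userspace sketch of that trigger, using the standard
kTLS ABI (enable_ktls_tx is an illustrative name; SOL_TLS may need defining
on older libcs):

    #include <netinet/tcp.h>
    #include <linux/tls.h>
    #include <sys/socket.h>

    #ifndef SOL_TLS
    #define SOL_TLS 282
    #endif

    static int enable_ktls_tx(int fd,
                              const struct tls12_crypto_info_aes_gcm_128 *ci)
    {
            if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
                    return -1;
            /* with NETIF_F_HW_TLS_TX set, this invokes tls_dev_add() */
            return setsockopt(fd, SOL_TLS, TLS_TX, ci, sizeof(*ci));
    }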
+/*
+ * chcr_init_tcb_fields: Initialize tcb fields to handle TCP seq number
+ * handling.
+ * @tx_info - driver specific tls info.
+ * return: NET_TX_OK/NET_XMIT_DROP
+ */
+static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info)
+{
+ int ret = 0;
+
+ /* set tcb in offload and bypass */
+ ret =
+ chcr_set_tcb_field(tx_info, TCB_T_FLAGS_W,
+ TCB_T_FLAGS_V(TF_CORE_BYPASS_F | TF_NON_OFFLOAD_F),
+ TCB_T_FLAGS_V(TF_CORE_BYPASS_F), 1);
+ if (ret)
+ return ret;
+ /* reset snd_una and snd_next fields in tcb */
+ ret = chcr_set_tcb_field(tx_info, TCB_SND_UNA_RAW_W,
+ TCB_SND_NXT_RAW_V(TCB_SND_NXT_RAW_M) |
+ TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
+ 0, 1);
+ if (ret)
+ return ret;
+
+ /* reset send max */
+ ret = chcr_set_tcb_field(tx_info, TCB_SND_MAX_RAW_W,
+ TCB_SND_MAX_RAW_V(TCB_SND_MAX_RAW_M),
+ 0, 1);
+ if (ret)
+ return ret;
+
+ /* update the l2t index and request a tp reply to confirm the tcb is
+  * initialised to handle tx traffic.
+ */
+ ret = chcr_set_tcb_field(tx_info, TCB_L2T_IX_W,
+ TCB_L2T_IX_V(TCB_L2T_IX_M),
+ TCB_L2T_IX_V(tx_info->l2te->idx), 0);
+ return ret;
+}
+
+/*
+ * chcr_ktls_cpl_act_open_rpl: connection reply received from TP.
+ */
+int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input)
+{
+ const struct cpl_act_open_rpl *p = (void *)input;
+ struct chcr_ktls_info *tx_info = NULL;
+ unsigned int atid, tid, status;
+ struct tid_info *t;
+
+ tid = GET_TID(p);
+ status = AOPEN_STATUS_G(ntohl(p->atid_status));
+ atid = TID_TID_G(AOPEN_ATID_G(ntohl(p->atid_status)));
+
+ t = &adap->tids;
+ tx_info = lookup_atid(t, atid);
+
+ if (!tx_info || tx_info->atid != atid) {
+ pr_err("tx_info or atid is not correct\n");
+ return -1;
+ }
+
+ if (!status) {
+ tx_info->tid = tid;
+ cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
+
+ cxgb4_free_atid(t, atid);
+ tx_info->atid = -1;
+ /* update the connection state */
+ chcr_ktls_update_connection_state(tx_info,
+ KTLS_CONN_ACT_OPEN_RPL);
+ }
+ return 0;
+}
+
+/*
+ * chcr_ktls_cpl_set_tcb_rpl: TCB reply received from TP.
+ */
+int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
+{
+ const struct cpl_set_tcb_rpl *p = (void *)input;
+ struct chcr_ktls_info *tx_info = NULL;
+ struct tid_info *t;
+ u32 tid;
+
+ tid = GET_TID(p);
+
+ t = &adap->tids;
+ tx_info = lookup_tid(t, tid);
+ if (!tx_info || tx_info->tid != tid) {
+ pr_err("tx_info or atid is not correct\n");
+ return -1;
+ }
+ /* update the connection state */
+ chcr_ktls_update_connection_state(tx_info, KTLS_CONN_SET_TCB_RPL);
+ return 0;
+}
+
+/*
+ * chcr_write_cpl_set_tcb_ulp: update tcb values.
+ * The TCB is responsible for creating the tcp headers, so all the related
+ * values must be kept up to date.
+ * @tx_info - driver specific tls info.
+ * @q - tx queue on which the packet is going out.
+ * @tid - TCB identifier.
+ * @pos - current index at which we should start writing.
+ * @word - TCB word.
+ * @mask - TCB word related mask.
+ * @val - TCB word related value.
+ * @reply - set 1 if looking for TP response.
+ * return - next position to write.
+ */
+static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q, u32 tid,
+ void *pos, u16 word, u64 mask,
+ u64 val, u32 reply)
+{
+ struct cpl_set_tcb_field_core *cpl;
+ struct ulptx_idata *idata;
+ struct ulp_txpkt *txpkt;
+ void *save_pos = NULL;
+ u8 buf[48] = {0};
+ int left;
+
+ left = (void *)q->q.stat - pos;
+ if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
+ if (!left) {
+ pos = q->q.desc;
+ } else {
+ save_pos = pos;
+ pos = buf;
+ }
+ }
+ /* ULP_TXPKT */
+ txpkt = pos;
+ txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
+ txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
+
+ /* ULPTX_IDATA sub-command */
+ idata = (struct ulptx_idata *)(txpkt + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
+ idata->len = htonl(sizeof(*cpl));
+ pos = idata + 1;
+
+ cpl = pos;
+ /* CPL_SET_TCB_FIELD */
+ OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+ cpl->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
+ NO_REPLY_V(!reply));
+ cpl->word_cookie = htons(TCB_WORD_V(word));
+ cpl->mask = cpu_to_be64(mask);
+ cpl->val = cpu_to_be64(val);
+
+ /* ULPTX_NOOP */
+ idata = (struct ulptx_idata *)(cpl + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
+ idata->len = htonl(0);
+
+ if (save_pos) {
+ pos = chcr_copy_to_txd(buf, &q->q, save_pos,
+ CHCR_SET_TCB_FIELD_LEN);
+ } else {
+ /* check again if we are at the end of the queue */
+ if (left == CHCR_SET_TCB_FIELD_LEN)
+ pos = q->q.desc;
+ else
+ pos = idata + 1;
+ }
+
+ return pos;
+}
+
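chcr_write_cpl_set_tcb_ulp() has to cope with the TX descriptor ring
wrapping: a CPL that ends exactly at q->q.stat restarts at q->q.desc, and
one that would straddle the boundary is first built in the stack buffer and
then copied in two pieces by chcr_copy_to_txd(). A simplified, hedged
sketch of such a wrap-aware copy (illustrative only; the driver's helper is
assumed to behave like this):

    static void *ring_copy(void *pos, const void *src, int len,
                           void *ring_start, void *ring_end)
    {
            int left = ring_end - pos;      /* bytes before the wrap */

            if (len <= left) {
                    memcpy(pos, src, len);
                    pos += len;
                    return pos == ring_end ? ring_start : pos;
            }
            memcpy(pos, src, left);                         /* ring tail */
            memcpy(ring_start, src + left, len - left);     /* wrapped head */
            return ring_start + (len - left);
    }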
+/*
+ * chcr_ktls_xmit_tcb_cpls: update tcb entry so that TP will create the header
+ * with updated values like tcp seq, ack, window etc.
+ * @tx_info - driver specific tls info.
+ * @q - TX queue.
+ * @tcp_seq
+ * @tcp_ack
+ * @tcp_win
+ * return: NETDEV_TX_BUSY/NET_TX_OK.
+ */
+static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q, u64 tcp_seq,
+ u64 tcp_ack, u64 tcp_win)
+{
+ bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
+ u32 len, cpl = 0, ndesc, wr_len;
+ struct fw_ulptx_wr *wr;
+ int credits;
+ void *pos;
+
+ wr_len = sizeof(*wr);
+ /* there can be max 4 cpls, check if we have enough credits */
+ len = wr_len + 4 * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
+ ndesc = DIV_ROUND_UP(len, 64);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+ /* make space for WR, we'll fill it later when we know all the cpls
+ * being sent out and have complete length.
+ */
+ wr = pos;
+ pos += wr_len;
+ /* update tx_max if it's a re-transmit or the first wr */
+ if (first_wr || tcp_seq != tx_info->prev_seq) {
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_TX_MAX_W,
+ TCB_TX_MAX_V(TCB_TX_MAX_M),
+ TCB_TX_MAX_V(tcp_seq), 0);
+ cpl++;
+ }
+ /* reset snd una if it's a re-transmit pkt */
+ if (tcp_seq != tx_info->prev_seq) {
+ /* reset snd_una */
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_SND_UNA_RAW_W,
+ TCB_SND_UNA_RAW_V
+ (TCB_SND_UNA_RAW_M),
+ TCB_SND_UNA_RAW_V(0), 0);
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ooo);
+ cpl++;
+ }
+ /* update ack */
+ if (first_wr || tx_info->prev_ack != tcp_ack) {
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_RCV_NXT_W,
+ TCB_RCV_NXT_V(TCB_RCV_NXT_M),
+ TCB_RCV_NXT_V(tcp_ack), 0);
+ tx_info->prev_ack = tcp_ack;
+ cpl++;
+ }
+ /* update receive window */
+ if (first_wr || tx_info->prev_win != tcp_win) {
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_RCV_WND_W,
+ TCB_RCV_WND_V(TCB_RCV_WND_M),
+ TCB_RCV_WND_V(tcp_win), 0);
+ tx_info->prev_win = tcp_win;
+ cpl++;
+ }
+
+ if (cpl) {
+ /* get the actual length */
+ len = wr_len + cpl * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
+ /* ULPTX wr */
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->cookie = 0;
+ /* fill len in wr field */
+ wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+
+ ndesc = DIV_ROUND_UP(len, 64);
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+ }
+ return 0;
+}
+
+/*
+ * chcr_ktls_skb_copy
+ * @nskb - new skb to which the frags are to be added.
+ * @skb - old skb from which frags will be copied.
+ */
+static void chcr_ktls_skb_copy(struct sk_buff *skb, struct sk_buff *nskb)
+{
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_shinfo(nskb)->frags[i] = skb_shinfo(skb)->frags[i];
+ __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
+ }
+
+ skb_shinfo(nskb)->nr_frags = skb_shinfo(skb)->nr_frags;
+ nskb->len += skb->data_len;
+ nskb->data_len = skb->data_len;
+ nskb->truesize += skb->data_len;
+}
+
+/*
+ * chcr_ktls_get_tx_flits
+ * returns the number of flits to be sent out; this includes the key
+ * context length, WR size and skb fragments.
+ */
+static unsigned int
+chcr_ktls_get_tx_flits(const struct sk_buff *skb, unsigned int key_ctx_len)
+{
+ return chcr_sgl_len(skb_shinfo(skb)->nr_frags) +
+ DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
+}
+
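The flit accounting above drives every credit check that follows: a flit is
8 bytes and a TX descriptor is 64 bytes, i.e. 8 flits. A sketch of the
conversion chcr_flits_to_desc() is assumed to perform, matching the
explicit DIV_ROUND_UP(flits, 8) used in chcr_ktls_tx_plaintxt() below:

    /* sketch: 8-byte flits, 64-byte (8-flit) descriptors */
    static inline unsigned int flits_to_desc_sketch(unsigned int flits)
    {
            return DIV_ROUND_UP(flits, 8);
    }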
+/*
+ * chcr_ktls_check_tcp_options: check whether the header carries any TCP
+ * option other than NOP/EOL (TP cannot regenerate other options).
+ * @tcp - tcp header of the packet.
+ * return: 1 if such an option is present, 0 otherwise.
+ */
+static int
+chcr_ktls_check_tcp_options(struct tcphdr *tcp)
+{
+ int cnt, opt, optlen;
+ u_char *cp;
+
+ cp = (u_char *)(tcp + 1);
+ cnt = (tcp->doff << 2) - sizeof(struct tcphdr);
+ for (; cnt > 0; cnt -= optlen, cp += optlen) {
+ opt = cp[0];
+ if (opt == TCPOPT_EOL)
+ break;
+ if (opt == TCPOPT_NOP) {
+ optlen = 1;
+ } else {
+ if (cnt < 2)
+ break;
+ optlen = cp[1];
+ if (optlen < 2 || optlen > cnt)
+ break;
+ }
+ switch (opt) {
+ case TCPOPT_NOP:
+ break;
+ default:
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * chcr_ktls_write_tcp_options : TP can't send out all the TCP options; we
+ * need to send them out separately.
+ * @tx_info - driver specific tls info.
+ * @skb - skb containing a partial record.
+ * @q - TX queue.
+ * @tx_chan - channel number.
+ * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
+ */
+static int
+chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
+ struct sge_eth_txq *q, uint32_t tx_chan)
+{
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ u32 ctrl, iplen, maclen;
+ struct ipv6hdr *ip6;
+ unsigned int ndesc;
+ struct tcphdr *tcp;
+ int len16, pktlen;
+ struct iphdr *ip;
+ int credits;
+ u8 buf[150];
+ void *pos;
+
+ iplen = skb_network_header_len(skb);
+ maclen = skb_mac_header_len(skb);
+
+ /* packet length = eth hdr len + ip hdr len + tcp hdr len
+ * (including options).
+ */
+ pktlen = skb->len - skb->data_len;
+
+ ctrl = sizeof(*cpl) + pktlen;
+ len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
+ /* check how many descriptors needed */
+ ndesc = DIV_ROUND_UP(len16, 4);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+ wr = pos;
+
+ /* Firmware work request header */
+ wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN_V(ctrl));
+
+ wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(len16));
+ wr->r3 = 0;
+
+ cpl = (void *)(wr + 1);
+
+ /* CPL header */
+ cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) | TXPKT_INTF_V(tx_chan) |
+ TXPKT_PF_V(tx_info->adap->pf));
+ cpl->pack = 0;
+ cpl->len = htons(pktlen);
+ /* checksum offload */
+ cpl->ctrl1 = 0;
+
+ pos = cpl + 1;
+
+ memcpy(buf, skb->data, pktlen);
+ if (tx_info->ip_family == AF_INET) {
+ /* we need to correct ip header len */
+ ip = (struct iphdr *)(buf + maclen);
+ ip->tot_len = htons(pktlen - maclen);
+ } else {
+ ip6 = (struct ipv6hdr *)(buf + maclen);
+ ip6->payload_len = htons(pktlen - maclen - iplen);
+ }
+ /* now take care of the tcp header: if fin is not set then clear the
+  * push bit as well; if fin is set, it will be sent last, so we need to
+  * update the tcp sequence number to match the last packet.
+ */
+ tcp = (struct tcphdr *)(buf + maclen + iplen);
+
+ if (!tcp->fin)
+ tcp->psh = 0;
+ else
+ tcp->seq = htonl(tx_info->prev_seq);
+
+ chcr_copy_to_txd(buf, &q->q, pos, pktlen);
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+ return 0;
+}
+
+/* chcr_ktls_skb_shift - Shifts the requested length of paged data from one
+ * skb to another.
+ * @tgt - buffer into which tail data gets added
+ * @skb - buffer from which the paged data comes
+ * @shiftlen - shift up to this many bytes
+ */
+static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
+ int shiftlen)
+{
+ skb_frag_t *fragfrom, *fragto;
+ int from, to, todo;
+
+ WARN_ON(shiftlen > skb->data_len);
+
+ todo = shiftlen;
+ from = 0;
+ to = 0;
+ fragfrom = &skb_shinfo(skb)->frags[from];
+
+ while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
+ fragfrom = &skb_shinfo(skb)->frags[from];
+ fragto = &skb_shinfo(tgt)->frags[to];
+
+ if (todo >= skb_frag_size(fragfrom)) {
+ *fragto = *fragfrom;
+ todo -= skb_frag_size(fragfrom);
+ from++;
+ to++;
+
+ } else {
+ __skb_frag_ref(fragfrom);
+ skb_frag_page_copy(fragto, fragfrom);
+ skb_frag_off_copy(fragto, fragfrom);
+ skb_frag_size_set(fragto, todo);
+
+ skb_frag_off_add(fragfrom, todo);
+ skb_frag_size_sub(fragfrom, todo);
+ todo = 0;
+
+ to++;
+ break;
+ }
+ }
+
+ /* Ready to "commit" this state change to tgt */
+ skb_shinfo(tgt)->nr_frags = to;
+
+ /* Reposition in the original skb */
+ to = 0;
+ while (from < skb_shinfo(skb)->nr_frags)
+ skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
+
+ skb_shinfo(skb)->nr_frags = to;
+
+ WARN_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
+
+ skb->len -= shiftlen;
+ skb->data_len -= shiftlen;
+ skb->truesize -= shiftlen;
+ tgt->len += shiftlen;
+ tgt->data_len += shiftlen;
+ tgt->truesize += shiftlen;
+
+ return shiftlen;
+}
+
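A hedged usage sketch of chcr_ktls_skb_shift(), mirroring how
chcr_ktls_xmit() below splits off the bytes of the record that ends first
when several records share one TCP segment:

    struct sk_buff *nskb = alloc_skb(0, GFP_KERNEL);

    if (nskb) {
            /* nskb takes the first tls_end_offset bytes of paged data;
             * skb keeps the remainder. len, data_len and truesize of
             * both skbs are adjusted by the helper.
             */
            chcr_ktls_skb_shift(nskb, skb, tls_end_offset);
    }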
+/*
+ * chcr_ktls_xmit_wr_complete: This sends out the complete record. If the skb
+ * received carries only a partial end part of the record, send out the
+ * complete record, so that the crypto block can generate the TAG/HASH.
+ * @skb - segment which has the complete or partial end part.
+ * @tx_info - driver specific tls info.
+ * @q - TX queue.
+ * @tcp_seq
+ * @tcp_push - tcp push bit.
+ * @mss - segment size.
+ * return: NETDEV_TX_BUSY/NET_TX_OK.
+ */
+static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
+ struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q, u32 tcp_seq,
+ bool tcp_push, u32 mss)
+{
+ u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start;
+ struct adapter *adap = tx_info->adap;
+ int credits, left, last_desc;
+ struct tx_sw_desc *sgl_sdesc;
+ struct cpl_tx_data *tx_data;
+ struct cpl_tx_sec_pdu *cpl;
+ struct ulptx_idata *idata;
+ struct ulp_txpkt *ulptx;
+ struct fw_ulptx_wr *wr;
+ void *pos;
+ u64 *end;
+
+ /* get the number of flits required */
+ flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len);
+ /* number of descriptors */
+ ndesc = chcr_flits_to_desc(flits);
+ /* check if enough credits available */
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ /* Credits are below the threshold values, stop the queue after
+ * injecting the Work Request for this packet.
+ */
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+ end = (u64 *)pos + flits;
+ /* FW_ULPTX_WR */
+ wr = pos;
+ /* WR will need len16 */
+ len16 = DIV_ROUND_UP(flits, 2);
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+ wr->cookie = 0;
+ pos += sizeof(*wr);
+ /* ULP_TXPKT */
+ ulptx = pos;
+ ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) |
+ ULP_TXPKT_RO_F);
+ ulptx->len = htonl(len16 - 1);
+ /* ULPTX_IDATA sub-command */
+ idata = (struct ulptx_idata *)(ulptx + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
+ /* idata length will include cpl_tx_sec_pdu + key context size +
+ * cpl_tx_data header.
+ */
+ idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
+ sizeof(*tx_data));
+ /* SEC CPL */
+ cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
+ cpl->op_ivinsrtofst =
+ htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
+ CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1));
+ cpl->pldlen = htonl(skb->data_len);
+
+ /* encryption should start after tls header size + iv size */
+ cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1;
+
+ cpl->aadstart_cipherstop_hi =
+ htonl(CPL_TX_SEC_PDU_AADSTART_V(1) |
+ CPL_TX_SEC_PDU_AADSTOP_V(TLS_HEADER_SIZE) |
+ CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
+
+ /* authentication will also start after tls header + iv size */
+ cpl->cipherstop_lo_authinsert =
+ htonl(CPL_TX_SEC_PDU_AUTHSTART_V(cipher_start) |
+ CPL_TX_SEC_PDU_AUTHSTOP_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE) |
+ CPL_TX_SEC_PDU_AUTHINSERT_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE));
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ cpl->seqno_numivs = htonl(tx_info->scmd0_seqno_numivs);
+ cpl->ivgen_hdrlen = htonl(tx_info->scmd0_ivgen_hdrlen);
+ cpl->scmd1 = cpu_to_be64(tx_info->record_no);
+
+ pos = cpl + 1;
+ /* check if space left to fill the keys */
+ left = (void *)q->q.stat - pos;
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+
+ pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
+ tx_info->key_ctx_len);
+ left = (void *)q->q.stat - pos;
+
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+ /* CPL_TX_DATA */
+ tx_data = (void *)pos;
+ OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
+ tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(skb->data_len));
+
+ tx_data->rsvd = htonl(tcp_seq);
+
+ tx_data->flags = htonl(TX_BYPASS_F);
+ if (tcp_push)
+ tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
+
+ /* check left again, it might go beyond queue limit */
+ pos = tx_data + 1;
+ left = (void *)q->q.stat - pos;
+
+ /* check the position again */
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+
+ /* send the complete packet except the header */
+ cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
+ sgl_sdesc->addr);
+ sgl_sdesc->skb = skb;
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
+ atomic64_inc(&adap->chcr_stats.ktls_tx_send_records);
+
+ return 0;
+}
+
+/*
+ * chcr_ktls_xmit_wr_short: This sends out partial records. If it's the
+ * middle part of a record, fetch the prior data to make it 16 byte
+ * aligned and only then send it out.
+ *
+ * @skb - skb containing a partial record.
+ * @tx_info - driver specific tls info.
+ * @q - TX queue.
+ * @tcp_seq
+ * @tcp_push - tcp push bit.
+ * @mss - segment size.
+ * @tls_rec_offset - offset from start of the tls record.
+ * @prior_data - data before the current segment, required to make this
+ * record 16 byte aligned.
+ * @prior_data_len - prior_data length (less than 16)
+ * return: NETDEV_TX_BUSY/NET_TX_OK.
+ */
+static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
+ struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q,
+ u32 tcp_seq, bool tcp_push, u32 mss,
+ u32 tls_rec_offset, u8 *prior_data,
+ u32 prior_data_len)
+{
+ struct adapter *adap = tx_info->adap;
+ u32 len16, wr_mid = 0, cipher_start;
+ unsigned int flits = 0, ndesc;
+ int credits, left, last_desc;
+ struct tx_sw_desc *sgl_sdesc;
+ struct cpl_tx_data *tx_data;
+ struct cpl_tx_sec_pdu *cpl;
+ struct ulptx_idata *idata;
+ struct ulp_txpkt *ulptx;
+ struct fw_ulptx_wr *wr;
+ __be64 iv_record;
+ void *pos;
+ u64 *end;
+
+ /* get the number of flits required, it's a partial record so 2 flits
+ * (AES_BLOCK_SIZE) will be added.
+ */
+ flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len) + 2;
+ /* get the correct 8 byte IV of this record */
+ iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no);
+ /* If it's a middle record and not 16 byte aligned to run AES CTR, we
+  * need to make it 16 byte aligned, so at least 2 extra flits of
+  * immediate data will be added.
+ */
+ if (prior_data_len)
+ flits += 2;
+ /* number of descriptors */
+ ndesc = chcr_flits_to_desc(flits);
+ /* check if enough credits available */
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+ end = (u64 *)pos + flits;
+ /* FW_ULPTX_WR */
+ wr = pos;
+ /* WR will need len16 */
+ len16 = DIV_ROUND_UP(flits, 2);
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+ wr->cookie = 0;
+ pos += sizeof(*wr);
+ /* ULP_TXPKT */
+ ulptx = pos;
+ ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) |
+ ULP_TXPKT_RO_F);
+ ulptx->len = htonl(len16 - 1);
+ /* ULPTX_IDATA sub-command */
+ idata = (struct ulptx_idata *)(ulptx + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
+ /* idata length will include cpl_tx_sec_pdu + key context size +
+ * cpl_tx_data header.
+ */
+ idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
+ sizeof(*tx_data) + AES_BLOCK_LEN + prior_data_len);
+ /* SEC CPL */
+ cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
+ /* cipher start has tls header + iv size extra if this is the header
+  * part of a tls record; otherwise only the 16 byte IV is added.
+ */
+ cipher_start =
+ AES_BLOCK_LEN + 1 +
+ (!tls_rec_offset ? TLS_HEADER_SIZE + tx_info->iv_size : 0);
+
+ cpl->op_ivinsrtofst =
+ htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
+ CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
+ cpl->pldlen = htonl(skb->data_len + AES_BLOCK_LEN + prior_data_len);
+ cpl->aadstart_cipherstop_hi =
+ htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
+ cpl->cipherstop_lo_authinsert = 0;
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ cpl->seqno_numivs = htonl(tx_info->scmd0_short_seqno_numivs);
+ cpl->ivgen_hdrlen = htonl(tx_info->scmd0_short_ivgen_hdrlen);
+ cpl->scmd1 = 0;
+
+ pos = cpl + 1;
+ /* check if space left to fill the keys */
+ left = (void *)q->q.stat - pos;
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+
+ pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
+ tx_info->key_ctx_len);
+ left = (void *)q->q.stat - pos;
+
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+ /* CPL_TX_DATA */
+ tx_data = (void *)pos;
+ OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
+ tx_data->len = htonl(TX_DATA_MSS_V(mss) |
+ TX_LENGTH_V(skb->data_len + prior_data_len));
+ tx_data->rsvd = htonl(tcp_seq);
+ tx_data->flags = htonl(TX_BYPASS_F);
+ if (tcp_push)
+ tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
+
+ /* check left again, it might go beyond queue limit */
+ pos = tx_data + 1;
+ left = (void *)q->q.stat - pos;
+
+ /* check the position again */
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+ /* copy the 16 byte IV for AES-CTR, which consists of 4 bytes of salt,
+  * 8 bytes of record IV and a 4 byte AES block counter.
+ */
+ memcpy(pos, tx_info->key_ctx.salt, tx_info->salt_size);
+ memcpy(pos + tx_info->salt_size, &iv_record, tx_info->iv_size);
+ *(__be32 *)(pos + tx_info->salt_size + tx_info->iv_size) =
+ htonl(2 + (tls_rec_offset ? ((tls_rec_offset -
+ (TLS_HEADER_SIZE + tx_info->iv_size)) / AES_BLOCK_LEN) : 0));
+
+ pos += 16;
+ /* prior_data_len is always less than 16 bytes; copy prior_data in
+  * after the AES CTR block and zero the remainder of the 16 bytes.
+ */
+ if (prior_data_len)
+ pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
+ /* send the complete packet except the header */
+ cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
+ sgl_sdesc->addr);
+ sgl_sdesc->skb = skb;
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
+
+ return 0;
+}
+
+/*
+ * chcr_ktls_tx_plaintxt: This handler takes care of records which carry
+ * only plain text (just the tls header and iv).
+ * @tx_info - driver specific tls info.
+ * @skb - skb containing a partial record.
+ * @tcp_seq
+ * @mss - segment size.
+ * @tcp_push - tcp push bit.
+ * @q - TX queue.
+ * @port_id - port number.
+ * @prior_data - data before the current segment, required to make this
+ * record 16 byte aligned.
+ * @prior_data_len - prior_data length (less than 16)
+ * return: NETDEV_TX_BUSY/NET_TX_OK.
+ */
+static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
+ struct sk_buff *skb, u32 tcp_seq, u32 mss,
+ bool tcp_push, struct sge_eth_txq *q,
+ u32 port_id, u8 *prior_data,
+ u32 prior_data_len)
+{
+ int credits, left, len16, last_desc;
+ unsigned int flits = 0, ndesc;
+ struct tx_sw_desc *sgl_sdesc;
+ struct cpl_tx_data *tx_data;
+ struct ulptx_idata *idata;
+ struct ulp_txpkt *ulptx;
+ struct fw_ulptx_wr *wr;
+ u32 wr_mid = 0;
+ void *pos;
+ u64 *end;
+
+ flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8);
+ flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags);
+ if (prior_data_len)
+ flits += 2;
+ /* WR will need len16 */
+ len16 = DIV_ROUND_UP(flits, 2);
+ /* check how many descriptors needed */
+ ndesc = DIV_ROUND_UP(flits, 8);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
+ sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+ end = (u64 *)pos + flits;
+ /* FW_ULPTX_WR */
+ wr = pos;
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+ wr->cookie = 0;
+ pos += sizeof(*wr);
+ /* ULP_TXPKT */
+ ulptx = (struct ulp_txpkt *)(wr + 1);
+ ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+ ULP_TXPKT_DATAMODIFY_V(0) |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+ ULP_TXPKT_DEST_V(0) |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) | ULP_TXPKT_RO_V(1));
+ ulptx->len = htonl(len16 - 1);
+ /* ULPTX_IDATA sub-command */
+ idata = (struct ulptx_idata *)(ulptx + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
+ idata->len = htonl(sizeof(*tx_data) + prior_data_len);
+ /* CPL_TX_DATA */
+ tx_data = (struct cpl_tx_data *)(idata + 1);
+ OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
+ tx_data->len = htonl(TX_DATA_MSS_V(mss) |
+ TX_LENGTH_V(skb->data_len + prior_data_len));
+ /* set tcp seq number */
+ tx_data->rsvd = htonl(tcp_seq);
+ tx_data->flags = htonl(TX_BYPASS_F);
+ if (tcp_push)
+ tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
+
+ pos = tx_data + 1;
+ /* apart from the prior_data_len bytes, the remaining part of the 16
+  * bytes should be set to zero.
+ */
+ if (prior_data_len)
+ pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
+
+ /* check left again, it might go beyond queue limit */
+ left = (void *)q->q.stat - pos;
+
+ /* check the position again */
+ if (!left) {
+ left = (void *)end - (void *)q->q.stat;
+ pos = q->q.desc;
+ end = pos + left;
+ }
+ /* send the complete packet including the header */
+ cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
+ sgl_sdesc->addr);
+ sgl_sdesc->skb = skb;
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+ return 0;
+}
+
+/*
+ * chcr_ktls_copy_record_in_skb
+ * @nskb - new skb to which the frags are to be added.
+ * @record - record whose frags hold the complete 16K record.
+ */
+static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
+ struct tls_record_info *record)
+{
+ int i = 0;
+
+ for (i = 0; i < record->num_frags; i++) {
+ skb_shinfo(nskb)->frags[i] = record->frags[i];
+ /* increase the frag ref count */
+ __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
+ }
+
+ skb_shinfo(nskb)->nr_frags = record->num_frags;
+ nskb->data_len = record->len;
+ nskb->len += record->len;
+ nskb->truesize += record->len;
+}
+
+/*
+ * chcr_ktls_update_snd_una: Reset SND_UNA. This is done to avoid sending
+ * the same segment again; anything before the current tx max is
+ * discarded.
+ * @tx_info - driver specific tls info.
+ * @q - TX queue.
+ * return: NET_TX_OK/NET_XMIT_DROP.
+ */
+static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q)
+{
+ struct fw_ulptx_wr *wr;
+ unsigned int ndesc;
+ int credits;
+ void *pos;
+ u32 len;
+
+ len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
+ ndesc = DIV_ROUND_UP(len, 64);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return NETDEV_TX_BUSY;
+ }
+
+ pos = &q->q.desc[q->q.pidx];
+
+ wr = pos;
+ /* ULPTX wr */
+ wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr->cookie = 0;
+ /* fill len in wr field */
+ wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+
+ pos += sizeof(*wr);
+
+ pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+ TCB_SND_UNA_RAW_W,
+ TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
+ TCB_SND_UNA_RAW_V(0), 0);
+
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+
+ return 0;
+}
+
+/*
+ * chcr_end_part_handler: This handler handles a record which is complete,
+ * or whose end part has been received. The T6 adapter has an issue in
+ * that it can't send out a TAG with a partial record, so for an end part
+ * we have to send the TAG as well, which requires fetching the complete
+ * record and sending it to the crypto module.
+ * @tx_info - driver specific tls info.
+ * @skb - skb containing a partial record.
+ * @record - complete record of 16K size.
+ * @tcp_seq
+ * @mss - segment size in which TP needs to chop a packet.
+ * @tcp_push_no_fin - tcp push if fin is not set.
+ * @q - TX queue.
+ * @tls_end_offset - offset from end of the record.
+ * @last_wr - check if this is the last part of the skb going out.
+ * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
+ */
+static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
+ struct sk_buff *skb,
+ struct tls_record_info *record,
+ u32 tcp_seq, int mss, bool tcp_push_no_fin,
+ struct sge_eth_txq *q,
+ u32 tls_end_offset, bool last_wr)
+{
+ struct sk_buff *nskb = NULL;
+ /* check if it is a complete record */
+ if (tls_end_offset == record->len) {
+ nskb = skb;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_complete_pkts);
+ } else {
+ dev_kfree_skb_any(skb);
+
+ nskb = alloc_skb(0, GFP_KERNEL);
+ if (!nskb)
+ return NETDEV_TX_BUSY;
+ /* copy complete record in skb */
+ chcr_ktls_copy_record_in_skb(nskb, record);
+ /* packet is being sent from the beginning, update the tcp_seq
+ * accordingly.
+ */
+ tcp_seq = tls_record_start_seq(record);
+ /* reset snd una, so the middle record won't send the already
+ * sent part.
+ */
+ if (chcr_ktls_update_snd_una(tx_info, q))
+ goto out;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_end_pkts);
+ }
+
+ if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
+ (last_wr && tcp_push_no_fin),
+ mss)) {
+ goto out;
+ }
+ return 0;
+out:
+ dev_kfree_skb_any(nskb);
+ return NETDEV_TX_BUSY;
+}
+
+/*
+ * chcr_short_record_handler: This handler takes care of records which
+ * don't have an end part (the first part or middle part(s) of a record).
+ * In such cases AES CTR is used in place of AES GCM to send out the
+ * partial packet. For a middle part we fetch the prior data to make it
+ * 16 byte aligned; if the segment starts inside the tls header or iv, we
+ * back up to the start of the tls header, and if it ends in a partial
+ * TAG, we remove the complete TAG and send only the payload. If only a
+ * partial header is received, that portion is sent out as plaintext.
+ * @tx_info - driver specific tls info.
+ * @skb - skb containing a partial record.
+ * @record - complete record of 16K size.
+ * @tcp_seq
+ * @mss - segment size in which TP needs to chop a packet.
+ * @tcp_push_no_fin - tcp push if fin is not set.
+ * @q - TX queue.
+ * @tls_end_offset - offset from end of the record.
+ * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
+ */
+static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
+ struct sk_buff *skb,
+ struct tls_record_info *record,
+ u32 tcp_seq, int mss, bool tcp_push_no_fin,
+ struct sge_eth_txq *q, u32 tls_end_offset)
+{
+ u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record);
+ u8 prior_data[16] = {0};
+ u32 prior_data_len = 0;
+ u32 data_len;
+
+ /* check if the skb ends in the middle of the tag/HASH; that is a
+  * problem, so send only the part of the packet before the HASH.
+ */
+ int remaining_record = tls_end_offset - skb->data_len;
+
+ if (remaining_record > 0 &&
+ remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) {
+ int trimmed_len = skb->data_len -
+ (TLS_CIPHER_AES_GCM_128_TAG_SIZE - remaining_record);
+ struct sk_buff *tmp_skb = NULL;
+ /* don't process the pkt if it is only a partial tag */
+ if (skb->data_len < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
+ goto out;
+
+ WARN_ON(trimmed_len > skb->data_len);
+
+ /* shift out that many bytes */
+ tmp_skb = alloc_skb(0, GFP_KERNEL);
+ if (unlikely(!tmp_skb))
+ goto out;
+
+ chcr_ktls_skb_shift(tmp_skb, skb, trimmed_len);
+ /* free the last trimmed portion */
+ dev_kfree_skb_any(skb);
+ skb = tmp_skb;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_trimmed_pkts);
+ }
+ data_len = skb->data_len;
+ /* check if the middle record's start point is 16 byte aligned. CTR
+ * needs a 16 byte aligned start point to begin encryption.
+ */
+ if (tls_rec_offset) {
+ /* there is an offset from start, means its a middle record */
+ int remaining = 0;
+
+ if (tls_rec_offset < (TLS_HEADER_SIZE + tx_info->iv_size)) {
+ prior_data_len = tls_rec_offset;
+ tls_rec_offset = 0;
+ remaining = 0;
+ } else {
+ prior_data_len =
+ (tls_rec_offset -
+ (TLS_HEADER_SIZE + tx_info->iv_size))
+ % AES_BLOCK_LEN;
+ remaining = tls_rec_offset - prior_data_len;
+ }
+
+ /* if prior_data_len is not zero, we need to fetch the prior
+  * data to make this record 16 byte aligned, or to reach the
+  * start offset.
+ */
+ if (prior_data_len) {
+ int i = 0;
+ u8 *data = NULL;
+ skb_frag_t *f;
+ u8 *vaddr;
+ int frag_size = 0, frag_delta = 0;
+
+ while (remaining > 0) {
+ frag_size = skb_frag_size(&record->frags[i]);
+ if (remaining < frag_size)
+ break;
+
+ remaining -= frag_size;
+ i++;
+ }
+ f = &record->frags[i];
+ vaddr = kmap_atomic(skb_frag_page(f));
+
+ data = vaddr + skb_frag_off(f) + remaining;
+ frag_delta = skb_frag_size(f) - remaining;
+
+ if (frag_delta >= prior_data_len) {
+ memcpy(prior_data, data, prior_data_len);
+ kunmap_atomic(vaddr);
+ } else {
+ memcpy(prior_data, data, frag_delta);
+ kunmap_atomic(vaddr);
+ /* get the next page */
+ f = &record->frags[i + 1];
+ vaddr = kmap_atomic(skb_frag_page(f));
+ data = vaddr + skb_frag_off(f);
+ memcpy(prior_data + frag_delta,
+ data, (prior_data_len - frag_delta));
+ kunmap_atomic(vaddr);
+ }
+ /* rewind tcp_seq by the prior_data length */
+ tcp_seq -= prior_data_len;
+ /* include prior_data_len in further calculations */
+ data_len += prior_data_len;
+ }
+ /* reset snd una, so the middle record won't send the already
+ * sent part.
+ */
+ if (chcr_ktls_update_snd_una(tx_info, q))
+ goto out;
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_middle_pkts);
+ } else {
+ /* Otherwise it's a partial first part of the record. If it's
+  * only the header, there is no need to send it for encryption.
+ */
+ if (data_len <= TLS_HEADER_SIZE + tx_info->iv_size) {
+ if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+ tcp_push_no_fin, q,
+ tx_info->port_id,
+ prior_data,
+ prior_data_len)) {
+ goto out;
+ }
+ return 0;
+ }
+ atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_start_pkts);
+ }
+
+ if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
+ mss, tls_rec_offset, prior_data,
+ prior_data_len)) {
+ goto out;
+ }
+
+ return 0;
+out:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_BUSY;
+}
+
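The alignment math in chcr_short_record_handler() is easiest to follow
with numbers. A hedged worked example, assuming TLS_HEADER_SIZE = 5,
iv_size = 8 and AES_BLOCK_LEN = 16:

    /* middle segment starting at tls_rec_offset = 50:
     *   prior_data_len = (50 - (5 + 8)) % 16 = 37 % 16 = 5
     *   remaining      = 50 - 5             = 45
     * so 5 bytes of prior data are fetched from the record frags to pull
     * the AES-CTR start back to a 16 byte ciphertext boundary, and
     * tcp_seq is rewound by those same 5 bytes before the short WR is
     * built.
     */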
+/* nic tls TX handler */
+int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct tcphdr *th = tcp_hdr(skb);
+ int data_len, qidx, ret = 0, mss;
+ struct tls_record_info *record;
+ struct chcr_stats_debug *stats;
+ struct chcr_ktls_info *tx_info;
+ u32 tls_end_offset, tcp_seq;
+ struct tls_context *tls_ctx;
+ struct sk_buff *local_skb;
+ int new_connection_state;
+ struct sge_eth_txq *q;
+ struct adapter *adap;
+ unsigned long flags;
+
+ tcp_seq = ntohl(th->seq);
+
+ mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : skb->data_len;
+
+ /* bail out if this socket is not set up for ktls offload */
+ if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
+ goto out;
+
+ tls_ctx = tls_get_ctx(skb->sk);
+ if (unlikely(tls_ctx->netdev != dev))
+ goto out;
+
+ tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+ tx_info = tx_ctx->chcr_info;
+
+ if (unlikely(!tx_info))
+ goto out;
+
+ /* check the connection state; we don't need to pass a new connection
+  * state, the state machine will check and update the state if it is
+  * stuck waiting on responses from HW.
+  * Start the tx handling only if the state is KTLS_CONN_TX_READY.
+ */
+ new_connection_state = chcr_ktls_update_connection_state(tx_info, 0);
+ if (new_connection_state != KTLS_CONN_TX_READY)
+ goto out;
+
+ /* don't touch the original skb; make a new skb to extract each record
+  * and send them out separately.
+ */
+ local_skb = alloc_skb(0, GFP_KERNEL);
+
+ if (unlikely(!local_skb))
+ return NETDEV_TX_BUSY;
+
+ adap = tx_info->adap;
+ stats = &adap->chcr_stats;
+
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+ /* if tcp options are set but fin is not, send the options first */
+ if (!th->fin && chcr_ktls_check_tcp_options(th)) {
+ ret = chcr_ktls_write_tcp_options(tx_info, skb, q,
+ tx_info->tx_chan);
+ if (ret)
+ return NETDEV_TX_BUSY;
+ }
+ /* update tcb */
+ ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, ntohl(th->seq),
+ ntohl(th->ack_seq),
+ ntohs(th->window));
+ if (ret) {
+ dev_kfree_skb_any(local_skb);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* copy skb contents into local skb */
+ chcr_ktls_skb_copy(skb, local_skb);
+
+ /* go through the skb and send only one record at a time. */
+ data_len = skb->data_len;
+ /* TCP segments can be received either complete or partial.
+  * chcr_end_part_handler handles the cases where a complete record or
+  * the end part of a record is received. In case of a partial end part,
+  * we send the complete record again.
+ */
+
+ do {
+ int i;
+
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+ /* lock taken */
+ spin_lock_irqsave(&tx_ctx->base.lock, flags);
+ /* fetch the tls record */
+ record = tls_get_record(&tx_ctx->base, tcp_seq,
+ &tx_info->record_no);
+ /* By the time the packet reached us, the ACK may already have
+  * been received and the record freed; handle that gracefully.
+ */
+ if (unlikely(!record)) {
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ atomic64_inc(&stats->ktls_tx_drop_no_sync_data);
+ goto out;
+ }
+
+ if (unlikely(tls_record_is_start_marker(record))) {
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ atomic64_inc(&stats->ktls_tx_skip_no_sync_data);
+ goto out;
+ }
+
+ /* increase the page reference count of the record, so that the
+  * pages can't be freed midway if the stack receives an ACK and
+  * tries to delete the record.
+ */
+ for (i = 0; i < record->num_frags; i++)
+ __skb_frag_ref(&record->frags[i]);
+ /* lock cleared */
+ spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+
+ tls_end_offset = record->end_seq - tcp_seq;
+
+ pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
+ tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
+ /* if a tls record is finishing in this SKB */
+ if (tls_end_offset <= data_len) {
+ struct sk_buff *nskb = NULL;
+
+ if (tls_end_offset < data_len) {
+ nskb = alloc_skb(0, GFP_KERNEL);
+ if (unlikely(!nskb)) {
+ ret = -ENOMEM;
+ goto clear_ref;
+ }
+
+ chcr_ktls_skb_shift(nskb, local_skb,
+ tls_end_offset);
+ } else {
+ /* it's the only record in this skb; point to
+  * it directly.
+ */
+ nskb = local_skb;
+ }
+ ret = chcr_end_part_handler(tx_info, nskb, record,
+ tcp_seq, mss,
+ (!th->fin && th->psh), q,
+ tls_end_offset,
+ (nskb == local_skb));
+
+ if (ret && nskb != local_skb)
+ dev_kfree_skb_any(local_skb);
+
+ data_len -= tls_end_offset;
+ /* tcp_seq increment is required to handle next record.
+ */
+ tcp_seq += tls_end_offset;
+ } else {
+ ret = chcr_short_record_handler(tx_info, local_skb,
+ record, tcp_seq, mss,
+ (!th->fin && th->psh),
+ q, tls_end_offset);
+ data_len = 0;
+ }
+clear_ref:
+ /* drop the frag references taken locally above */
+ for (i = 0; i < record->num_frags; i++) {
+ __skb_frag_unref(&record->frags[i]);
+ }
+ /* on any failure, break out of the loop */
+ if (ret)
+ goto out;
+ /* length should never be less than 0 */
+ WARN_ON(data_len < 0);
+
+ } while (data_len > 0);
+
+ tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
+
+ atomic64_inc(&stats->ktls_tx_encrypted_packets);
+ atomic64_add(skb->data_len, &stats->ktls_tx_encrypted_bytes);
+
+ /* tcp fin is set; send a separate tcp msg including all the options
+  * as well.
+ */
+ if (th->fin)
+ chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
+
+out:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
diff --git a/drivers/crypto/chelsio/chcr_ktls.h b/drivers/crypto/chelsio/chcr_ktls.h
new file mode 100644
index 000000000000..5a7ae2ca446e
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_ktls.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
+
+#ifndef __CHCR_KTLS_H__
+#define __CHCR_KTLS_H__
+
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#include <net/tls.h>
+#include "cxgb4.h"
+#include "t4_msg.h"
+#include "t4_tcb.h"
+#include "l2t.h"
+#include "chcr_common.h"
+#include "cxgb4_uld.h"
+
+#define CHCR_TCB_STATE_CLOSED 0
+#define CHCR_KTLS_KEY_CTX_LEN 16
+#define CHCR_SET_TCB_FIELD_LEN sizeof(struct cpl_set_tcb_field)
+#define CHCR_PLAIN_TX_DATA_LEN (sizeof(struct fw_ulptx_wr) +\
+ sizeof(struct ulp_txpkt) +\
+ sizeof(struct ulptx_idata) +\
+ sizeof(struct cpl_tx_data))
+
+#define CHCR_KTLS_WR_SIZE (CHCR_PLAIN_TX_DATA_LEN +\
+ sizeof(struct cpl_tx_sec_pdu))
+
+enum chcr_ktls_conn_state {
+ KTLS_CONN_CLOSED,
+ KTLS_CONN_ACT_OPEN_REQ,
+ KTLS_CONN_ACT_OPEN_RPL,
+ KTLS_CONN_SET_TCB_REQ,
+ KTLS_CONN_SET_TCB_RPL,
+ KTLS_CONN_TX_READY,
+};
+
+struct chcr_ktls_info {
+ struct sock *sk;
+ spinlock_t lock; /* state machine lock */
+ struct ktls_key_ctx key_ctx;
+ struct adapter *adap;
+ struct l2t_entry *l2te;
+ struct net_device *netdev;
+ u64 iv;
+ u64 record_no;
+ int tid;
+ int atid;
+ int rx_qid;
+ u32 iv_size;
+ u32 prev_seq;
+ u32 prev_ack;
+ u32 salt_size;
+ u32 key_ctx_len;
+ u32 scmd0_seqno_numivs;
+ u32 scmd0_ivgen_hdrlen;
+ u32 tcp_start_seq_number;
+ u32 scmd0_short_seqno_numivs;
+ u32 scmd0_short_ivgen_hdrlen;
+ enum chcr_ktls_conn_state connection_state;
+ u16 prev_win;
+ u8 tx_chan;
+ u8 smt_idx;
+ u8 port_id;
+ u8 ip_family;
+ u8 first_qset;
+};
+
+struct chcr_ktls_ofld_ctx_tx {
+ struct tls_offload_context_tx base;
+ struct chcr_ktls_info *chcr_info;
+};
+
+static inline struct chcr_ktls_ofld_ctx_tx *
+chcr_get_ktls_tx_context(struct tls_context *tls_ctx)
+{
+ BUILD_BUG_ON(sizeof(struct chcr_ktls_ofld_ctx_tx) >
+ TLS_OFFLOAD_CONTEXT_SIZE_TX);
+ return container_of(tls_offload_ctx_tx(tls_ctx),
+ struct chcr_ktls_ofld_ctx_tx,
+ base);
+}
+
+static inline int chcr_get_first_rx_qid(struct adapter *adap)
+{
+ /* u_ctx is saved in adap, fetch it */
+ struct uld_ctx *u_ctx = adap->uld[CXGB4_ULD_CRYPTO].handle;
+
+ if (!u_ctx)
+ return -1;
+ return u_ctx->lldi.rxq_ids[0];
+}
+
+void chcr_enable_ktls(struct adapter *adap);
+void chcr_disable_ktls(struct adapter *adap);
+int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
+int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
+int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#endif /* __CHCR_KTLS_H__ */
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 9b2745ad9e38..d5720a859443 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -445,6 +445,7 @@ void chtls_destroy_sock(struct sock *sk)
chtls_purge_write_queue(sk);
free_tls_keyid(sk);
kref_put(&csk->kref, chtls_sock_release);
+ csk->cdev = NULL;
sk->sk_prot = &tcp_prot;
sk->sk_prot->destroy(sk);
}
@@ -759,8 +760,10 @@ static void chtls_release_resources(struct sock *sk)
csk->l2t_entry = NULL;
}
- cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
- sock_put(sk);
+ if (sk->sk_state != TCP_SYN_SENT) {
+ cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
+ sock_put(sk);
+ }
}
static void chtls_conn_done(struct sock *sk)
@@ -1716,6 +1719,9 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+ if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
+ goto out;
+
sk->sk_shutdown |= RCV_SHUTDOWN;
sock_set_flag(sk, SOCK_DONE);
@@ -1748,6 +1754,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
else
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+out:
kfree_skb(skb);
}
@@ -1758,6 +1765,10 @@ static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp;
csk = rcu_dereference_sk_user_data(sk);
+
+ if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING))
+ goto out;
+
tp = tcp_sk(sk);
tp->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */
@@ -1787,6 +1798,7 @@ static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
default:
pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
}
+out:
kfree_skb(skb);
}
@@ -1896,6 +1908,7 @@ static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
}
set_abort_rpl_wr(reply_skb, tid, status);
+ kfree_skb(skb);
set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
if (csk_conn_inline(csk)) {
struct l2t_entry *e = csk->l2t_entry;
@@ -1906,7 +1919,6 @@ static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
}
}
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
- kfree_skb(skb);
}
/*
@@ -2008,7 +2020,8 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
chtls_conn_done(sk);
}
- chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue);
+ chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
+ rst_status, queue);
}
static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
@@ -2042,6 +2055,7 @@ static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR;
void (*fn)(struct sock *sk, struct sk_buff *skb);
unsigned int hwtid = GET_TID(req);
+ struct chtls_sock *csk;
struct sock *sk;
u8 opcode;
@@ -2051,6 +2065,8 @@ static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
if (!sk)
goto rel_skb;
+ csk = sk->sk_user_data;
+
switch (opcode) {
case CPL_PEER_CLOSE:
fn = chtls_peer_close;
@@ -2059,6 +2075,11 @@ static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb)
fn = chtls_close_con_rpl;
break;
case CPL_ABORT_REQ_RSS:
+ /*
+ * Save the offload device in the skb, we may process this
+ * message after the socket has closed.
+ */
+ BLOG_SKB_CB(skb)->cdev = csk->cdev;
fn = chtls_abort_req_rss;
break;
case CPL_ABORT_RPL_RSS:
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 5cf9b021220b..e1651adb9d06 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -902,14 +902,6 @@ static int chtls_skb_copy_to_page_nocache(struct sock *sk,
return 0;
}
-/* Read TLS header to find content type and data length */
-static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
-{
- if (copy_from_iter(thdr, sizeof(*thdr), from) != sizeof(*thdr))
- return -EFAULT;
- return (__force int)cpu_to_be16(thdr->length);
-}
-
static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
{
return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
@@ -981,6 +973,37 @@ do_interrupted:
goto do_rm_wq;
}
+static int chtls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
+ unsigned char *record_type)
+{
+ struct cmsghdr *cmsg;
+ int rc = -EINVAL;
+
+ for_each_cmsghdr(cmsg, msg) {
+ if (!CMSG_OK(msg, cmsg))
+ return -EINVAL;
+ if (cmsg->cmsg_level != SOL_TLS)
+ continue;
+
+ switch (cmsg->cmsg_type) {
+ case TLS_SET_RECORD_TYPE:
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
+ return -EINVAL;
+
+ if (msg->msg_flags & MSG_MORE)
+ return -EINVAL;
+
+ *record_type = *(unsigned char *)CMSG_DATA(cmsg);
+ rc = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return rc;
+}
+
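chtls_proccess_cmsg() consumes the same control-message ABI as software
kTLS. A hedged userspace sketch of a sender handing a non-data record type
(for example a TLS alert) to chtls_sendmsg() through this cmsg
(send_tls_record is an illustrative name):

    #include <linux/tls.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    #ifndef SOL_TLS
    #define SOL_TLS 282
    #endif

    static ssize_t send_tls_record(int fd, const void *buf, size_t len,
                                   unsigned char record_type)
    {
            char cbuf[CMSG_SPACE(sizeof(record_type))] = {0};
            struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
            struct msghdr msg = {
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
            };
            struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

            cmsg->cmsg_level = SOL_TLS;
            cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
            cmsg->cmsg_len = CMSG_LEN(sizeof(record_type));
            *CMSG_DATA(cmsg) = record_type;

            return sendmsg(fd, &msg, 0);
    }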
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
@@ -1022,15 +1045,21 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
goto wait_for_sndbuf;
if (is_tls_tx(csk) && !csk->tlshws.txleft) {
- struct tls_hdr hdr;
+ unsigned char record_type = TLS_RECORD_TYPE_DATA;
- recordsz = tls_header_read(&hdr, &msg->msg_iter);
- size -= TLS_HEADER_LENGTH;
- copied += TLS_HEADER_LENGTH;
+ if (unlikely(msg->msg_controllen)) {
+ err = chtls_proccess_cmsg(sk, msg,
+ &record_type);
+ if (err)
+ goto out_err;
+ }
+
+ recordsz = size;
csk->tlshws.txleft = recordsz;
- csk->tlshws.type = hdr.type;
+ csk->tlshws.type = record_type;
+
if (skb)
- ULP_SKB_CB(skb)->ulp.tls.type = hdr.type;
+ ULP_SKB_CB(skb)->ulp.tls.type = record_type;
}
if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
@@ -1521,6 +1550,22 @@ found_ok_skb:
}
}
}
+ /* Set record type if not already done. For a non-data record,
+ * do not proceed if record type could not be copied.
+ */
+ if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
+ struct tls_hdr *thdr = (struct tls_hdr *)skb->data;
+ int cerr = 0;
+
+ cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
+ sizeof(thdr->type), &thdr->type);
+
+ if (cerr && thdr->type != TLS_RECORD_TYPE_DATA)
+ return -EIO;
+ /* don't send tls header, skip copy */
+ goto skip_copy;
+ }
+
if (skb_copy_datagram_msg(skb, offset, msg, avail)) {
if (!copied) {
copied = -EFAULT;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ffa7c2100edb..3efa7493456b 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3570,7 +3570,8 @@ static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters_2);
- MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
@@ -6391,6 +6392,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
spin_lock_init(&dev->reset_flow_resource_lock);
xa_init(&dev->odp_mkeys);
xa_init(&dev->sig_mrs);
+ spin_lock_init(&dev->mkey_lock);
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f3bdbd5e5096..fc19dc1cf2e1 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -994,6 +994,11 @@ struct mlx5_ib_dev {
/* sync used page count stats
*/
struct mlx5_ib_resources devr;
+
+ /* protect mkey key part */
+ spinlock_t mkey_lock;
+ u8 mkey_key;
+
struct mlx5_mr_cache cache;
struct timer_list delay_timer;
/* Prevents soft lock on massive reg MRs */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 6fa0a83c19de..8508af500972 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -47,6 +47,46 @@ enum {
#define MLX5_UMR_ALIGN 2048
+static void
+create_mkey_callback(int status, struct mlx5_async_work *context);
+
+static void
+assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
+ u32 *in)
+{
+ void *mkc;
+ u8 key;
+
+ spin_lock_irq(&dev->mkey_lock);
+ key = dev->mkey_key++;
+ spin_unlock_irq(&dev->mkey_lock);
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, mkey_7_0, key);
+ mkey->key = key;
+}
+
+static int
+mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
+ u32 *in, int inlen)
+{
+ assign_mkey_variant(dev, mkey, in);
+ return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen);
+}
+
+static int
+mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
+ struct mlx5_core_mkey *mkey,
+ struct mlx5_async_ctx *async_ctx,
+ u32 *in, int inlen, u32 *out, int outlen,
+ struct mlx5_async_work *context)
+{
+ MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+ assign_mkey_variant(dev, mkey, in);
+ return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
+ create_mkey_callback, context);
+}
+
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
@@ -79,7 +119,7 @@ static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}
-static void reg_mr_callback(int status, struct mlx5_async_work *context)
+static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
struct mlx5_ib_mr *mr =
container_of(context, struct mlx5_ib_mr, cb_work);
@@ -87,7 +127,6 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
struct mlx5_mr_cache *cache = &dev->cache;
int c = order2idx(dev, mr->order);
struct mlx5_cache_ent *ent = &cache->ent[c];
- u8 key;
unsigned long flags;
spin_lock_irqsave(&ent->lock, flags);
@@ -102,10 +141,8 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
}
mr->mmkey.type = MLX5_MKEY_MR;
- spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
- key = dev->mdev->priv.mkey_key++;
- spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
- mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;
+ mr->mmkey.key |= mlx5_idx_to_mkey(
+ MLX5_GET(create_mkey_out, mr->out, mkey_index));
cache->last_add = jiffies;
@@ -163,10 +200,10 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
spin_lock_irq(&ent->lock);
ent->pending++;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
+ err = mlx5_ib_create_mkey_cb(dev, &mr->mmkey,
&dev->async_ctx, in, inlen,
mr->out, sizeof(mr->out),
- reg_mr_callback, &mr->cb_work);
+ &mr->cb_work);
if (err) {
spin_lock_irq(&ent->lock);
ent->pending--;
@@ -685,7 +722,6 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
- struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_mr *mr;
void *mkc;
u32 *in;
@@ -707,7 +743,7 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
MLX5_SET(mkc, mkc, length64, 1);
set_mkc_access_pd_addr_fields(mkc, acc, 0, pd);
- err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
if (err)
goto err_in;
@@ -1097,7 +1133,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
get_octo_len(virt_addr, length, page_shift));
}
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
if (err) {
mlx5_ib_warn(dev, "create mkey failed\n");
goto err_2;
@@ -1137,7 +1173,6 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
- struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_mr *mr;
void *mkc;
u32 *in;
@@ -1160,7 +1195,7 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
MLX5_SET64(mkc, mkc, len, length);
set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);
- err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
if (err)
goto err_in;
@@ -1638,7 +1673,7 @@ static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
if (err)
goto err_free_descs;
@@ -1905,7 +1940,7 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
MLX5_SET(mkc, mkc, qpn, 0xffffff);
- err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
+ err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
if (err)
goto free;
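
The refactor above moves mkey "variant" handling from mlx5 core into the IB driver: assign_mkey_variant() stamps a per-device 8-bit counter into mkey_7_0 before the CREATE_MKEY command, and create_mkey_callback() later ORs in the firmware-assigned index. A small illustration of the resulting key layout (assuming mlx5_idx_to_mkey() expands to index << 8, which is not shown in this diff):

    /* illustration only: the 32-bit mkey is the firmware-assigned index
     * in the upper 24 bits plus the driver-chosen variant in the low 8
     */
    static u32 mkey_compose(u32 fw_index, u8 variant)
    {
            return (fw_index << 8) | variant;
    }
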
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 63e4f9d15fd9..a10a0c2ca2da 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -213,6 +213,8 @@ static int ipoib_get_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops ipoib_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_link_ksettings = ipoib_get_link_ksettings,
.get_drvinfo = ipoib_get_drvinfo,
.get_coalesce = ipoib_get_coalesce,
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index db8884ad6d40..b103fbdd0f68 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -259,6 +259,19 @@ config GENEVE
To compile this driver as a module, choose M here: the module
will be called geneve.
+config BAREUDP
+ tristate "Bare UDP Encapsulation"
+ depends on INET
+ depends on IPV6 || !IPV6
+ select NET_UDP_TUNNEL
+ select GRO_CELLS
+ help
+ This adds a bare UDP tunnel module for tunnelling different
+ kinds of traffic like MPLS, IP, etc. inside a UDP tunnel.
+
+ To compile this driver as a module, choose M here: the module
+ will be called bareudp.
+
config GTP
tristate "GPRS Tunneling Protocol datapath (GTP-U)"
depends on INET
@@ -432,6 +445,8 @@ source "drivers/net/fddi/Kconfig"
source "drivers/net/hippi/Kconfig"
+source "drivers/net/ipa/Kconfig"
+
config NET_SB1000
tristate "General Instruments Surfboard 1000"
depends on PNP
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 71b88ffc5587..94b60800887a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_VXLAN) += vxlan.o
obj-$(CONFIG_GENEVE) += geneve.o
+obj-$(CONFIG_BAREUDP) += bareudp.o
obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_ETHERNET) += ethernet/
obj-$(CONFIG_FDDI) += fddi/
obj-$(CONFIG_HIPPI) += hippi/
obj-$(CONFIG_HAMRADIO) += hamradio/
+obj-$(CONFIG_QCOM_IPA) += ipa/
obj-$(CONFIG_PLIP) += plip/
obj-$(CONFIG_PPP) += ppp/
obj-$(CONFIG_PPP_ASYNC) += ppp/
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
new file mode 100644
index 000000000000..cc0703c3d57f
--- /dev/null
+++ b/drivers/net/bareudp.c
@@ -0,0 +1,817 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Bareudp: UDP tunnel encapsulation for different payload types like
+ * MPLS, NSH, IP, etc.
+ * Copyright (c) 2019 Nokia, Inc.
+ * Author: Martin Varghese <martin.varghese@nokia.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/hash.h>
+#include <net/dst_metadata.h>
+#include <net/gro_cells.h>
+#include <net/rtnetlink.h>
+#include <net/protocol.h>
+#include <net/ip6_tunnel.h>
+#include <net/ip_tunnels.h>
+#include <net/udp_tunnel.h>
+#include <net/bareudp.h>
+
+#define BAREUDP_BASE_HLEN sizeof(struct udphdr)
+#define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \
+ sizeof(struct udphdr))
+#define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \
+ sizeof(struct udphdr))
+
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
+/* per-network namespace private data for this module */
+
+static unsigned int bareudp_net_id;
+
+struct bareudp_net {
+ struct list_head bareudp_list;
+};
+
+/* Pseudo network device */
+struct bareudp_dev {
+ struct net *net; /* netns for packet i/o */
+ struct net_device *dev; /* netdev for bareudp tunnel */
+ __be16 ethertype;
+ __be16 port;
+ u16 sport_min;
+ bool multi_proto_mode;
+ struct socket __rcu *sock;
+ struct list_head next; /* bareudp node on namespace list */
+ struct gro_cells gro_cells;
+};
+
+static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct metadata_dst *tun_dst = NULL;
+ struct pcpu_sw_netstats *stats;
+ struct bareudp_dev *bareudp;
+ unsigned short family;
+ unsigned int len;
+ __be16 proto;
+ void *oiph;
+ int err;
+
+ bareudp = rcu_dereference_sk_user_data(sk);
+ if (!bareudp)
+ goto drop;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ family = AF_INET;
+ else
+ family = AF_INET6;
+
+ if (bareudp->ethertype == htons(ETH_P_IP)) {
+ struct iphdr *iphdr;
+
+ iphdr = (struct iphdr *)(skb->data + BAREUDP_BASE_HLEN);
+ if (iphdr->version == 4) {
+ proto = bareudp->ethertype;
+ } else if (bareudp->multi_proto_mode && (iphdr->version == 6)) {
+ proto = htons(ETH_P_IPV6);
+ } else {
+ bareudp->dev->stats.rx_dropped++;
+ goto drop;
+ }
+ } else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
+ struct iphdr *tunnel_hdr;
+
+ tunnel_hdr = (struct iphdr *)skb_network_header(skb);
+ if (tunnel_hdr->version == 4) {
+ if (!ipv4_is_multicast(tunnel_hdr->daddr)) {
+ proto = bareudp->ethertype;
+ } else if (bareudp->multi_proto_mode &&
+ ipv4_is_multicast(tunnel_hdr->daddr)) {
+ proto = htons(ETH_P_MPLS_MC);
+ } else {
+ bareudp->dev->stats.rx_dropped++;
+ goto drop;
+ }
+ } else {
+ int addr_type;
+ struct ipv6hdr *tunnel_hdr_v6;
+
+ tunnel_hdr_v6 = (struct ipv6hdr *)skb_network_header(skb);
+ addr_type =
+ ipv6_addr_type((struct in6_addr *)&tunnel_hdr_v6->daddr);
+ if (!(addr_type & IPV6_ADDR_MULTICAST)) {
+ proto = bareudp->ethertype;
+ } else if (bareudp->multi_proto_mode &&
+ (addr_type & IPV6_ADDR_MULTICAST)) {
+ proto = htons(ETH_P_MPLS_MC);
+ } else {
+ bareudp->dev->stats.rx_dropped++;
+ goto drop;
+ }
+ }
+ } else {
+ proto = bareudp->ethertype;
+ }
+
+ if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN,
+ proto,
+ !net_eq(bareudp->net,
+ dev_net(bareudp->dev)))) {
+ bareudp->dev->stats.rx_dropped++;
+ goto drop;
+ }
+
+ tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
+ if (!tun_dst) {
+ bareudp->dev->stats.rx_dropped++;
+ goto drop;
+ }
+ skb_dst_set(skb, &tun_dst->dst);
+ skb->dev = bareudp->dev;
+ oiph = skb_network_header(skb);
+ skb_reset_network_header(skb);
+
+ if (family == AF_INET)
+ err = IP_ECN_decapsulate(oiph, skb);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ err = IP6_ECN_decapsulate(oiph, skb);
+#endif
+
+ if (unlikely(err)) {
+ if (log_ecn_error) {
+ if (family == AF_INET)
+ net_info_ratelimited("non-ECT from %pI4 "
+ "with TOS=%#x\n",
+ &((struct iphdr *)oiph)->saddr,
+ ((struct iphdr *)oiph)->tos);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ net_info_ratelimited("non-ECT from %pI6\n",
+ &((struct ipv6hdr *)oiph)->saddr);
+#endif
+ }
+ if (err > 1) {
+ ++bareudp->dev->stats.rx_frame_errors;
+ ++bareudp->dev->stats.rx_errors;
+ goto drop;
+ }
+ }
+
+ len = skb->len;
+ err = gro_cells_receive(&bareudp->gro_cells, skb);
+ if (likely(err == NET_RX_SUCCESS)) {
+ stats = this_cpu_ptr(bareudp->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+ }
+ return 0;
+drop:
+ /* Consume bad packet */
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb)
+{
+ return 0;
+}
+
+static int bareudp_init(struct net_device *dev)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+ int err;
+
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ err = gro_cells_init(&bareudp->gro_cells, dev);
+ if (err) {
+ free_percpu(dev->tstats);
+ return err;
+ }
+ return 0;
+}
+
+static void bareudp_uninit(struct net_device *dev)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+
+ gro_cells_destroy(&bareudp->gro_cells);
+ free_percpu(dev->tstats);
+}
+
+static struct socket *bareudp_create_sock(struct net *net, __be16 port)
+{
+ struct udp_port_cfg udp_conf;
+ struct socket *sock;
+ int err;
+
+ memset(&udp_conf, 0, sizeof(udp_conf));
+#if IS_ENABLED(CONFIG_IPV6)
+ udp_conf.family = AF_INET6;
+#else
+ udp_conf.family = AF_INET;
+#endif
+ udp_conf.local_udp_port = port;
+ /* Open UDP socket */
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return sock;
+}
+
+/* Create a new listen socket */
+static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
+{
+ struct udp_tunnel_sock_cfg tunnel_cfg;
+ struct socket *sock;
+
+ sock = bareudp_create_sock(bareudp->net, port);
+ if (IS_ERR(sock))
+ return PTR_ERR(sock);
+
+ /* Mark socket as an encapsulation socket */
+ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
+ tunnel_cfg.sk_user_data = bareudp;
+ tunnel_cfg.encap_type = 1;
+ tunnel_cfg.encap_rcv = bareudp_udp_encap_recv;
+ tunnel_cfg.encap_err_lookup = bareudp_err_lookup;
+ tunnel_cfg.encap_destroy = NULL;
+ setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);
+
+ /* As setup_udp_tunnel_sock() does not call udp_encap_enable() if the
+ * socket type is v6, an explicit call to udp_encap_enable() is needed.
+ */
+ if (sock->sk->sk_family == AF_INET6)
+ udp_encap_enable();
+
+ rcu_assign_pointer(bareudp->sock, sock);
+ return 0;
+}
+
+static int bareudp_open(struct net_device *dev)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+ int ret = 0;
+
+ ret = bareudp_socket_create(bareudp, bareudp->port);
+ return ret;
+}
+
+static void bareudp_sock_release(struct bareudp_dev *bareudp)
+{
+ struct socket *sock;
+
+ sock = bareudp->sock;
+ rcu_assign_pointer(bareudp->sock, NULL);
+ synchronize_net();
+ udp_tunnel_sock_release(sock);
+}
+
+static int bareudp_stop(struct net_device *dev)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+
+ bareudp_sock_release(bareudp);
+ return 0;
+}
+
+static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ struct bareudp_dev *bareudp,
+ const struct ip_tunnel_info *info)
+{
+ bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
+ struct socket *sock = rcu_dereference(bareudp->sock);
+ bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
+ const struct ip_tunnel_key *key = &info->key;
+ struct rtable *rt;
+ __be16 sport, df;
+ int min_headroom;
+ __u8 tos, ttl;
+ __be32 saddr;
+ int err;
+
+ if (!sock)
+ return -ESHUTDOWN;
+
+ rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info,
+ IPPROTO_UDP, use_cache);
+
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+
+ skb_tunnel_check_pmtu(skb, &rt->dst,
+ BAREUDP_IPV4_HLEN + info->options_len);
+
+ sport = udp_flow_src_port(bareudp->net, skb,
+ bareudp->sport_min, USHRT_MAX,
+ true);
+ tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
+ ttl = key->ttl;
+ df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ skb_scrub_packet(skb, xnet);
+
+ err = -ENOSPC;
+ if (!skb_pull(skb, skb_network_offset(skb)))
+ goto free_dst;
+
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len +
+ BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
+
+ err = skb_cow_head(skb, min_headroom);
+ if (unlikely(err))
+ goto free_dst;
+
+ err = udp_tunnel_handle_offloads(skb, udp_sum);
+ if (err)
+ goto free_dst;
+
+ skb_set_inner_protocol(skb, bareudp->ethertype);
+ udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
+ tos, ttl, df, sport, bareudp->port,
+ !net_eq(bareudp->net, dev_net(bareudp->dev)),
+ !(info->key.tun_flags & TUNNEL_CSUM));
+ return 0;
+
+free_dst:
+ dst_release(&rt->dst);
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+ struct bareudp_dev *bareudp,
+ const struct ip_tunnel_info *info)
+{
+ bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
+ struct socket *sock = rcu_dereference(bareudp->sock);
+ bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
+ const struct ip_tunnel_key *key = &info->key;
+ struct dst_entry *dst = NULL;
+ struct in6_addr saddr, daddr;
+ int min_headroom;
+ __u8 prio, ttl;
+ __be16 sport;
+ int err;
+
+ if (!sock)
+ return -ESHUTDOWN;
+
+ dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info,
+ IPPROTO_UDP, use_cache);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len);
+
+ sport = udp_flow_src_port(bareudp->net, skb,
+ bareudp->sport_min, USHRT_MAX,
+ true);
+ prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
+ ttl = key->ttl;
+
+ skb_scrub_packet(skb, xnet);
+
+ err = -ENOSPC;
+ if (!skb_pull(skb, skb_network_offset(skb)))
+ goto free_dst;
+
+ min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
+ BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);
+
+ err = skb_cow_head(skb, min_headroom);
+ if (unlikely(err))
+ goto free_dst;
+
+ err = udp_tunnel_handle_offloads(skb, udp_sum);
+ if (err)
+ goto free_dst;
+
+ daddr = info->key.u.ipv6.dst;
+ udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
+ &saddr, &daddr, prio, ttl,
+ info->key.label, sport, bareudp->port,
+ !(info->key.tun_flags & TUNNEL_CSUM));
+ return 0;
+
+free_dst:
+ dst_release(dst);
+ return err;
+}
+#endif
+
+static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+ struct ip_tunnel_info *info = NULL;
+ int err;
+
+ if (skb->protocol != bareudp->ethertype) {
+ if (!bareudp->multi_proto_mode ||
+ (skb->protocol != htons(ETH_P_MPLS_MC) &&
+ skb->protocol != htons(ETH_P_IPV6))) {
+ err = -EINVAL;
+ goto tx_error;
+ }
+ }
+
+ info = skb_tunnel_info(skb);
+ if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
+ err = -EINVAL;
+ goto tx_error;
+ }
+
+ rcu_read_lock();
+#if IS_ENABLED(CONFIG_IPV6)
+ if (info->mode & IP_TUNNEL_INFO_IPV6)
+ err = bareudp6_xmit_skb(skb, dev, bareudp, info);
+ else
+#endif
+ err = bareudp_xmit_skb(skb, dev, bareudp, info);
+
+ rcu_read_unlock();
+
+ if (likely(!err))
+ return NETDEV_TX_OK;
+tx_error:
+ dev_kfree_skb(skb);
+
+ if (err == -ELOOP)
+ dev->stats.collisions++;
+ else if (err == -ENETUNREACH)
+ dev->stats.tx_carrier_errors++;
+
+ dev->stats.tx_errors++;
+ return NETDEV_TX_OK;
+}
+
+static int bareudp_fill_metadata_dst(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+ bool use_cache;
+
+ use_cache = ip_tunnel_dst_cache_usable(skb, info);
+
+ if (ip_tunnel_info_af(info) == AF_INET) {
+ struct rtable *rt;
+ __be32 saddr;
+
+ rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr,
+ info, IPPROTO_UDP, use_cache);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+
+ ip_rt_put(rt);
+ info->key.u.ipv4.src = saddr;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (ip_tunnel_info_af(info) == AF_INET6) {
+ struct dst_entry *dst;
+ struct in6_addr saddr;
+ struct socket *sock = rcu_dereference(bareudp->sock);
+
+ dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock,
+ &saddr, info, IPPROTO_UDP,
+ use_cache);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ dst_release(dst);
+ info->key.u.ipv6.src = saddr;
+#endif
+ } else {
+ return -EINVAL;
+ }
+
+ info->key.tp_src = udp_flow_src_port(bareudp->net, skb,
+ bareudp->sport_min,
+ USHRT_MAX, true);
+ info->key.tp_dst = bareudp->port;
+ return 0;
+}
+
+static const struct net_device_ops bareudp_netdev_ops = {
+ .ndo_init = bareudp_init,
+ .ndo_uninit = bareudp_uninit,
+ .ndo_open = bareudp_open,
+ .ndo_stop = bareudp_stop,
+ .ndo_start_xmit = bareudp_xmit,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_fill_metadata_dst = bareudp_fill_metadata_dst,
+};
+
+static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = {
+ [IFLA_BAREUDP_PORT] = { .type = NLA_U16 },
+ [IFLA_BAREUDP_ETHERTYPE] = { .type = NLA_U16 },
+ [IFLA_BAREUDP_SRCPORT_MIN] = { .type = NLA_U16 },
+ [IFLA_BAREUDP_MULTIPROTO_MODE] = { .type = NLA_FLAG },
+};
+
+/* Let udev know that this is a virtual tunnel endpoint */
+static struct device_type bareudp_type = {
+ .name = "bareudp",
+};
+
+/* Initialize the device structure. */
+static void bareudp_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &bareudp_netdev_ops;
+ dev->needs_free_netdev = true;
+ SET_NETDEV_DEVTYPE(dev, &bareudp_type);
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->mtu = ETH_DATA_LEN;
+ dev->min_mtu = IPV4_MIN_MTU;
+ dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN;
+ dev->type = ARPHRD_NONE;
+ netif_keep_dst(dev);
+ dev->priv_flags |= IFF_NO_QUEUE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+}
+
+static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ if (!data) {
+ NL_SET_ERR_MSG(extack,
+ "Not enough attributes provided to perform the operation");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
+ struct netlink_ext_ack *extack)
+{
+ if (!data[IFLA_BAREUDP_PORT]) {
+ NL_SET_ERR_MSG(extack, "port not specified");
+ return -EINVAL;
+ }
+ if (!data[IFLA_BAREUDP_ETHERTYPE]) {
+ NL_SET_ERR_MSG(extack, "ethertype not specified");
+ return -EINVAL;
+ }
+
+ if (data[IFLA_BAREUDP_PORT])
+ conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
+
+ if (data[IFLA_BAREUDP_ETHERTYPE])
+ conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);
+
+ if (data[IFLA_BAREUDP_SRCPORT_MIN])
+ conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
+
+ return 0;
+}
+
+static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
+ const struct bareudp_conf *conf)
+{
+ struct bareudp_dev *bareudp, *t = NULL;
+
+ list_for_each_entry(bareudp, &bn->bareudp_list, next) {
+ if (conf->port == bareudp->port)
+ t = bareudp;
+ }
+ return t;
+}
+
+static int bareudp_configure(struct net *net, struct net_device *dev,
+ struct bareudp_conf *conf)
+{
+ struct bareudp_net *bn = net_generic(net, bareudp_net_id);
+ struct bareudp_dev *t, *bareudp = netdev_priv(dev);
+ int err;
+
+ bareudp->net = net;
+ bareudp->dev = dev;
+ t = bareudp_find_dev(bn, conf);
+ if (t)
+ return -EBUSY;
+
+ if (conf->multi_proto_mode &&
+ (conf->ethertype != htons(ETH_P_MPLS_UC) &&
+ conf->ethertype != htons(ETH_P_IP)))
+ return -EINVAL;
+
+ bareudp->port = conf->port;
+ bareudp->ethertype = conf->ethertype;
+ bareudp->sport_min = conf->sport_min;
+ bareudp->multi_proto_mode = conf->multi_proto_mode;
+ err = register_netdevice(dev);
+ if (err)
+ return err;
+
+ list_add(&bareudp->next, &bn->bareudp_list);
+ return 0;
+}
+
+static int bareudp_link_config(struct net_device *dev,
+ struct nlattr *tb[])
+{
+ int err;
+
+ if (tb[IFLA_MTU]) {
+ err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int bareudp_newlink(struct net *net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct bareudp_conf conf;
+ int err;
+
+ err = bareudp2info(data, &conf, extack);
+ if (err)
+ return err;
+
+ err = bareudp_configure(net, dev, &conf);
+ if (err)
+ return err;
+
+ err = bareudp_link_config(dev, tb);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void bareudp_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+
+ list_del(&bareudp->next);
+ unregister_netdevice_queue(dev, head);
+}
+
+static size_t bareudp_get_size(const struct net_device *dev)
+{
+ return nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_PORT */
+ nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_ETHERTYPE */
+ nla_total_size(sizeof(__u16)) + /* IFLA_BAREUDP_SRCPORT_MIN */
+ nla_total_size(0) + /* IFLA_BAREUDP_MULTIPROTO_MODE */
+ 0;
+}
+
+static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+
+ if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port))
+ goto nla_put_failure;
+ if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype))
+ goto nla_put_failure;
+ if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min))
+ goto nla_put_failure;
+ if (bareudp->multi_proto_mode &&
+ nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
+ .kind = "bareudp",
+ .maxtype = IFLA_BAREUDP_MAX,
+ .policy = bareudp_policy,
+ .priv_size = sizeof(struct bareudp_dev),
+ .setup = bareudp_setup,
+ .validate = bareudp_validate,
+ .newlink = bareudp_newlink,
+ .dellink = bareudp_dellink,
+ .get_size = bareudp_get_size,
+ .fill_info = bareudp_fill_info,
+};
+
+struct net_device *bareudp_dev_create(struct net *net, const char *name,
+ u8 name_assign_type,
+ struct bareudp_conf *conf)
+{
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct net_device *dev;
+ LIST_HEAD(list_kill);
+ int err;
+
+ memset(tb, 0, sizeof(tb));
+ dev = rtnl_create_link(net, name, name_assign_type,
+ &bareudp_link_ops, tb, NULL);
+ if (IS_ERR(dev))
+ return dev;
+
+ err = bareudp_configure(net, dev, conf);
+ if (err) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+ err = dev_set_mtu(dev, IP_MAX_MTU - BAREUDP_BASE_HLEN);
+ if (err)
+ goto err;
+
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0)
+ goto err;
+
+ return dev;
+err:
+ bareudp_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(bareudp_dev_create);
+
+static __net_init int bareudp_init_net(struct net *net)
+{
+ struct bareudp_net *bn = net_generic(net, bareudp_net_id);
+
+ INIT_LIST_HEAD(&bn->bareudp_list);
+ return 0;
+}
+
+static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
+{
+ struct bareudp_net *bn = net_generic(net, bareudp_net_id);
+ struct bareudp_dev *bareudp, *next;
+
+ list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
+ unregister_netdevice_queue(bareudp->dev, head);
+}
+
+static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
+{
+ struct net *net;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ list_for_each_entry(net, net_list, exit_list)
+ bareudp_destroy_tunnels(net, &list);
+
+ /* unregister the devices gathered above */
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
+static struct pernet_operations bareudp_net_ops = {
+ .init = bareudp_init_net,
+ .exit_batch = bareudp_exit_batch_net,
+ .id = &bareudp_net_id,
+ .size = sizeof(struct bareudp_net),
+};
+
+static int __init bareudp_init_module(void)
+{
+ int rc;
+
+ rc = register_pernet_subsys(&bareudp_net_ops);
+ if (rc)
+ goto out1;
+
+ rc = rtnl_link_register(&bareudp_link_ops);
+ if (rc)
+ goto out2;
+
+ return 0;
+out2:
+ unregister_pernet_subsys(&bareudp_net_ops);
+out1:
+ return rc;
+}
+late_initcall(bareudp_init_module);
+
+static void __exit bareudp_cleanup_module(void)
+{
+ rtnl_link_unregister(&bareudp_link_ops);
+ unregister_pernet_subsys(&bareudp_net_ops);
+}
+module_exit(bareudp_cleanup_module);
+
+MODULE_ALIAS_RTNL_LINK("bareudp");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>");
+MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d10805e5e623..2e70e43c5df5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1265,7 +1265,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
skb->dev = bond->dev;
if (BOND_MODE(bond) == BOND_MODE_ALB &&
- bond->dev->priv_flags & IFF_BRIDGE_PORT &&
+ netif_is_bridge_port(bond->dev) &&
skb->pkt_type == PACKET_HOST) {
if (unlikely(skb_cow_head(skb,
@@ -4370,7 +4370,6 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
BOND_ABI_VERSION);
}
@@ -5008,8 +5007,6 @@ static int __init bonding_init(void)
int i;
int res;
- pr_info("%s", bond_version);
-
res = bond_check_params(&bonding_defaults);
if (res)
goto out;
@@ -5064,6 +5061,5 @@ static void __exit bonding_exit(void)
module_init(bonding_init);
module_exit(bonding_exit);
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
diff --git a/drivers/net/bonding/bonding_priv.h b/drivers/net/bonding/bonding_priv.h
index 5a4d81a9437c..45b77bc8c7b3 100644
--- a/drivers/net/bonding/bonding_priv.h
+++ b/drivers/net/bonding/bonding_priv.h
@@ -14,12 +14,11 @@
#ifndef _BONDING_PRIV_H
#define _BONDING_PRIV_H
+#include <linux/vermagic.h>
-#define DRV_VERSION "3.7.1"
-#define DRV_RELDATE "April 27, 2011"
#define DRV_NAME "bonding"
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
-#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+#define bond_version DRV_DESCRIPTION ": v" UTS_RELEASE "\n"
#endif
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index a3664281a33f..086dfb1b9d0b 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -348,11 +348,8 @@ static void slcan_write_wakeup(struct tty_struct *tty)
rcu_read_lock();
sl = rcu_dereference(tty->disc_data);
- if (!sl)
- goto out;
-
- schedule_work(&sl->tx_work);
-out:
+ if (sl)
+ schedule_work(&sl->tx_work);
rcu_read_unlock();
}
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 1a69286daa8d..68e2381694b9 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -681,7 +681,9 @@ int b53_configure_vlan(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
struct b53_vlan vl = { 0 };
+ struct b53_vlan *v;
int i, def_vid;
+ u16 vid;
def_vid = b53_default_pvid(dev);
@@ -699,8 +701,18 @@ int b53_configure_vlan(struct dsa_switch *ds)
b53_write16(dev, B53_VLAN_PAGE,
B53_VLAN_PORT_DEF_TAG(i), def_vid);
- if (!is5325(dev) && !is5365(dev))
- b53_set_jumbo(dev, dev->enable_jumbo, false);
+ /* Upon initial call we have not set up any VLANs, but upon
+ * system resume we need to restore all VLAN entries.
+ */
+ for (vid = def_vid; vid < dev->num_vlans; vid++) {
+ v = &dev->vlans[vid];
+
+ if (!v->members)
+ continue;
+
+ b53_set_vlan_entry(dev, vid, v);
+ b53_fast_age_vlan(dev, vid);
+ }
return 0;
}
@@ -807,8 +819,6 @@ static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
static int b53_reset_switch(struct b53_device *priv)
{
/* reset vlans */
- priv->enable_jumbo = false;
-
memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
@@ -1289,7 +1299,9 @@ EXPORT_SYMBOL(b53_phylink_mac_link_down);
void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct b53_device *dev = ds->priv;
@@ -1343,6 +1355,14 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
return -EOPNOTSUPP;
+ /* Port 7 on 7278 connects to the ASP's UniMAC, which is not capable of
+ * receiving VLAN-tagged frames at all, but we can still allow the port
+ * to be configured for egress untagged.
+ */
+ if (dev->chip_id == BCM7278_DEVICE_ID && port == 7 &&
+ !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
+ return -EINVAL;
+
if (vlan->vid_end > dev->num_vlans)
return -ERANGE;
@@ -1708,6 +1728,12 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
u16 pvlan, reg;
unsigned int i;
+ /* On 7278, port 7 which connects to the ASP should only receive
+ * traffic from matching CFP rules.
+ */
+ if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
+ return -EINVAL;
+
/* Make this port leave the all VLANs join since we will have proper
* VLAN entries from now on
*/
@@ -2063,6 +2089,26 @@ int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
}
EXPORT_SYMBOL(b53_set_mac_eee);
+static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
+{
+ struct b53_device *dev = ds->priv;
+ bool enable_jumbo;
+ bool allow_10_100;
+
+ if (is5325(dev) || is5365(dev))
+ return -EOPNOTSUPP;
+
+ enable_jumbo = (mtu >= JMS_MIN_SIZE);
+ allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
+
+ return b53_set_jumbo(dev, enable_jumbo, allow_10_100);
+}
+
+static int b53_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return JMS_MAX_SIZE;
+}
+
static const struct dsa_switch_ops b53_switch_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = b53_setup,
@@ -2100,6 +2146,8 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_mdb_prepare = b53_mdb_prepare,
.port_mdb_add = b53_mdb_add,
.port_mdb_del = b53_mdb_del,
+ .port_max_mtu = b53_get_max_mtu,
+ .port_change_mtu = b53_change_mtu,
};
struct b53_chip_data {
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 3c30f3a7eb29..3d42318bc3f1 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -338,7 +338,9 @@ void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev);
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
int b53_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index b0f5280a83cb..affa5c6e135c 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -178,9 +178,17 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
core_writel(priv, reg, CORE_DIS_LEARN);
/* Enable Broadcom tags for that port if requested */
- if (priv->brcm_tag_mask & BIT(port))
+ if (priv->brcm_tag_mask & BIT(port)) {
b53_brcm_hdr_setup(ds, port);
+ /* Disable learning on ASP port */
+ if (port == 7) {
+ reg = core_readl(priv, CORE_DIS_LEARN);
+ reg |= BIT(port);
+ core_writel(priv, reg, CORE_DIS_LEARN);
+ }
+ }
+
/* Configure Traffic Class to QoS mapping, allow each priority to map
* to a different queue number
*/
@@ -648,7 +656,9 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_eee *p = &priv->dev->ports[port].eee;
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 1962c8330daa..f707edc641cf 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -13,6 +13,8 @@
#include <net/dsa.h>
#include <linux/bitmap.h>
#include <net/flow_offload.h>
+#include <net/switchdev.h>
+#include <uapi/linux/if_bridge.h>
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
@@ -261,16 +263,27 @@ static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
struct flow_dissector_key_ipv4_addrs *addrs,
struct flow_dissector_key_ports *ports,
- unsigned int slice_num,
+ const __be16 vlan_tci,
+ unsigned int slice_num, u8 num_udf,
bool mask)
{
u32 reg, offset;
+ /* UDF_Valid[7:0] [31:24]
+ * S-Tag [23:8]
+ * C-Tag [7:0]
+ */
+ reg = udf_lower_bits(num_udf) << 24 | be16_to_cpu(vlan_tci) >> 8;
+ if (mask)
+ core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
+ else
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(5));
+
/* C-Tag [31:24]
* UDF_n_A8 [23:8]
* UDF_n_A7 [7:0]
*/
- reg = 0;
+ reg = (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
if (mask)
offset = CORE_CFP_MASK_PORT(4);
else
@@ -336,6 +349,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
struct ethtool_rx_flow_spec *fs)
{
struct ethtool_rx_flow_spec_input input = {};
+ __be16 vlan_tci = 0, vlan_m_tci = 0xffff;
const struct cfp_udf_layout *layout;
unsigned int slice_num, rule_index;
struct ethtool_rx_flow_rule *flow;
@@ -360,6 +374,12 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
+ /* Extract VLAN TCI */
+ if (fs->flow_type & FLOW_EXT) {
+ vlan_tci = fs->h_ext.vlan_tci;
+ vlan_m_tci = fs->m_ext.vlan_tci;
+ }
+
/* Locate the first rule available */
if (fs->location == RX_CLS_LOC_ANY)
rule_index = find_first_zero_bit(priv->cfp.used,
@@ -421,18 +441,11 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
core_writel(priv, layout->udfs[slice_num].mask_value |
udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));
- /* UDF_Valid[7:0] [31:24]
- * S-Tag [23:8]
- * C-Tag [7:0]
- */
- core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));
-
- /* Mask all but valid UDFs */
- core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
-
/* Program the match and the mask */
- bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false);
- bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true);
+ bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, vlan_tci,
+ slice_num, num_udf, false);
+ bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, vlan_m_tci,
+ SLICE_NUM_MASK, num_udf, true);
/* Insert into TCAM now */
bcm_sf2_cfp_rule_addr_set(priv, rule_index);
@@ -468,17 +481,29 @@ out_err_flow_rule:
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
const __be32 *ip6_addr, const __be16 port,
- unsigned int slice_num,
+ const __be16 vlan_tci,
+ unsigned int slice_num, u32 udf_bits,
bool mask)
{
u32 reg, tmp, val, offset;
+ /* UDF_Valid[7:0] [31:24]
+ * S-Tag [23:8]
+ * C-Tag [7:0]
+ */
+ reg = udf_bits << 24 | be16_to_cpu(vlan_tci) >> 8;
+ if (mask)
+ core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
+ else
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(5));
+
/* C-Tag [31:24]
* UDF_n_B8 [23:8] (port)
* UDF_n_B7 (upper) [7:0] (addr[15:8])
*/
reg = be32_to_cpu(ip6_addr[3]);
val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
+ val |= (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
if (mask)
offset = CORE_CFP_MASK_PORT(4);
else
@@ -587,6 +612,11 @@ static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
+ /* Compare VLAN TCI values as well */
+ if (rule->fs.flow_type & FLOW_EXT) {
+ ret |= rule->fs.h_ext.vlan_tci != fs->h_ext.vlan_tci;
+ ret |= rule->fs.m_ext.vlan_tci != fs->m_ext.vlan_tci;
+ }
if (ret == 0)
break;
}
@@ -600,6 +630,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
struct ethtool_rx_flow_spec *fs)
{
struct ethtool_rx_flow_spec_input input = {};
+ __be16 vlan_tci = 0, vlan_m_tci = 0xffff;
unsigned int slice_num, rule_index[2];
const struct cfp_udf_layout *layout;
struct ethtool_rx_flow_rule *flow;
@@ -623,6 +654,12 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);
+ /* Extract VLAN TCI */
+ if (fs->flow_type & FLOW_EXT) {
+ vlan_tci = fs->h_ext.vlan_tci;
+ vlan_m_tci = fs->m_ext.vlan_tci;
+ }
+
layout = &udf_tcpip6_layout;
slice_num = bcm_sf2_get_slice_number(layout, 0);
if (slice_num == UDF_NUM_SLICES)
@@ -704,20 +741,13 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
core_writel(priv, reg, CORE_CFP_MASK_PORT(6));
- /* UDF_Valid[7:0] [31:24]
- * S-Tag [23:8]
- * C-Tag [7:0]
- */
- core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));
-
- /* Mask all but valid UDFs */
- core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
-
/* Slice the IPv6 source address and port */
bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
- ports.key->src, slice_num, false);
+ ports.key->src, vlan_tci, slice_num,
+ udf_lower_bits(num_udf), false);
bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
- ports.mask->src, SLICE_NUM_MASK, true);
+ ports.mask->src, vlan_m_tci, SLICE_NUM_MASK,
+ udf_lower_bits(num_udf), true);
/* Insert into TCAM now because we need to insert a second rule */
bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -768,16 +798,12 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
udf_lower_bits(num_udf) << 8;
core_writel(priv, reg, CORE_CFP_MASK_PORT(6));
- /* Don't care */
- core_writel(priv, 0, CORE_CFP_DATA_PORT(5));
-
- /* Mask all */
- core_writel(priv, 0, CORE_CFP_MASK_PORT(5));
-
bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
- ports.key->dst, slice_num, false);
+ ports.key->dst, 0, slice_num,
+ 0, false);
bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
- ports.key->dst, SLICE_NUM_MASK, true);
+ ports.key->dst, 0, SLICE_NUM_MASK,
+ 0, true);
/* Insert into TCAM now */
bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);
@@ -823,7 +849,9 @@ static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
__u64 ring_cookie = fs->ring_cookie;
+ struct switchdev_obj_port_vlan vlan;
unsigned int queue_num, port_num;
+ u16 vid;
int ret;
/* This rule is a Wake-on-LAN filter and we must specifically
@@ -843,6 +871,34 @@ static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
dsa_is_cpu_port(ds, port_num)) ||
port_num >= priv->hw_params.num_ports)
return -EINVAL;
+
+ /* If the rule is matching a particular VLAN, make sure that we honor
+ * the matching and have it tagged or untagged on the destination port,
+ * we do this on egress with a VLAN entry. The egress tagging attribute
+ * is expected to be provided in h_ext.data[1] bit 0. A 1 means untagged,
+ * a 0 means tagged.
+ */
+ if (fs->flow_type & FLOW_EXT) {
+ /* We cannot support matching multiple VLAN IDs yet */
+ if ((be16_to_cpu(fs->m_ext.vlan_tci) & VLAN_VID_MASK) !=
+ VLAN_VID_MASK)
+ return -EINVAL;
+
+ vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
+ vlan.vid_begin = vid;
+ vlan.vid_end = vid;
+ if (be32_to_cpu(fs->h_ext.data[1]) & 1)
+ vlan.flags = BRIDGE_VLAN_INFO_UNTAGGED;
+ else
+ vlan.flags = 0;
+
+ ret = ds->ops->port_vlan_prepare(ds, port_num, &vlan);
+ if (ret)
+ return ret;
+
+ ds->ops->port_vlan_add(ds, port_num, &vlan);
+ }
+
/*
* We have a small oddity where Port 6 just does not have a
+ * valid bit here (so we subtract one).
@@ -878,21 +934,22 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
int ret = -EINVAL;
/* Check for unsupported extensions */
- if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
- fs->m_ext.data[1]))
+ if (fs->flow_type & FLOW_MAC_EXT)
return -EINVAL;
- if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
+ if (fs->location != RX_CLS_LOC_ANY &&
+ fs->location > bcm_sf2_cfp_rule_size(priv))
return -EINVAL;
+ if ((fs->flow_type & FLOW_EXT) &&
+ !(ds->ops->port_vlan_prepare || ds->ops->port_vlan_add ||
+ ds->ops->port_vlan_del))
+ return -EOPNOTSUPP;
+
if (fs->location != RX_CLS_LOC_ANY &&
test_bit(fs->location, priv->cfp.used))
return -EBUSY;
- if (fs->location != RX_CLS_LOC_ANY &&
- fs->location > bcm_sf2_cfp_rule_size(priv))
- return -EINVAL;
-
ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
if (ret == 0)
return -EEXIST;
@@ -973,7 +1030,7 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
struct cfp_rule *rule;
int ret;
- if (loc >= CFP_NUM_RULES)
+ if (loc > bcm_sf2_cfp_rule_size(priv))
return -EINVAL;
/* Refuse deleting unused rules, and those that are not unique since
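
The FLOW_EXT handling above establishes a small convention: the rule must match the full VLAN ID (m_ext.vlan_tci covering all VID bits), and bit 0 of h_ext.data[1] selects egress-untagged (1) versus tagged (0) on the destination port. A hedged userspace sketch of filling such a rule for the ETHTOOL_SRXCLSRLINS ioctl (field usage inferred from this patch, not a generic ethtool ABI):

    #include <string.h>
    #include <arpa/inet.h>
    #include <linux/ethtool.h>

    static void fill_vlan_rule(struct ethtool_rx_flow_spec *fs)
    {
            memset(fs, 0, sizeof(*fs));
            fs->flow_type = TCP_V4_FLOW | FLOW_EXT;
            fs->h_ext.vlan_tci = htons(100);     /* match VID 100 */
            fs->m_ext.vlan_tci = htons(0x0fff);  /* full VLAN_VID_MASK required */
            fs->h_ext.data[1] = htonl(1);        /* 1 = egress untagged */
            fs->ring_cookie = 2;                 /* forward to port 2 */
            fs->location = RX_CLS_LOC_ANY;
    }
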
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 0369c22fe3e1..cf6fa8fede33 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1517,7 +1517,9 @@ static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct gswip_priv *priv = ds->priv;
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index 1d7870c6df3c..4ec6a47b7f72 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_DSA_MICROCHIP_KSZ_COMMON
+ select NET_DSA_TAG_KSZ
tristate
menuconfig NET_DSA_MICROCHIP_KSZ9477
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index d8fda4a02640..fd1d6676ae4f 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -67,7 +67,7 @@ static void port_r_cnt(struct ksz_device *dev, int port)
static void ksz_mib_read_work(struct work_struct *work)
{
struct ksz_device *dev = container_of(work, struct ksz_device,
- mib_read);
+ mib_read.work);
struct ksz_port_mib *mib;
struct ksz_port *p;
int i;
@@ -93,32 +93,24 @@ static void ksz_mib_read_work(struct work_struct *work)
p->read = false;
mutex_unlock(&mib->cnt_mutex);
}
-}
-
-static void mib_monitor(struct timer_list *t)
-{
- struct ksz_device *dev = from_timer(dev, t, mib_read_timer);
- mod_timer(&dev->mib_read_timer, jiffies + dev->mib_read_interval);
- schedule_work(&dev->mib_read);
+ schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
}
void ksz_init_mib_timer(struct ksz_device *dev)
{
int i;
+ INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
+
/* Read MIB counters every 30 seconds to avoid overflow. */
dev->mib_read_interval = msecs_to_jiffies(30000);
- INIT_WORK(&dev->mib_read, ksz_mib_read_work);
- timer_setup(&dev->mib_read_timer, mib_monitor, 0);
-
for (i = 0; i < dev->mib_port_cnt; i++)
dev->dev_ops->port_init_cnt(dev, i);
/* Start the timer 2 seconds later. */
- dev->mib_read_timer.expires = jiffies + msecs_to_jiffies(2000);
- add_timer(&dev->mib_read_timer);
+ schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
@@ -152,7 +144,7 @@ void ksz_adjust_link(struct dsa_switch *ds, int port,
/* Read all MIB counters when the link is going down. */
if (!phydev->link) {
p->read = true;
- schedule_work(&dev->mib_read);
+ schedule_delayed_work(&dev->mib_read, 0);
}
mutex_lock(&dev->dev_mutex);
if (!phydev->link)
@@ -477,10 +469,8 @@ EXPORT_SYMBOL(ksz_switch_register);
void ksz_switch_remove(struct ksz_device *dev)
{
/* timer started */
- if (dev->mib_read_timer.expires) {
- del_timer_sync(&dev->mib_read_timer);
- flush_work(&dev->mib_read);
- }
+ if (dev->mib_read_interval)
+ cancel_delayed_work_sync(&dev->mib_read);
dev->dev_ops->exit(dev);
dsa_unregister_switch(dev->ds);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index a20ebb749377..f2c9bb68fd33 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -80,8 +80,7 @@ struct ksz_device {
struct vlan_table *vlan_cache;
struct ksz_port *ports;
- struct timer_list mib_read_timer;
- struct work_struct mib_read;
+ struct delayed_work mib_read;
unsigned long mib_read_interval;
u16 br_member;
u16 member;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 7cbd1bd4c5a6..ef57552db260 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -563,17 +563,6 @@ mt7530_mib_reset(struct dsa_switch *ds)
mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
}
-static void
-mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable)
-{
- u32 mask = PMCR_TX_EN | PMCR_RX_EN | PMCR_FORCE_LNK;
-
- if (enable)
- mt7530_set(priv, MT7530_PMCR_P(port), mask);
- else
- mt7530_clear(priv, MT7530_PMCR_P(port), mask);
-}
-
static int mt7530_phy_read(struct dsa_switch *ds, int port, int regnum)
{
struct mt7530_priv *priv = ds->priv;
@@ -750,7 +739,7 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
priv->ports[port].enable = true;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
priv->ports[port].pm);
- mt7530_port_set_status(priv, port, 0);
+ mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
@@ -773,7 +762,7 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
priv->ports[port].enable = false;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
- mt7530_port_set_status(priv, port, 0);
+ mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
}
@@ -1222,6 +1211,64 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
+static int mt7530_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 val;
+
+ /* Check for an existing entry */
+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+ return -EEXIST;
+
+ val = mt7530_read(priv, MT7530_MFC);
+
+ /* MT7530 only supports one monitor port */
+ if (val & MIRROR_EN && MIRROR_PORT(val) != mirror->to_local_port)
+ return -EEXIST;
+
+ val |= MIRROR_EN;
+ val &= ~MIRROR_MASK;
+ val |= mirror->to_local_port;
+ mt7530_write(priv, MT7530_MFC, val);
+
+ val = mt7530_read(priv, MT7530_PCR_P(port));
+ if (ingress) {
+ val |= PORT_RX_MIR;
+ priv->mirror_rx |= BIT(port);
+ } else {
+ val |= PORT_TX_MIR;
+ priv->mirror_tx |= BIT(port);
+ }
+ mt7530_write(priv, MT7530_PCR_P(port), val);
+
+ return 0;
+}
+
+static void mt7530_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 val;
+
+ val = mt7530_read(priv, MT7530_PCR_P(port));
+ if (mirror->ingress) {
+ val &= ~PORT_RX_MIR;
+ priv->mirror_rx &= ~BIT(port);
+ } else {
+ val &= ~PORT_TX_MIR;
+ priv->mirror_tx &= ~BIT(port);
+ }
+ mt7530_write(priv, MT7530_PCR_P(port), val);
+
+ if (!priv->mirror_rx && !priv->mirror_tx) {
+ val = mt7530_read(priv, MT7530_MFC);
+ val &= ~MIRROR_EN;
+ mt7530_write(priv, MT7530_MFC, val);
+ }
+}
+
static enum dsa_tag_protocol
mtk_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
@@ -1441,8 +1488,7 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
mcr_new = mcr_cur;
- mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 |
- PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN);
+ mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
PMCR_BACKPR_EN | PMCR_FORCE_MODE;
@@ -1450,22 +1496,6 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
if (port == 5 && dsa_is_user_port(ds, 5))
mcr_new |= PMCR_EXT_PHY;
- switch (state->speed) {
- case SPEED_1000:
- mcr_new |= PMCR_FORCE_SPEED_1000;
- break;
- case SPEED_100:
- mcr_new |= PMCR_FORCE_SPEED_100;
- break;
- }
- if (state->duplex == DUPLEX_FULL) {
- mcr_new |= PMCR_FORCE_FDX;
- if (state->pause & MLO_PAUSE_TX)
- mcr_new |= PMCR_TX_FC_EN;
- if (state->pause & MLO_PAUSE_RX)
- mcr_new |= PMCR_RX_FC_EN;
- }
-
if (mcr_new != mcr_cur)
mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
}
@@ -1476,17 +1506,38 @@ static void mt7530_phylink_mac_link_down(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
- mt7530_port_set_status(priv, port, 0);
+ mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
}
static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct mt7530_priv *priv = ds->priv;
+ u32 mcr;
+
+ mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
+
+ switch (speed) {
+ case SPEED_1000:
+ mcr |= PMCR_FORCE_SPEED_1000;
+ break;
+ case SPEED_100:
+ mcr |= PMCR_FORCE_SPEED_100;
+ break;
+ }
+ if (duplex == DUPLEX_FULL) {
+ mcr |= PMCR_FORCE_FDX;
+ if (tx_pause)
+ mcr |= PMCR_TX_FC_EN;
+ if (rx_pause)
+ mcr |= PMCR_RX_FC_EN;
+ }
- mt7530_port_set_status(priv, port, 1);
+ mt7530_set(priv, MT7530_PMCR_P(port), mcr);
}
static void mt7530_phylink_validate(struct dsa_switch *ds, int port,
@@ -1611,6 +1662,8 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_vlan_prepare = mt7530_port_vlan_prepare,
.port_vlan_add = mt7530_port_vlan_add,
.port_vlan_del = mt7530_port_vlan_del,
+ .port_mirror_add = mt7530_port_mirror_add,
+ .port_mirror_del = mt7530_port_mirror_del,
.phylink_validate = mt7530_phylink_validate,
.phylink_mac_link_state = mt7530_phylink_mac_link_state,
.phylink_mac_config = mt7530_phylink_mac_config,
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index ccb9da8cad0d..ef9b52f3152b 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -36,6 +36,9 @@ enum {
#define CPU_EN BIT(7)
#define CPU_PORT(x) ((x) << 4)
#define CPU_MASK (0xf << 4)
+#define MIRROR_EN BIT(3)
+#define MIRROR_PORT(x) ((x) & 0x7)
+#define MIRROR_MASK 0x7
/* Registers for address table access */
#define MT7530_ATA1 0x74
@@ -141,6 +144,8 @@ enum mt7530_stp_state {
/* Register for port control */
#define MT7530_PCR_P(x) (0x2004 + ((x) * 0x100))
+#define PORT_TX_MIR BIT(9)
+#define PORT_RX_MIR BIT(8)
#define PORT_VLAN(x) ((x) & 0x3)
enum mt7530_port_mode {
@@ -201,6 +206,10 @@ enum mt7530_vlan_port_attr {
#define PMCR_FORCE_LNK BIT(0)
#define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \
PMCR_FORCE_SPEED_1000)
+#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
+ PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
+ PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
+ PMCR_FORCE_FDX | PMCR_FORCE_LNK)
#define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
#define PMSR_EEE1G BIT(7)
@@ -460,6 +469,8 @@ struct mt7530_priv {
phy_interface_t p6_interface;
phy_interface_t p5_interface;
unsigned int p5_intf_sel;
+ u8 mirror_rx;
+ u8 mirror_tx;
struct mt7530_port ports[MT7530_NUM_PORTS];
 /* protect register access among processes */
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 2f993e673ec7..221593261e8f 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -397,32 +397,35 @@ static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
mv88e6xxx_reg_unlock(chip);
}
-int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
- int speed, int duplex, int pause,
- phy_interface_t mode)
+static int mv88e6xxx_port_config_interface(struct mv88e6xxx_chip *chip,
+ int port, phy_interface_t interface)
{
- struct phylink_link_state state;
int err;
- if (!chip->info->ops->port_set_link)
- return 0;
+ if (chip->info->ops->port_set_rgmii_delay) {
+ err = chip->info->ops->port_set_rgmii_delay(chip, port,
+ interface);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
- if (!chip->info->ops->port_link_state)
- return 0;
+ if (chip->info->ops->port_set_cmode) {
+ err = chip->info->ops->port_set_cmode(chip, port,
+ interface);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
- err = chip->info->ops->port_link_state(chip, port, &state);
- if (err)
- return err;
+ return 0;
+}
- /* Has anything actually changed? We don't expect the
- * interface mode to change without one of the other
- * parameters also changing
- */
- if (state.link == link &&
- state.speed == speed &&
- state.duplex == duplex &&
- (state.interface == mode ||
- state.interface == PHY_INTERFACE_MODE_NA))
+static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
+ int link, int speed, int duplex, int pause,
+ phy_interface_t mode)
+{
+ int err;
+
+ if (!chip->info->ops->port_set_link)
return 0;
/* Port's MAC control must not be changed unless the link is down */
@@ -430,8 +433,9 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
if (err)
return err;
- if (chip->info->ops->port_set_speed) {
- err = chip->info->ops->port_set_speed(chip, port, speed);
+ if (chip->info->ops->port_set_speed_duplex) {
+ err = chip->info->ops->port_set_speed_duplex(chip, port,
+ speed, duplex);
if (err && err != -EOPNOTSUPP)
goto restore_link;
}
@@ -445,25 +449,7 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
goto restore_link;
}
- if (chip->info->ops->port_set_duplex) {
- err = chip->info->ops->port_set_duplex(chip, port, duplex);
- if (err && err != -EOPNOTSUPP)
- goto restore_link;
- }
-
- if (chip->info->ops->port_set_rgmii_delay) {
- err = chip->info->ops->port_set_rgmii_delay(chip, port, mode);
- if (err && err != -EOPNOTSUPP)
- goto restore_link;
- }
-
- if (chip->info->ops->port_set_cmode) {
- err = chip->info->ops->port_set_cmode(chip, port, mode);
- if (err && err != -EOPNOTSUPP)
- goto restore_link;
- }
-
- err = 0;
+ err = mv88e6xxx_port_config_interface(chip, port, mode);
restore_link:
if (chip->info->ops->port_set_link(chip, port, link))
dev_err(chip->dev, "p%d: failed to restore MAC's link\n", port);
@@ -478,6 +464,97 @@ static int mv88e6xxx_phy_is_internal(struct dsa_switch *ds, int port)
return port < chip->info->num_internal_phys;
}
+static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err) {
+ dev_err(chip->dev,
+ "p%d: %s: failed to read port status\n",
+ port, __func__);
+ return err;
+ }
+
+ return !!(reg & MV88E6XXX_PORT_STS_PHY_DETECT);
+}
+
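mv88e6xxx_port_ppu_updates() keys off the PHY_DETECT bit in the port status register: when it is set, the PPU polls the attached PHY and propagates speed/duplex/link into the MAC itself, so the phylink paths below leave the port alone. A self-contained model of that test; the 0x1000 bit value matches MV88E6XXX_PORT_STS_PHY_DETECT in the driver's port.h, the rest is illustrative:

#include <stdbool.h>
#include <stdio.h>

#define PORT_STS_PHY_DETECT	0x1000	/* bit 12 of the port status register */

static bool ppu_updates(unsigned int port_sts)
{
	return port_sts & PORT_STS_PHY_DETECT;
}

int main(void)
{
	/* Internal-PHY port: the PPU owns link parameters, MAC untouched. */
	printf("PHY_DETECT set:   ppu_updates=%d\n", ppu_updates(0x1000));
	/* Fixed-link/serdes port: software must force the MAC itself. */
	printf("PHY_DETECT clear: ppu_updates=%d\n", ppu_updates(0x0000));
	return 0;
}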
+static int mv88e6xxx_serdes_pcs_get_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u8 lane;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane && chip->info->ops->serdes_pcs_get_state)
+ err = chip->info->ops->serdes_pcs_get_state(chip, port, lane,
+ state);
+ else
+ err = -EOPNOTSUPP;
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise)
+{
+ const struct mv88e6xxx_ops *ops = chip->info->ops;
+ u8 lane;
+
+ if (ops->serdes_pcs_config) {
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane)
+ return ops->serdes_pcs_config(chip, port, lane, mode,
+ interface, advertise);
+ }
+
+ return 0;
+}
+
+static void mv88e6xxx_serdes_pcs_an_restart(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ const struct mv88e6xxx_ops *ops;
+ int err = 0;
+ u8 lane;
+
+ ops = chip->info->ops;
+
+ if (ops->serdes_pcs_an_restart) {
+ mv88e6xxx_reg_lock(chip);
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane)
+ err = ops->serdes_pcs_an_restart(chip, port, lane);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ dev_err(ds->dev, "p%d: failed to restart AN\n", port);
+ }
+}
+
+static int mv88e6xxx_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ unsigned int mode,
+ int speed, int duplex)
+{
+ const struct mv88e6xxx_ops *ops = chip->info->ops;
+ u8 lane;
+
+ if (!phylink_autoneg_inband(mode) && ops->serdes_pcs_link_up) {
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane)
+ return ops->serdes_pcs_link_up(chip, port, lane,
+ speed, duplex);
+ }
+
+ return 0;
+}
+
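Each of the serdes_pcs_* wrappers above follows the same gating pattern: look up the port's serdes lane, invoke the chip op only when a lane is mapped and the op is implemented, and otherwise treat the call as a successful no-op (the state query instead reports -EOPNOTSUPP so phylink knows nothing was read). A toy standalone version of that pattern, with an invented lane mapping:

#include <stdio.h>

typedef int (*pcs_op_t)(int port, unsigned char lane);

/* Invented mapping: only ports 9 and 10 sit behind a serdes lane. */
static unsigned char get_lane(int port)
{
	return port >= 9 ? (unsigned char)(port - 8) : 0;	/* 0: no lane */
}

static int demo_op(int port, unsigned char lane)
{
	printf("p%d: configuring serdes lane %u\n", port, lane);
	return 0;
}

static int call_pcs(pcs_op_t op, int port)
{
	unsigned char lane;

	if (!op)
		return 0;	/* chip without serdes PCS support */
	lane = get_lane(port);
	if (!lane)
		return 0;	/* port not backed by a serdes lane */
	return op(port, lane);
}

int main(void)
{
	call_pcs(demo_op, 4);	/* no lane: silently skipped */
	call_pcs(demo_op, 9);	/* lane 1: op runs */
	return 0;
}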
static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port,
unsigned long *mask,
struct phylink_link_state *state)
@@ -582,83 +659,106 @@ static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
phylink_helper_basex_speed(state);
}
-static int mv88e6xxx_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- mv88e6xxx_reg_lock(chip);
- if (chip->info->ops->port_link_state)
- err = chip->info->ops->port_link_state(chip, port, state);
- else
- err = -EOPNOTSUPP;
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int speed, duplex, link, pause, err;
+ int err;
+ /* FIXME: is this the correct test? If we're in fixed mode on an
+ * internal port, why should we process this any differently from
+ * PHY mode? On the other hand, the port may be automedia between
+ * an internal PHY and the serdes...
+ */
if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port))
return;
- if (mode == MLO_AN_FIXED) {
- link = LINK_FORCED_UP;
- speed = state->speed;
- duplex = state->duplex;
- } else if (!mv88e6xxx_phy_is_internal(ds, port)) {
- link = state->link;
- speed = state->speed;
- duplex = state->duplex;
- } else {
- speed = SPEED_UNFORCED;
- duplex = DUPLEX_UNFORCED;
- link = LINK_UNFORCED;
- }
- pause = !!phylink_test(state->advertising, Pause);
-
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex, pause,
- state->interface);
+ /* FIXME: should we force the link down here? If we do, how do we
+ * restore the link force/unforce state? The driver layering gets
+ * in the way.
+ */
+ err = mv88e6xxx_port_config_interface(chip, port, state->interface);
+ if (err && err != -EOPNOTSUPP)
+ goto err_unlock;
+
+ err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface,
+ state->advertising);
+ /* FIXME: we should restart negotiation if something changed, which
+ * is something we get once we convert to using phylink's PCS
+ * operations.
+ */
+ if (err > 0)
+ err = 0;
+
+err_unlock:
mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
- dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
+ dev_err(ds->dev, "p%d: failed to configure MAC/PCS\n", port);
}
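mac_config() above tolerates -EOPNOTSUPP from the interface programming and squashes positive returns from serdes_pcs_config(). The convention assumed here is that a positive return means "the advertisement actually changed, so an autoneg restart would be appropriate", which the FIXME defers until the driver uses phylink's PCS operations. A tiny standalone model of that return-value handling (names invented):

#include <stdio.h>

/* Returns 1 when the stored advertisement changed, 0 when unchanged. */
static int pcs_config(unsigned int new_adv)
{
	static unsigned int cur_adv = ~0u;
	int changed = cur_adv != new_adv;

	cur_adv = new_adv;
	return changed;
}

int main(void)
{
	int err;

	err = pcs_config(0x1a0);
	if (err > 0) {
		printf("advertisement changed; AN restart would be needed\n");
		err = 0;	/* squashed, as in mac_config() above */
	}
	return err;
}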
-static void mv88e6xxx_mac_link_force(struct dsa_switch *ds, int port, int link)
+static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int err;
+ const struct mv88e6xxx_ops *ops;
+ int err = 0;
+
+ ops = chip->info->ops;
mv88e6xxx_reg_lock(chip);
- err = chip->info->ops->port_set_link(chip, port, link);
+ if (!mv88e6xxx_port_ppu_updates(chip, port) && ops->port_set_link)
+ err = ops->port_set_link(chip, port, LINK_FORCED_DOWN);
mv88e6xxx_reg_unlock(chip);
if (err)
- dev_err(chip->dev, "p%d: failed to force MAC link\n", port);
-}
-
-static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface)
-{
- if (mode == MLO_AN_FIXED)
- mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_DOWN);
+ dev_err(chip->dev,
+ "p%d: failed to force MAC link down\n", port);
}
static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode, phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
- if (mode == MLO_AN_FIXED)
- mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_UP);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ const struct mv88e6xxx_ops *ops;
+ int err = 0;
+
+ ops = chip->info->ops;
+
+ mv88e6xxx_reg_lock(chip);
+ if (!mv88e6xxx_port_ppu_updates(chip, port)) {
+ /* FIXME: for an automedia port, should we force the link down
+ * here? What if the link comes up via the other media while we're
+ * bringing the port up, and how is that exclusivity handled in the
+ * Marvell hardware? E.g. port 2 on the 88E6390 is shared between
+ * the internal PHY and the serdes.
+ */
+ err = mv88e6xxx_serdes_pcs_link_up(chip, port, mode, speed,
+ duplex);
+ if (err)
+ goto error;
+
+ if (ops->port_set_speed_duplex) {
+ err = ops->port_set_speed_duplex(chip, port,
+ speed, duplex);
+ if (err && err != -EOPNOTSUPP)
+ goto error;
+ }
+
+ if (ops->port_set_link)
+ err = ops->port_set_link(chip, port, LINK_FORCED_UP);
+ }
+error:
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err && err != -EOPNOTSUPP)
+ dev_err(ds->dev,
+ "p%d: failed to configure MAC link up\n", port);
}
static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
@@ -1018,7 +1118,14 @@ static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
- return 32 * sizeof(u16);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int len;
+
+ len = 32 * sizeof(u16);
+ if (chip->info->ops->serdes_get_regs_len)
+ len += chip->info->ops->serdes_get_regs_len(chip, port);
+
+ return len;
}
static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
@@ -1043,6 +1150,9 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
p[i] = reg;
}
+ if (chip->info->ops->serdes_get_regs)
+ chip->info->ops->serdes_get_regs(chip, port, &p[i]);
+
mv88e6xxx_reg_unlock(chip);
}
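The two ethtool hooks must agree on a single layout: the blob starts with the 32 16-bit port registers, and when a serdes_get_regs op exists its registers are appended immediately after (the &p[i] above, with i left at 32 by the loop). A sketch of that contract with made-up register counts and values:

#include <stdint.h>
#include <stdio.h>

#define PORT_REGS	32
#define SERDES_REGS	26	/* hypothetical per-chip serdes register count */

static int get_regs_len(void)
{
	return PORT_REGS * sizeof(uint16_t) + SERDES_REGS * sizeof(uint16_t);
}

static void get_regs(uint16_t *p)
{
	int i;

	for (i = 0; i < PORT_REGS; i++)
		p[i] = 0xdead;			/* stand-in for port reads */
	for (i = 0; i < SERDES_REGS; i++)
		p[PORT_REGS + i] = 0xbeef;	/* appended serdes registers */
}

int main(void)
{
	uint16_t buf[PORT_REGS + SERDES_REGS];

	printf("regs len: %d bytes\n", get_regs_len());
	get_regs(buf);
	printf("first serdes register: 0x%04x\n", buf[PORT_REGS]);
	return 0;
}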
@@ -1785,7 +1895,7 @@ static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid)
}
static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
- u16 vid, u8 member)
+ u16 vid, u8 member, bool warn)
{
const u8 non_member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER;
struct mv88e6xxx_vtu_entry vlan;
@@ -1830,7 +1940,7 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
if (err)
return err;
- } else {
+ } else if (warn) {
dev_info(chip->dev, "p%d: already a member of VLAN %d\n",
port, vid);
}
@@ -1844,6 +1954,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ bool warn;
u8 member;
u16 vid;
@@ -1857,10 +1968,15 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
else
member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED;
+ /* net/dsa/slave.c will call dsa_port_vlan_add() for the affected port
+ * and then for the CPU port. Do not warn about duplicates on the CPU
+ * or DSA ports.
+ */
+ warn = !dsa_is_cpu_port(ds, port) && !dsa_is_dsa_port(ds, port);
+
mv88e6xxx_reg_lock(chip);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- if (mv88e6xxx_port_vlan_join(chip, port, vid, member))
+ if (mv88e6xxx_port_vlan_join(chip, port, vid, member, warn))
dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port,
vid, untagged ? 'u' : 't');
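The warn flag exists because the DSA core programs each VID twice: once on the user port and once on the shared CPU port, so the CPU-port join routinely finds the entry already present. A toy model (not kernel code) of why only user ports should warn:

#include <stdbool.h>
#include <stdio.h>

#define NUM_PORTS	7
#define CPU_PORT	5	/* invented port layout */

static bool member[NUM_PORTS][4096];

static void vlan_join(int port, int vid, bool warn)
{
	if (!member[port][vid])
		member[port][vid] = true;
	else if (warn)
		printf("p%d: already a member of VLAN %d\n", port, vid);
}

static void port_vlan_add(int port, int vid)
{
	/* Suppress duplicate noise on the shared CPU port. */
	vlan_join(port, vid, port != CPU_PORT);
}

int main(void)
{
	/* Two user ports bridged with the same VID: the CPU port sees the
	 * VID twice, but with the warn gating nothing is printed for it.
	 */
	port_vlan_add(1, 100);
	port_vlan_add(CPU_PORT, 100);
	port_vlan_add(2, 100);
	port_vlan_add(CPU_PORT, 100);
	return 0;
}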
@@ -3272,8 +3388,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3282,7 +3397,6 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3312,12 +3426,10 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
.port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
- .port_link_state = mv88e6185_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3343,8 +3455,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3354,7 +3465,6 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3383,13 +3493,11 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -3418,8 +3526,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6185_port_set_egress_floods,
@@ -3429,7 +3536,6 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_set_pause = mv88e6185_port_set_pause,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3461,9 +3567,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6341_port_set_speed,
+ .port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -3474,7 +3579,6 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -3493,6 +3597,11 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6341_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
@@ -3509,8 +3618,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3520,7 +3628,6 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3552,11 +3659,9 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.phy_read = mv88e6165_phy_read,
.phy_write = mv88e6165_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3588,9 +3693,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3600,7 +3704,6 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -3632,9 +3735,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6352_port_set_speed,
+ .port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -3645,7 +3747,6 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -3665,7 +3766,13 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6352_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
.serdes_power = mv88e6352_serdes_power,
+ .serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6352_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6352_phylink_validate,
};
@@ -3679,9 +3786,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3691,7 +3797,6 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -3723,9 +3828,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6352_port_set_speed,
+ .port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -3736,7 +3840,6 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -3756,10 +3859,16 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6352_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
.serdes_power = mv88e6352_serdes_power,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
.serdes_irq_enable = mv88e6352_serdes_irq_enable,
.serdes_irq_status = mv88e6352_serdes_irq_status,
+ .serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6352_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6352_phylink_validate,
};
@@ -3772,14 +3881,12 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
.port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.port_set_pause = mv88e6185_port_set_pause,
- .port_link_state = mv88e6185_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
@@ -3810,9 +3917,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
+ .port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
@@ -3822,7 +3928,6 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -3844,11 +3949,18 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_validate = mv88e6390_phylink_validate,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390_phylink_validate,
@@ -3864,9 +3976,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390x_port_set_speed,
+ .port_set_speed_duplex = mv88e6390x_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390x_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
@@ -3876,7 +3987,6 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390x_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -3898,11 +4008,18 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_validate = mv88e6390_phylink_validate,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390x_phylink_validate,
@@ -3918,9 +4035,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
+ .port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -3929,7 +4045,6 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -3951,11 +4066,18 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_validate = mv88e6390_phylink_validate,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -3973,9 +4095,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6352_port_set_speed,
+ .port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -3986,7 +4107,6 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -4006,10 +4126,16 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6352_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
.serdes_power = mv88e6352_serdes_power,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
.serdes_irq_enable = mv88e6352_serdes_irq_enable,
.serdes_irq_status = mv88e6352_serdes_irq_status,
+ .serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6352_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -4027,9 +4153,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6250_port_set_speed,
+ .port_set_speed_duplex = mv88e6250_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -4037,7 +4162,6 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6250_port_link_state,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6250_stats_get_sset_count,
@@ -4066,9 +4190,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
+ .port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
@@ -4078,7 +4201,6 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -4100,11 +4222,18 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_validate = mv88e6390_phylink_validate,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
@@ -4123,8 +4252,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -4134,7 +4262,6 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -4167,8 +4294,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -4178,7 +4304,6 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -4209,9 +4334,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6341_port_set_speed,
+ .port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -4222,7 +4346,6 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -4241,6 +4364,11 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6341_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
@@ -4259,9 +4387,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -4271,7 +4398,6 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -4301,9 +4427,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6185_port_set_speed,
+ .port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -4313,7 +4438,6 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -4347,9 +4471,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
- .port_set_speed = mv88e6352_port_set_speed,
+ .port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -4360,7 +4483,6 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
@@ -4380,6 +4502,10 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6352_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6352_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6352_serdes_pcs_link_up,
.serdes_power = mv88e6352_serdes_power,
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
.serdes_irq_enable = mv88e6352_serdes_irq_enable,
@@ -4390,6 +4516,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.serdes_get_sset_count = mv88e6352_serdes_get_sset_count,
.serdes_get_strings = mv88e6352_serdes_get_strings,
.serdes_get_stats = mv88e6352_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6352_serdes_get_regs,
.phylink_validate = mv88e6352_phylink_validate,
};
@@ -4403,9 +4531,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390_port_set_speed,
+ .port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
@@ -4417,7 +4544,6 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -4439,6 +4565,11 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
+ /* Check status register pause & lpa register */
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
@@ -4448,6 +4579,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_validate = mv88e6390_phylink_validate,
};
@@ -4461,9 +4594,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
- .port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
- .port_set_speed = mv88e6390x_port_set_speed,
+ .port_set_speed_duplex = mv88e6390x_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390x_port_max_speed_mode,
.port_tag_remap = mv88e6390_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
@@ -4475,7 +4607,6 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_pause_limit = mv88e6390_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
- .port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390x_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
@@ -4497,12 +4628,18 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
+ .serdes_pcs_config = mv88e6390_serdes_pcs_config,
+ .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart,
+ .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up,
.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5373,8 +5510,9 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.setup = mv88e6xxx_setup,
.teardown = mv88e6xxx_teardown,
.phylink_validate = mv88e6xxx_validate,
- .phylink_mac_link_state = mv88e6xxx_link_state,
+ .phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
.phylink_mac_config = mv88e6xxx_mac_config,
+ .phylink_mac_an_restart = mv88e6xxx_serdes_pcs_an_restart,
.phylink_mac_link_down = mv88e6xxx_mac_link_down,
.phylink_mac_link_up = mv88e6xxx_mac_link_up,
.get_strings = mv88e6xxx_get_strings,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 79cad5e751c6..e5430cf2ad71 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -399,15 +399,6 @@ struct mv88e6xxx_ops {
*/
int (*port_set_link)(struct mv88e6xxx_chip *chip, int port, int link);
-#define DUPLEX_UNFORCED -2
-
- /* Port's MAC duplex mode
- *
- * Use DUPLEX_HALF or DUPLEX_FULL to force half or full duplex,
- * or DUPLEX_UNFORCED for normal duplex detection.
- */
- int (*port_set_duplex)(struct mv88e6xxx_chip *chip, int port, int dup);
-
#define PAUSE_ON 1
#define PAUSE_OFF 0
@@ -417,13 +408,18 @@ struct mv88e6xxx_ops {
#define SPEED_MAX INT_MAX
#define SPEED_UNFORCED -2
+#define DUPLEX_UNFORCED -2
- /* Port's MAC speed (in Mbps)
+ /* Port's MAC speed (in Mbps) and MAC duplex mode
*
* Depending on the chip, 10, 100, 200, 1000, 2500, 10000 are valid.
* Use SPEED_UNFORCED for normal detection, SPEED_MAX for max value.
+ *
+ * Use DUPLEX_HALF or DUPLEX_FULL to force half or full duplex,
+ * or DUPLEX_UNFORCED for normal duplex detection.
*/
- int (*port_set_speed)(struct mv88e6xxx_chip *chip, int port, int speed);
+ int (*port_set_speed_duplex)(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
/* What interface mode should be used for maximum speed? */
phy_interface_t (*port_max_speed_mode)(int port);
@@ -462,9 +458,6 @@ struct mv88e6xxx_ops {
*/
int (*port_set_upstream_port)(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
- /* Return the port link state, as required by phylink */
- int (*port_link_state)(struct mv88e6xxx_chip *chip, int port,
- struct phylink_link_state *state);
/* Snapshot the statistics for a port. The statistics can then
* be read back at leisure but still with a consistent view.
@@ -502,6 +495,17 @@ struct mv88e6xxx_ops {
/* SERDES lane mapping */
u8 (*serdes_get_lane)(struct mv88e6xxx_chip *chip, int port);
+ int (*serdes_pcs_get_state)(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, struct phylink_link_state *state);
+ int (*serdes_pcs_config)(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise);
+ int (*serdes_pcs_an_restart)(struct mv88e6xxx_chip *chip, int port,
+ u8 lane);
+ int (*serdes_pcs_link_up)(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, int speed, int duplex);
+
/* SERDES interrupt handling */
unsigned int (*serdes_irq_mapping)(struct mv88e6xxx_chip *chip,
int port);
@@ -517,6 +521,11 @@ struct mv88e6xxx_ops {
int (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
+ /* SERDES registers for ethtool */
+ int (*serdes_get_regs_len)(struct mv88e6xxx_chip *chip, int port);
+ void (*serdes_get_regs)(struct mv88e6xxx_chip *chip, int port,
+ void *_p);
+
/* Address Translation Unit operations */
int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
@@ -664,9 +673,6 @@ int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
u16 mask, u16 val);
int mv88e6xxx_wait_bit(struct mv88e6xxx_chip *chip, int addr, int reg,
int bit, int val);
-int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
- int speed, int duplex, int pause,
- phy_interface_t mode);
struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip);
static inline void mv88e6xxx_reg_lock(struct mv88e6xxx_chip *chip)
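The merged op documented above folds two knobs into one call; the sentinel values keep "leave it to hardware detection" expressible for both speed and duplex. A hedged illustration of the caller-visible contract, using the same sentinel defines (the 1000 Mbps maximum and the prints are invented):

#include <limits.h>
#include <stdio.h>

#define SPEED_MAX	INT_MAX
#define SPEED_UNFORCED	-2
#define DUPLEX_UNFORCED	-2
#define DUPLEX_FULL	1

static int port_set_speed_duplex(int port, int speed, int duplex)
{
	if (speed == SPEED_MAX)
		speed = 1000;		/* chip-specific maximum */

	if (speed == SPEED_UNFORCED)
		printf("p%d: speed unforced\n", port);
	else
		printf("p%d: speed forced to %d Mbps\n", port, speed);

	if (duplex == DUPLEX_UNFORCED)
		printf("p%d: duplex unforced\n", port);
	else
		printf("p%d: duplex forced to %s\n", port,
		       duplex == DUPLEX_FULL ? "full" : "half");
	return 0;
}

int main(void)
{
	port_set_speed_duplex(0, SPEED_MAX, DUPLEX_FULL);	/* forced */
	port_set_speed_duplex(1, SPEED_UNFORCED, DUPLEX_UNFORCED); /* auto */
	return 0;
}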
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 0b43c650e100..8128dc607cf4 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -162,46 +162,9 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
return 0;
}
-int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup)
-{
- u16 reg;
- int err;
-
- err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
- if (err)
- return err;
-
- reg &= ~(MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
- MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL);
-
- switch (dup) {
- case DUPLEX_HALF:
- reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX;
- break;
- case DUPLEX_FULL:
- reg |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
- MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL;
- break;
- case DUPLEX_UNFORCED:
- /* normal duplex detection */
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
- if (err)
- return err;
-
- dev_dbg(chip->dev, "p%d: %s %s duplex\n", port,
- reg & MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX ? "Force" : "Unforce",
- reg & MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL ? "full" : "half");
-
- return 0;
-}
-
-static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
- int speed, bool alt_bit, bool force_bit)
+static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
+ int port, int speed, bool alt_bit,
+ bool force_bit, int duplex)
{
u16 reg, ctrl;
int err;
@@ -239,11 +202,29 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
return -EOPNOTSUPP;
}
+ switch (duplex) {
+ case DUPLEX_HALF:
+ ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX;
+ break;
+ case DUPLEX_FULL:
+ ctrl |= MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
+ MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL;
+ break;
+ case DUPLEX_UNFORCED:
+ /* normal duplex detection */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
return err;
- reg &= ~MV88E6XXX_PORT_MAC_CTL_SPEED_MASK;
+ reg &= ~(MV88E6XXX_PORT_MAC_CTL_SPEED_MASK |
+ MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX |
+ MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL);
+
if (alt_bit)
reg &= ~MV88E6390_PORT_MAC_CTL_ALTSPEED;
if (force_bit) {
@@ -261,12 +242,16 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
else
dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
+ dev_dbg(chip->dev, "p%d: %s %s duplex\n", port,
+ reg & MV88E6XXX_PORT_MAC_CTL_FORCE_DUPLEX ? "Force" : "Unforce",
+ reg & MV88E6XXX_PORT_MAC_CTL_DUPLEX_FULL ? "full" : "half");
return 0;
}
/* Support 10, 100, 200 Mbps (e.g. 88E6065 family) */
-int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
{
if (speed == SPEED_MAX)
speed = 200;
@@ -275,11 +260,13 @@ int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
return -EOPNOTSUPP;
/* Setting 200 Mbps on port 0 to 3 selects 100 Mbps */
- return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
+ duplex);
}
/* Support 10, 100, 1000 Mbps (e.g. 88E6185 family) */
-int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
{
if (speed == SPEED_MAX)
speed = 1000;
@@ -287,11 +274,13 @@ int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
if (speed == 200 || speed > 1000)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
+ duplex);
}
/* Support 10, 100 Mbps (e.g. 88E6250 family) */
-int mv88e6250_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
{
if (speed == SPEED_MAX)
speed = 100;
@@ -299,11 +288,13 @@ int mv88e6250_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
if (speed > 100)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_speed(chip, port, speed, false, false);
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
+ duplex);
}
/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6341) */
-int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+int mv88e6341_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
{
if (speed == SPEED_MAX)
speed = port < 5 ? 1000 : 2500;
@@ -317,7 +308,8 @@ int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
if (speed == 2500 && port < 5)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_speed(chip, port, speed, !port, true);
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, !port, true,
+ duplex);
}
phy_interface_t mv88e6341_port_max_speed_mode(int port)
@@ -329,7 +321,8 @@ phy_interface_t mv88e6341_port_max_speed_mode(int port)
}
/* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
-int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
{
if (speed == SPEED_MAX)
speed = 1000;
@@ -340,11 +333,13 @@ int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
if (speed == 200 && port < 5)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_speed(chip, port, speed, true, false);
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, true, false,
+ duplex);
}
/* Support 10, 100, 200, 1000, 2500 Mbps (e.g. 88E6390) */
-int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
{
if (speed == SPEED_MAX)
speed = port < 9 ? 1000 : 2500;
@@ -358,7 +353,8 @@ int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
if (speed == 2500 && port < 9)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, true, true,
+ duplex);
}
phy_interface_t mv88e6390_port_max_speed_mode(int port)
@@ -370,7 +366,8 @@ phy_interface_t mv88e6390_port_max_speed_mode(int port)
}
/* Support 10, 100, 200, 1000, 2500, 10000 Mbps (e.g. 88E6190X) */
-int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
+int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex)
{
if (speed == SPEED_MAX)
speed = port < 9 ? 1000 : 10000;
@@ -381,7 +378,8 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
if (speed >= 2500 && port < 9)
return -EOPNOTSUPP;
- return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
+ return mv88e6xxx_port_set_speed_duplex(chip, port, speed, true, true,
+ duplex);
}
phy_interface_t mv88e6390x_port_max_speed_mode(int port)
@@ -586,183 +584,6 @@ int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
return 0;
}
-int mv88e6250_port_link_state(struct mv88e6xxx_chip *chip, int port,
- struct phylink_link_state *state)
-{
- int err;
- u16 reg;
-
- err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
- if (err)
- return err;
-
- if (port < 5) {
- switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
- case MV88E6250_PORT_STS_PORTMODE_PHY_10_HALF:
- state->speed = SPEED_10;
- state->duplex = DUPLEX_HALF;
- break;
- case MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF:
- state->speed = SPEED_100;
- state->duplex = DUPLEX_HALF;
- break;
- case MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL:
- state->speed = SPEED_10;
- state->duplex = DUPLEX_FULL;
- break;
- case MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL:
- state->speed = SPEED_100;
- state->duplex = DUPLEX_FULL;
- break;
- default:
- state->speed = SPEED_UNKNOWN;
- state->duplex = DUPLEX_UNKNOWN;
- break;
- }
- } else {
- switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
- case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF:
- state->speed = SPEED_10;
- state->duplex = DUPLEX_HALF;
- break;
- case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF:
- state->speed = SPEED_100;
- state->duplex = DUPLEX_HALF;
- break;
- case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL:
- state->speed = SPEED_10;
- state->duplex = DUPLEX_FULL;
- break;
- case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL:
- state->speed = SPEED_100;
- state->duplex = DUPLEX_FULL;
- break;
- default:
- state->speed = SPEED_UNKNOWN;
- state->duplex = DUPLEX_UNKNOWN;
- break;
- }
- }
-
- state->link = !!(reg & MV88E6250_PORT_STS_LINK);
- state->an_enabled = 1;
- state->an_complete = state->link;
- state->interface = PHY_INTERFACE_MODE_NA;
-
- return 0;
-}
-
-int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
- struct phylink_link_state *state)
-{
- int err;
- u16 reg;
-
- switch (chip->ports[port].cmode) {
- case MV88E6XXX_PORT_STS_CMODE_RGMII:
- err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL,
- &reg);
- if (err)
- return err;
-
- if ((reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK) &&
- (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK))
- state->interface = PHY_INTERFACE_MODE_RGMII_ID;
- else if (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK)
- state->interface = PHY_INTERFACE_MODE_RGMII_RXID;
- else if (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK)
- state->interface = PHY_INTERFACE_MODE_RGMII_TXID;
- else
- state->interface = PHY_INTERFACE_MODE_RGMII;
- break;
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- state->interface = PHY_INTERFACE_MODE_1000BASEX;
- break;
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- state->interface = PHY_INTERFACE_MODE_SGMII;
- break;
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- state->interface = PHY_INTERFACE_MODE_2500BASEX;
- break;
- case MV88E6XXX_PORT_STS_CMODE_XAUI:
- state->interface = PHY_INTERFACE_MODE_XAUI;
- break;
- case MV88E6XXX_PORT_STS_CMODE_RXAUI:
- state->interface = PHY_INTERFACE_MODE_RXAUI;
- break;
- default:
- /* we do not support other cmode values here */
- state->interface = PHY_INTERFACE_MODE_NA;
- }
-
- err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
- if (err)
- return err;
-
- switch (reg & MV88E6XXX_PORT_STS_SPEED_MASK) {
- case MV88E6XXX_PORT_STS_SPEED_10:
- state->speed = SPEED_10;
- break;
- case MV88E6XXX_PORT_STS_SPEED_100:
- state->speed = SPEED_100;
- break;
- case MV88E6XXX_PORT_STS_SPEED_1000:
- state->speed = SPEED_1000;
- break;
- case MV88E6XXX_PORT_STS_SPEED_10000:
- if ((reg & MV88E6XXX_PORT_STS_CMODE_MASK) ==
- MV88E6XXX_PORT_STS_CMODE_2500BASEX)
- state->speed = SPEED_2500;
- else
- state->speed = SPEED_10000;
- break;
- }
-
- state->duplex = reg & MV88E6XXX_PORT_STS_DUPLEX ?
- DUPLEX_FULL : DUPLEX_HALF;
- state->link = !!(reg & MV88E6XXX_PORT_STS_LINK);
- state->an_enabled = 1;
- state->an_complete = state->link;
-
- return 0;
-}
-
-int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port,
- struct phylink_link_state *state)
-{
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
- u8 cmode = chip->ports[port].cmode;
-
- /* When a port is in "Cross-chip serdes" mode, it uses
- * 1000Base-X full duplex mode, but there is no automatic
- * link detection. Use the sync OK status for link (as it
- * would do for 1000Base-X mode.)
- */
- if (cmode == MV88E6185_PORT_STS_CMODE_SERDES) {
- u16 mac;
- int err;
-
- err = mv88e6xxx_port_read(chip, port,
- MV88E6XXX_PORT_MAC_CTL, &mac);
- if (err)
- return err;
-
- state->link = !!(mac & MV88E6185_PORT_MAC_CTL_SYNC_OK);
- state->an_enabled = 1;
- state->an_complete =
- !!(mac & MV88E6185_PORT_MAC_CTL_AN_DONE);
- state->duplex =
- state->link ? DUPLEX_FULL : DUPLEX_UNKNOWN;
- state->speed =
- state->link ? SPEED_1000 : SPEED_UNKNOWN;
-
- return 0;
- }
- }
-
- return mv88e6352_port_link_state(chip, port, state);
-}
-
/* Offset 0x02: Jamming Control
*
* Do not limit the period of time that this port can be paused for by
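The port.c changes above fold duplex into the per-family speed helpers, so forced speed and forced duplex are computed and programmed together rather than through two separate calls. A minimal user-space sketch of the combined-update pattern follows; the register layout, bit positions and the 1000 Mbps family maximum are invented for illustration and do not match the real MV88E6xxx MAC control register:

    /* Sketch only: invented register layout, not the MV88E6xxx one. */
    #include <stdio.h>

    #define SPEED_MAX        -1
    #define DUPLEX_FULL       1

    #define CTL_SPEED_MASK    0x3       /* placeholder speed field */
    #define CTL_FORCE_DUPLEX  (1 << 2)  /* placeholder force bit */
    #define CTL_DUPLEX_FULL   (1 << 3)  /* placeholder duplex bit */

    static unsigned int mac_ctl;        /* stands in for the port register */

    static int set_speed_duplex(int speed, int duplex)
    {
        unsigned int val = mac_ctl;

        if (speed == SPEED_MAX)
            speed = 1000;               /* assumed family maximum */

        val &= ~(CTL_SPEED_MASK | CTL_FORCE_DUPLEX | CTL_DUPLEX_FULL);

        switch (speed) {
        case 10:
            break;                      /* encoding 0 */
        case 100:
            val |= 1;
            break;
        case 1000:
            val |= 2;
            break;
        default:
            return -1;                  /* -EOPNOTSUPP in the driver */
        }

        val |= CTL_FORCE_DUPLEX;        /* force instead of autoneg */
        if (duplex == DUPLEX_FULL)
            val |= CTL_DUPLEX_FULL;

        mac_ctl = val;                  /* one combined register write */
        return 0;
    }

    int main(void)
    {
        set_speed_duplex(SPEED_MAX, DUPLEX_FULL);
        printf("mac_ctl = 0x%x\n", mac_ctl);
        return 0;
    }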
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 0ec4327c2b42..44d76ac973f6 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -298,15 +298,20 @@ int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link);
-int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup);
-
-int mv88e6065_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
-int mv88e6185_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
-int mv88e6250_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
-int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
-int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
-int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
-int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
+int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6341_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
+int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
+ int speed, int duplex);
phy_interface_t mv88e6341_port_max_speed_mode(int port);
phy_interface_t mv88e6390_port_max_speed_mode(int port);
@@ -359,12 +364,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
-int mv88e6185_port_link_state(struct mv88e6xxx_chip *chip, int port,
- struct phylink_link_state *state);
-int mv88e6250_port_link_state(struct mv88e6xxx_chip *chip, int port,
- struct phylink_link_state *state);
-int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
- struct phylink_link_state *state);
int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 8d8b3b74aee1..2098f19b534d 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -49,6 +49,52 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
return mv88e6xxx_phy_write(chip, lane, reg_c45, val);
}
+static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
+ u16 status, u16 lpa,
+ struct phylink_link_state *state)
+{
+ if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
+ state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+ state->duplex = status &
+ MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
+ DUPLEX_FULL : DUPLEX_HALF;
+
+ if (status & MV88E6390_SGMII_PHY_STATUS_TX_PAUSE)
+ state->pause |= MLO_PAUSE_TX;
+ if (status & MV88E6390_SGMII_PHY_STATUS_RX_PAUSE)
+ state->pause |= MLO_PAUSE_RX;
+
+ switch (status & MV88E6390_SGMII_PHY_STATUS_SPEED_MASK) {
+ case MV88E6390_SGMII_PHY_STATUS_SPEED_1000:
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_1000;
+ break;
+ case MV88E6390_SGMII_PHY_STATUS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case MV88E6390_SGMII_PHY_STATUS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ default:
+ dev_err(chip->dev, "invalid PHY speed\n");
+ return -EINVAL;
+ }
+ } else {
+ state->link = false;
+ }
+
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ mii_lpa_mod_linkmode_x(state->lp_advertising, lpa,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
+ else if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ mii_lpa_mod_linkmode_x(state->lp_advertising, lpa,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+
+ return 0;
+}
+
int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
bool up)
{
@@ -70,6 +116,120 @@ int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
return err;
}
+int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise)
+{
+ u16 adv, bmcr, val;
+ bool changed;
+ int err;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ adv = 0x0001;
+ break;
+
+ case PHY_INTERFACE_MODE_1000BASEX:
+ adv = linkmode_adv_to_mii_adv_x(advertise,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+ break;
+
+ default:
+ return 0;
+ }
+
+ err = mv88e6352_serdes_read(chip, MII_ADVERTISE, &val);
+ if (err)
+ return err;
+
+ changed = val != adv;
+ if (changed) {
+ err = mv88e6352_serdes_write(chip, MII_ADVERTISE, adv);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
+ if (err)
+ return err;
+
+ if (phylink_autoneg_inband(mode))
+ bmcr = val | BMCR_ANENABLE;
+ else
+ bmcr = val & ~BMCR_ANENABLE;
+
+ if (bmcr == val)
+ return changed;
+
+ return mv88e6352_serdes_write(chip, MII_BMCR, bmcr);
+}
+
+int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, struct phylink_link_state *state)
+{
+ u16 lpa, status;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, 0x11, &status);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
+ return err;
+ }
+
+ err = mv88e6352_serdes_read(chip, MII_LPA, &lpa);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY LPA: %d\n", err);
+ return err;
+ }
+
+ return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
+}
+
+int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
+ u8 lane)
+{
+ u16 bmcr;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, MII_BMCR, &bmcr);
+ if (err)
+ return err;
+
+ return mv88e6352_serdes_write(chip, MII_BMCR, bmcr | BMCR_ANRESTART);
+}
+
+int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, int speed, int duplex)
+{
+ u16 val, bmcr;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, MII_BMCR, &val);
+ if (err)
+ return err;
+
+ bmcr = val & ~(BMCR_SPEED100 | BMCR_FULLDPLX | BMCR_SPEED1000);
+ switch (speed) {
+ case SPEED_1000:
+ bmcr |= BMCR_SPEED1000;
+ break;
+ case SPEED_100:
+ bmcr |= BMCR_SPEED100;
+ break;
+ case SPEED_10:
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+
+ if (bmcr == val)
+ return 0;
+
+ return mv88e6352_serdes_write(chip, MII_BMCR, bmcr);
+}
+
u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
@@ -180,26 +340,17 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port)
{
- struct dsa_switch *ds = chip->ds;
- u16 status;
- bool up;
+ u16 bmsr;
int err;
- err = mv88e6352_serdes_read(chip, MII_BMSR, &status);
- if (err)
- return;
-
- /* Status must be read twice in order to give the current link
- * status. Otherwise the change in link status since the last
- * read of the register is returned.
- */
- err = mv88e6352_serdes_read(chip, MII_BMSR, &status);
- if (err)
+ /* If the link has dropped, we want to know about it. */
+ err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err);
return;
+ }
- up = status & BMSR_LSTATUS;
-
- dsa_port_phylink_mac_change(ds, port, up);
+ dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS));
}
irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
@@ -237,6 +388,29 @@ unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
return irq_find_mapping(chip->g2_irq.domain, MV88E6352_SERDES_IRQ);
}
+int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port)
+{
+ if (!mv88e6352_port_has_serdes(chip, port))
+ return 0;
+
+ return 32 * sizeof(u16);
+}
+
+void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
+{
+ u16 *p = _p;
+ u16 reg;
+ int i;
+
+ if (!mv88e6352_port_has_serdes(chip, port))
+ return;
+
+ for (i = 0; i < 32; i++) {
+ mv88e6352_serdes_read(chip, i, &reg);
+ p[i] = reg;
+ }
+}
+
u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
@@ -387,20 +561,18 @@ static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, u8 lane,
int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_CONTROL, &val);
+ MV88E6390_SGMII_BMCR, &val);
if (err)
return err;
if (up)
- new_val = val & ~(MV88E6390_SGMII_CONTROL_RESET |
- MV88E6390_SGMII_CONTROL_LOOPBACK |
- MV88E6390_SGMII_CONTROL_PDOWN);
+ new_val = val & ~(BMCR_RESET | BMCR_LOOPBACK | BMCR_PDOWN);
else
- new_val = val | MV88E6390_SGMII_CONTROL_PDOWN;
+ new_val = val | BMCR_PDOWN;
if (val != new_val)
err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_CONTROL, new_val);
+ MV88E6390_SGMII_BMCR, new_val);
return err;
}
@@ -517,71 +689,153 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
return err;
}
-static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
- int port, u8 lane)
+int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise)
{
- u8 cmode = chip->ports[port].cmode;
- struct dsa_switch *ds = chip->ds;
- int duplex = DUPLEX_UNKNOWN;
- int speed = SPEED_UNKNOWN;
- phy_interface_t mode;
- int link, err;
- u16 status;
+ u16 val, bmcr, adv;
+ bool changed;
+ int err;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ adv = 0x0001;
+ break;
+
+ case PHY_INTERFACE_MODE_1000BASEX:
+ adv = linkmode_adv_to_mii_adv_x(advertise,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ adv = linkmode_adv_to_mii_adv_x(advertise,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
+ break;
+
+ default:
+ return 0;
+ }
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_ADVERTISE, &val);
+ if (err)
+ return err;
+
+ changed = val != adv;
+ if (changed) {
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_ADVERTISE, adv);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, &val);
+ if (err)
+ return err;
+
+ if (phylink_autoneg_inband(mode))
+ bmcr = val | BMCR_ANENABLE;
+ else
+ bmcr = val & ~BMCR_ANENABLE;
+
+ /* setting ANENABLE triggers a restart of negotiation */
+ if (bmcr == val)
+ return changed;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, bmcr);
+}
+
+int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, struct phylink_link_state *state)
+{
+ u16 lpa, status;
+ int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
MV88E6390_SGMII_PHY_STATUS, &status);
if (err) {
- dev_err(chip->dev, "can't read SGMII PHY status: %d\n", err);
- return;
+ dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
+ return err;
}
- link = status & MV88E6390_SGMII_PHY_STATUS_LINK ?
- LINK_FORCED_UP : LINK_FORCED_DOWN;
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_LPA, &lpa);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY LPA: %d\n", err);
+ return err;
+ }
- if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
- duplex = status & MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
- DUPLEX_FULL : DUPLEX_HALF;
+ return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
+}
- switch (status & MV88E6390_SGMII_PHY_STATUS_SPEED_MASK) {
- case MV88E6390_SGMII_PHY_STATUS_SPEED_1000:
- if (cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
- speed = SPEED_2500;
- else
- speed = SPEED_1000;
- break;
- case MV88E6390_SGMII_PHY_STATUS_SPEED_100:
- speed = SPEED_100;
- break;
- case MV88E6390_SGMII_PHY_STATUS_SPEED_10:
- speed = SPEED_10;
- break;
- default:
- dev_err(chip->dev, "invalid PHY speed\n");
- return;
- }
- }
+int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
+ u8 lane)
+{
+ u16 bmcr;
+ int err;
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- mode = PHY_INTERFACE_MODE_SGMII;
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, &bmcr);
+ if (err)
+ return err;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR,
+ bmcr | BMCR_ANRESTART);
+}
+
+int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, int speed, int duplex)
+{
+ u16 val, bmcr;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, &val);
+ if (err)
+ return err;
+
+ bmcr = val & ~(BMCR_SPEED100 | BMCR_FULLDPLX | BMCR_SPEED1000);
+ switch (speed) {
+ case SPEED_2500:
+ case SPEED_1000:
+ bmcr |= BMCR_SPEED1000;
break;
- case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
- mode = PHY_INTERFACE_MODE_1000BASEX;
+ case SPEED_100:
+ bmcr |= BMCR_SPEED100;
break;
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- mode = PHY_INTERFACE_MODE_2500BASEX;
+ case SPEED_10:
break;
- default:
- mode = PHY_INTERFACE_MODE_NA;
}
- err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex,
- PAUSE_OFF, mode);
- if (err)
- dev_err(chip->dev, "can't propagate PHY settings to MAC: %d\n",
- err);
- else
- dsa_port_phylink_mac_change(ds, port, link == LINK_FORCED_UP);
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+
+ if (bmcr == val)
+ return 0;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, bmcr);
+}
+
+static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
+ int port, u8 lane)
+{
+ u16 bmsr;
+ int err;
+
+ /* If the link has dropped, we want to know about it. */
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMSR, &bmsr);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes BMSR: %d\n", err);
+ return;
+ }
+
+ dsa_port_phylink_mac_change(chip->ds, port, !!(bmsr & BMSR_LSTATUS));
}
static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip,
@@ -652,3 +906,57 @@ unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
{
return irq_find_mapping(chip->g2_irq.domain, port);
}
+
+static const u16 mv88e6390_serdes_regs[] = {
+ /* SERDES common registers */
+ 0xf00a, 0xf00b, 0xf00c,
+ 0xf010, 0xf011, 0xf012, 0xf013,
+ 0xf016, 0xf017, 0xf018,
+ 0xf01b, 0xf01c, 0xf01d, 0xf01e, 0xf01f,
+ 0xf020, 0xf021, 0xf022, 0xf023, 0xf024, 0xf025, 0xf026, 0xf027,
+ 0xf028, 0xf029,
+ 0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
+ 0xf038, 0xf039,
+ /* SGMII */
+ 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007,
+ 0x2008,
+ 0x200f,
+ 0xa000, 0xa001, 0xa002, 0xa003,
+ /* 10Gbase-X */
+ 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007,
+ 0x1008,
+ 0x100e, 0x100f,
+ 0x1018, 0x1019,
+ 0x9000, 0x9001, 0x9002, 0x9003, 0x9004,
+ 0x9006,
+ 0x9010, 0x9011, 0x9012, 0x9013, 0x9014, 0x9015, 0x9016,
+ /* 10Gbase-R */
+ 0x1020, 0x1021, 0x1022, 0x1023, 0x1024, 0x1025, 0x1026, 0x1027,
+ 0x1028, 0x1029, 0x102a, 0x102b,
+};
+
+int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port)
+{
+ if (mv88e6xxx_serdes_get_lane(chip, port) == 0)
+ return 0;
+
+ return ARRAY_SIZE(mv88e6390_serdes_regs) * sizeof(u16);
+}
+
+void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
+{
+ u16 *p = _p;
+ int lane;
+ u16 reg;
+ int i;
+
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane == 0)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_regs); i++) {
+ mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ mv88e6390_serdes_regs[i], &reg);
+ p[i] = reg;
+ }
+}
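The new pcs_get_state helpers decode the Marvell PHY status word only when the speed/duplex-resolved bit is set, and otherwise just report the link down. A stand-alone sketch of that decode; the duplex, valid, link and pause bits match the MV88E6390_SGMII_PHY_STATUS_* definitions in the serdes.h hunk below, while the two-bit speed encoding at bits 15:14 is an assumption made for the example:

    #include <stdbool.h>
    #include <stdio.h>

    /* Bits 13/11/10/3/2 follow the serdes.h definitions; the speed
     * field at bits 15:14 is an assumption for this sketch.
     */
    #define PHY_STATUS_SPEED_SHIFT    14
    #define PHY_STATUS_SPEED_MASK     (3 << PHY_STATUS_SPEED_SHIFT)
    #define PHY_STATUS_DUPLEX_FULL    (1 << 13)
    #define PHY_STATUS_SPD_DPL_VALID  (1 << 11)
    #define PHY_STATUS_LINK           (1 << 10)
    #define PHY_STATUS_TX_PAUSE       (1 << 3)
    #define PHY_STATUS_RX_PAUSE       (1 << 2)

    struct link_state {
        bool link, full_duplex, tx_pause, rx_pause;
        int speed;
    };

    static int decode_phy_status(unsigned int status, struct link_state *st)
    {
        static const int speeds[] = { 10, 100, 1000, -1 };

        if (!(status & PHY_STATUS_SPD_DPL_VALID)) {
            st->link = false;   /* nothing else is trustworthy yet */
            return 0;
        }

        st->link = status & PHY_STATUS_LINK;
        st->full_duplex = status & PHY_STATUS_DUPLEX_FULL;
        st->tx_pause = status & PHY_STATUS_TX_PAUSE;
        st->rx_pause = status & PHY_STATUS_RX_PAUSE;
        st->speed = speeds[(status & PHY_STATUS_SPEED_MASK) >>
                   PHY_STATUS_SPEED_SHIFT];

        return st->speed < 0 ? -1 : 0;  /* -EINVAL in the driver */
    }

    int main(void)
    {
        struct link_state st = { 0 };

        decode_phy_status(PHY_STATUS_SPD_DPL_VALID | PHY_STATUS_LINK |
                  PHY_STATUS_DUPLEX_FULL |
                  (2 << PHY_STATUS_SPEED_SHIFT), &st);
        printf("link %d, speed %d, %s duplex\n",
               st.link, st.speed, st.full_duplex ? "full" : "half");
        return 0;
    }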
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index d16ef4da20b0..7990cadba4c2 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -47,14 +47,10 @@
#define MV88E6390_PCS_CONTROL_1_PDOWN BIT(11)
/* 1000BASE-X and SGMII */
-#define MV88E6390_SGMII_CONTROL 0x2000
-#define MV88E6390_SGMII_CONTROL_RESET BIT(15)
-#define MV88E6390_SGMII_CONTROL_LOOPBACK BIT(14)
-#define MV88E6390_SGMII_CONTROL_PDOWN BIT(11)
-#define MV88E6390_SGMII_STATUS 0x2001
-#define MV88E6390_SGMII_STATUS_AN_DONE BIT(5)
-#define MV88E6390_SGMII_STATUS_REMOTE_FAULT BIT(4)
-#define MV88E6390_SGMII_STATUS_LINK BIT(2)
+#define MV88E6390_SGMII_BMCR (0x2000 + MII_BMCR)
+#define MV88E6390_SGMII_BMSR (0x2000 + MII_BMSR)
+#define MV88E6390_SGMII_ADVERTISE (0x2000 + MII_ADVERTISE)
+#define MV88E6390_SGMII_LPA (0x2000 + MII_LPA)
#define MV88E6390_SGMII_INT_ENABLE 0xa001
#define MV88E6390_SGMII_INT_SPEED_CHANGE BIT(14)
#define MV88E6390_SGMII_INT_DUPLEX_CHANGE BIT(13)
@@ -73,6 +69,8 @@
#define MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL BIT(13)
#define MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID BIT(11)
#define MV88E6390_SGMII_PHY_STATUS_LINK BIT(10)
+#define MV88E6390_SGMII_PHY_STATUS_TX_PAUSE BIT(3)
+#define MV88E6390_SGMII_PHY_STATUS_RX_PAUSE BIT(2)
/* Packet generator pad packet checker */
#define MV88E6390_PG_CONTROL 0xf010
@@ -82,6 +80,26 @@ u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise);
+int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertise);
+int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, struct phylink_link_state *state);
+int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, struct phylink_link_state *state);
+int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
+ u8 lane);
+int mv88e6390_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
+ u8 lane);
+int mv88e6352_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, int speed, int duplex);
+int mv88e6390_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, int speed, int duplex);
unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
int port);
unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
@@ -109,6 +127,11 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
+int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
+void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
+int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
+void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
+
/* Return the (first) SERDES lane address a port is using, 0 otherwise. */
static inline u8 mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
int port)
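Redefining the SGMII registers as 0x2000 + MII_xxx (see the serdes.h hunk above) lets the driver drop its private CONTROL/STATUS bit macros and reuse the generic clause-22 definitions, BMCR_ANENABLE, BMCR_ANRESTART, BMSR_LSTATUS and friends. A trivial demonstration of the remap, using the uapi <linux/mii.h> header:

    /* The SGMII page exposes a standard clause-22 register file at
     * offset 0x2000, so the generic indices and masks from the uapi
     * <linux/mii.h> header apply directly.
     */
    #include <stdio.h>
    #include <linux/mii.h>

    #define MV88E6390_SGMII_BASE 0x2000

    static unsigned int sgmii_reg(unsigned int mii_reg)
    {
        return MV88E6390_SGMII_BASE + mii_reg;
    }

    int main(void)
    {
        printf("BMCR at 0x%04x, BMSR at 0x%04x\n",
               sgmii_reg(MII_BMCR), sgmii_reg(MII_BMSR));
        printf("autoneg enable/restart: 0x%04x / 0x%04x\n",
               BMCR_ANENABLE, BMCR_ANRESTART);
        return 0;
    }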
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 3257962c147e..79ca3aadb864 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -2,6 +2,7 @@
/* Copyright 2019 NXP Semiconductors
*/
#include <uapi/linux/if_bridge.h>
+#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_dev.h>
@@ -12,6 +13,7 @@
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"
@@ -176,8 +178,7 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 100baseT_Full);
phylink_set(mask, 1000baseT_Full);
- /* The internal ports that run at 2.5G are overclocked GMII */
- if (state->interface == PHY_INTERFACE_MODE_GMII ||
+ if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
state->interface == PHY_INTERFACE_MODE_2500BASEX ||
state->interface == PHY_INTERFACE_MODE_USXGMII) {
phylink_set(mask, 2500baseT_Full);
@@ -264,7 +265,9 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int link_an_mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct ocelot *ocelot = ds->priv;
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -400,6 +403,9 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->stats_layout = felix->info->stats_layout;
ocelot->num_stats = felix->info->num_stats;
ocelot->shared_queue_sz = felix->info->shared_queue_sz;
+ ocelot->vcap_is2_keys = felix->info->vcap_is2_keys;
+ ocelot->vcap_is2_actions = felix->info->vcap_is2_actions;
+ ocelot->vcap = felix->info->vcap;
ocelot->ops = felix->info->ops;
port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
@@ -511,12 +517,23 @@ static int felix_setup(struct dsa_switch *ds)
for (port = 0; port < ds->num_ports; port++) {
ocelot_init_port(ocelot, port);
+ /* Bring up the CPU port module and configure the NPI port */
if (dsa_is_cpu_port(ds, port))
- ocelot_set_cpu_port(ocelot, port,
- OCELOT_TAG_PREFIX_NONE,
- OCELOT_TAG_PREFIX_LONG);
+ ocelot_configure_cpu(ocelot, port,
+ OCELOT_TAG_PREFIX_NONE,
+ OCELOT_TAG_PREFIX_LONG);
}
+ /* Include the CPU port module in the forwarding mask for unknown
+ * unicast - the hardware default value for ANA_FLOODING_FLD_UNICAST
+ * excludes BIT(ocelot->num_phys_ports), and so does ocelot_init, since
+ * Ocelot relies on whitelisting MAC addresses towards PGID_CPU.
+ */
+ ocelot_write_rix(ocelot,
+ ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
+ ANA_PGID_PGID, PGID_UC);
+
+ ds->mtu_enforcement_ingress = true;
/* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040)
* isn't instantiated for the Felix PF.
* In-band AN may take a few ms to complete, so we need to poll.
@@ -594,6 +611,67 @@ static bool felix_txtstamp(struct dsa_switch *ds, int port,
return false;
}
+static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_set_maxlen(ocelot, port, new_mtu);
+
+ return 0;
+}
+
+static int felix_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_get_max_mtu(ocelot, port);
+}
+
+static int felix_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+}
+
+static int felix_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_cls_flower_destroy(ocelot, port, cls, ingress);
+}
+
+static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_cls_flower_stats(ocelot, port, cls, ingress);
+}
+
+static int felix_port_policer_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_policer_tc_entry *policer)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct ocelot_policer pol = {
+ .rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
+ .burst = div_u64(policer->rate_bytes_per_sec *
+ PSCHED_NS2TICKS(policer->burst),
+ PSCHED_TICKS_PER_SEC),
+ };
+
+ return ocelot_port_policer_add(ocelot, port, &pol);
+}
+
+static void felix_port_policer_del(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_policer_del(ocelot, port);
+}
+
static const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.setup = felix_setup,
@@ -625,6 +703,13 @@ static const struct dsa_switch_ops felix_switch_ops = {
.port_hwtstamp_set = felix_hwtstamp_set,
.port_rxtstamp = felix_rxtstamp,
.port_txtstamp = felix_txtstamp,
+ .port_change_mtu = felix_change_mtu,
+ .port_max_mtu = felix_get_max_mtu,
+ .port_policer_add = felix_port_policer_add,
+ .port_policer_del = felix_port_policer_del,
+ .cls_flower_add = felix_cls_flower_add,
+ .cls_flower_del = felix_cls_flower_del,
+ .cls_flower_stats = felix_cls_flower_stats,
};
static struct felix_info *felix_instance_tbl[] = {
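felix_port_policer_add() converts the tc policer parameters into the units the Ocelot policer block expects: a rate in kbit/s and a burst in bytes, the burst being derived from the rate and the psched-tick form of the burst time. A worked computation with example inputs (the PSCHED macros mirror the kernel's pkt_sched.h definitions; the 100 Mbit/s and 10 us figures are made up for the example):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors include/net/pkt_sched.h */
    #define PSCHED_SHIFT         6
    #define PSCHED_TICKS_PER_SEC (1000000000ULL >> PSCHED_SHIFT)
    #define PSCHED_NS2TICKS(ns)  ((uint64_t)(ns) >> PSCHED_SHIFT)

    int main(void)
    {
        uint64_t rate_bytes_per_sec = 12500000;  /* 100 Mbit/s */
        int64_t burst_ns = 10000;                /* 10 us of traffic */

        uint64_t rate_kbps = rate_bytes_per_sec / 1000 * 8;
        uint64_t burst_bytes = rate_bytes_per_sec *
                       PSCHED_NS2TICKS(burst_ns) /
                       PSCHED_TICKS_PER_SEC;

        printf("ocelot_policer.rate  = %llu kbps\n",
               (unsigned long long)rate_kbps);
        printf("ocelot_policer.burst = %llu bytes\n",
               (unsigned long long)burst_bytes);
        return 0;
    }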
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 3a7580015b62..82d46f260041 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -18,6 +18,9 @@ struct felix_info {
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
int num_ports;
+ struct vcap_field *vcap_is2_keys;
+ struct vcap_field *vcap_is2_actions;
+ const struct vcap_props *vcap;
int switch_pci_bar;
int imdio_pci_bar;
int (*mdio_bus_alloc)(struct ocelot *ocelot);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 2c812b481778..b4078f3c5c38 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -3,12 +3,17 @@
* Copyright 2018-2019 NXP Semiconductors
*/
#include <linux/fsl/enetc_mdio.h>
+#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include "felix.h"
+#define VSC9959_VCAP_IS2_CNT 1024
+#define VSC9959_VCAP_IS2_ENTRY_WIDTH 376
+#define VSC9959_VCAP_PORT_CNT 6
+
/* TODO: should find a better place for these */
#define USXGMII_BMCR_RESET BIT(15)
#define USXGMII_BMCR_AN_EN BIT(12)
@@ -547,6 +552,129 @@ static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
{ .offset = 0x111, .name = "drop_green_prio_7", },
};
+struct vcap_field vsc9959_vcap_is2_keys[] = {
+ /* Common: 41 bits */
+ [VCAP_IS2_TYPE] = { 0, 4},
+ [VCAP_IS2_HK_FIRST] = { 4, 1},
+ [VCAP_IS2_HK_PAG] = { 5, 8},
+ [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 7},
+ [VCAP_IS2_HK_RSV2] = { 20, 1},
+ [VCAP_IS2_HK_HOST_MATCH] = { 21, 1},
+ [VCAP_IS2_HK_L2_MC] = { 22, 1},
+ [VCAP_IS2_HK_L2_BC] = { 23, 1},
+ [VCAP_IS2_HK_VLAN_TAGGED] = { 24, 1},
+ [VCAP_IS2_HK_VID] = { 25, 12},
+ [VCAP_IS2_HK_DEI] = { 37, 1},
+ [VCAP_IS2_HK_PCP] = { 38, 3},
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ [VCAP_IS2_HK_L2_DMAC] = { 41, 48},
+ [VCAP_IS2_HK_L2_SMAC] = { 89, 48},
+ /* MAC_ETYPE (TYPE=000) */
+ [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {137, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {153, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {169, 8},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {177, 3},
+ /* MAC_LLC (TYPE=001) */
+ [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {137, 40},
+ /* MAC_SNAP (TYPE=010) */
+ [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {137, 40},
+ /* MAC_ARP (TYPE=011) */
+ [VCAP_IS2_HK_MAC_ARP_SMAC] = { 41, 48},
+ [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 89, 1},
+ [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 90, 1},
+ [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 91, 1},
+ [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 92, 1},
+ [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 93, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 94, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE] = { 95, 2},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = { 97, 32},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {129, 32},
+ [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {161, 1},
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ [VCAP_IS2_HK_IP4] = { 41, 1},
+ [VCAP_IS2_HK_L3_FRAGMENT] = { 42, 1},
+ [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 43, 1},
+ [VCAP_IS2_HK_L3_OPTIONS] = { 44, 1},
+ [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 45, 1},
+ [VCAP_IS2_HK_L3_TOS] = { 46, 8},
+ [VCAP_IS2_HK_L3_IP4_DIP] = { 54, 32},
+ [VCAP_IS2_HK_L3_IP4_SIP] = { 86, 32},
+ [VCAP_IS2_HK_DIP_EQ_SIP] = {118, 1},
+ /* IP4_TCP_UDP (TYPE=100) */
+ [VCAP_IS2_HK_TCP] = {119, 1},
+ [VCAP_IS2_HK_L4_SPORT] = {120, 16},
+ [VCAP_IS2_HK_L4_DPORT] = {136, 16},
+ [VCAP_IS2_HK_L4_RNG] = {152, 8},
+ [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {160, 1},
+ [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {161, 1},
+ [VCAP_IS2_HK_L4_URG] = {162, 1},
+ [VCAP_IS2_HK_L4_ACK] = {163, 1},
+ [VCAP_IS2_HK_L4_PSH] = {164, 1},
+ [VCAP_IS2_HK_L4_RST] = {165, 1},
+ [VCAP_IS2_HK_L4_SYN] = {166, 1},
+ [VCAP_IS2_HK_L4_FIN] = {167, 1},
+ [VCAP_IS2_HK_L4_1588_DOM] = {168, 8},
+ [VCAP_IS2_HK_L4_1588_VER] = {176, 4},
+ /* IP4_OTHER (TYPE=101) */
+ [VCAP_IS2_HK_IP4_L3_PROTO] = {119, 8},
+ [VCAP_IS2_HK_L3_PAYLOAD] = {127, 56},
+ /* IP6_STD (TYPE=110) */
+ [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 41, 1},
+ [VCAP_IS2_HK_L3_IP6_SIP] = { 42, 128},
+ [VCAP_IS2_HK_IP6_L3_PROTO] = {170, 8},
+ /* OAM (TYPE=111) */
+ [VCAP_IS2_HK_OAM_MEL_FLAGS] = {137, 7},
+ [VCAP_IS2_HK_OAM_VER] = {144, 5},
+ [VCAP_IS2_HK_OAM_OPCODE] = {149, 8},
+ [VCAP_IS2_HK_OAM_FLAGS] = {157, 8},
+ [VCAP_IS2_HK_OAM_MEPID] = {165, 16},
+ [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {181, 1},
+ [VCAP_IS2_HK_OAM_IS_Y1731] = {182, 1},
+};
+
+struct vcap_field vsc9959_vcap_is2_actions[] = {
+ [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1},
+ [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1},
+ [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3},
+ [VCAP_IS2_ACT_MASK_MODE] = { 5, 2},
+ [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1},
+ [VCAP_IS2_ACT_LRN_DIS] = { 8, 1},
+ [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
+ [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9},
+ [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1},
+ [VCAP_IS2_ACT_PORT_MASK] = { 20, 11},
+ [VCAP_IS2_ACT_REW_OP] = { 31, 9},
+ [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1},
+ [VCAP_IS2_ACT_RSV] = { 41, 2},
+ [VCAP_IS2_ACT_ACL_ID] = { 43, 6},
+ [VCAP_IS2_ACT_HIT_CNT] = { 49, 32},
+};
+
+static const struct vcap_props vsc9959_vcap_props[] = {
+ [VCAP_IS2] = {
+ .tg_width = 2,
+ .sw_count = 4,
+ .entry_count = VSC9959_VCAP_IS2_CNT,
+ .entry_width = VSC9959_VCAP_IS2_ENTRY_WIDTH,
+ .action_count = VSC9959_VCAP_IS2_CNT +
+ VSC9959_VCAP_PORT_CNT + 2,
+ .action_width = 89,
+ .action_type_width = 1,
+ .action_table = {
+ [IS2_ACTION_TYPE_NORMAL] = {
+ .width = 44,
+ .count = 2
+ },
+ [IS2_ACTION_TYPE_SMAC_SIP] = {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .counter_words = 4,
+ .counter_width = 32,
+ },
+};
+
#define VSC9959_INIT_TIMEOUT 50000
#define VSC9959_GCB_RST_SLEEP 100
#define VSC9959_SYS_RAMINIT_SLEEP 80
@@ -955,8 +1083,7 @@ static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
phy_interface_t phy_mode)
{
switch (phy_mode) {
- case PHY_INTERFACE_MODE_GMII:
- /* Only supported on internal to-CPU ports */
+ case PHY_INTERFACE_MODE_INTERNAL:
if (port != 4 && port != 5)
return -ENOTSUPP;
return 0;
@@ -1089,6 +1216,9 @@ struct felix_info felix_info_vsc9959 = {
.ops = &vsc9959_ops,
.stats_layout = vsc9959_stats_layout,
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
+ .vcap_is2_keys = vsc9959_vcap_is2_keys,
+ .vcap_is2_actions = vsc9959_vcap_is2_actions,
+ .vcap = vsc9959_vcap_props,
.shared_queue_sz = 128 * 1024,
.num_ports = 6,
.switch_pci_bar = 4,
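Each vcap_field entry above is an { offset, width } pair locating one key or action field inside the packed VCAP entry, with cumulative offsets (HK_VID, for instance, occupies bits 25..36). A minimal sketch of driving a bit-packer from such a table; the real driver additionally packs a mask per field and splits entries into subwords:

    #include <stdint.h>
    #include <stdio.h>

    struct vcap_field {
        int offset;   /* bit position within the packed entry */
        int length;   /* field width in bits */
    };

    /* Write @val into @stream where the field table says it lives. */
    static void vcap_pack(uint8_t *stream, const struct vcap_field *f,
                  uint64_t val)
    {
        for (int i = 0; i < f->length; i++) {
            int bit = f->offset + i;

            if (val & (1ULL << i))
                stream[bit / 8] |= 1 << (bit % 8);
            else
                stream[bit / 8] &= ~(1 << (bit % 8));
        }
    }

    int main(void)
    {
        const struct vcap_field vid = { .offset = 25, .length = 12 };
        uint8_t entry[376 / 8 + 1] = { 0 };  /* IS2 entry width */

        vcap_pack(entry, &vid, 100);         /* match VLAN 100 */
        printf("bytes 3..4: %02x %02x\n", entry[3], entry[4]);
        return 0;
    }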
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index de25f99e995a..7c86056b9401 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -458,7 +458,9 @@ static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
struct regmap *regmap = priv->regmap;
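Both the felix and ar9331 hunks adapt to the extended phylink mac_link_up() signature, which now hands the MAC the resolved speed, duplex and pause state instead of leaving the driver to re-read them from the PHY. A simplified shape of such a callback; the register bits are placeholders, not either driver's actual layout:

    #include <stdbool.h>
    #include <stdio.h>

    enum { SPEED_10 = 10, SPEED_100 = 100, SPEED_1000 = 1000 };
    enum { DUPLEX_HALF, DUPLEX_FULL };

    /* Placeholder MAC configuration bits, not a real device layout. */
    static void mac_link_up(int port, int speed, int duplex,
                bool tx_pause, bool rx_pause)
    {
        unsigned int cfg = 0;

        switch (speed) {
        case SPEED_1000: cfg |= 2; break;
        case SPEED_100:  cfg |= 1; break;
        default:         break;     /* 10 Mbit/s, encoding 0 */
        }
        if (duplex == DUPLEX_FULL)
            cfg |= 1 << 4;
        if (tx_pause)
            cfg |= 1 << 5;
        if (rx_pause)
            cfg |= 1 << 6;

        printf("port %d: MAC config 0x%x\n", port, cfg);
    }

    int main(void)
    {
        /* phylink has already resolved the link parameters */
        mac_link_up(0, SPEED_1000, DUPLEX_FULL, true, true);
        return 0;
    }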
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile
index 66161e874344..8943d8d66f2b 100644
--- a/drivers/net/dsa/sja1105/Makefile
+++ b/drivers/net/dsa/sja1105/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_NET_DSA_SJA1105) += sja1105.o
sja1105-objs := \
sja1105_spi.o \
sja1105_main.o \
+ sja1105_flower.o \
sja1105_ethtool.o \
sja1105_clocking.o \
sja1105_static_config.o \
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index d801fc204d19..8b60dbd567f2 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -19,6 +19,7 @@
* The passed parameter is in multiples of 1 ms.
*/
#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
+#define SJA1105_NUM_L2_POLICERS 45
typedef enum {
SPI_READ = 0,
@@ -36,11 +37,15 @@ struct sja1105_regs {
u64 port_control;
u64 rgu;
u64 config;
+ u64 sgmii;
u64 rmii_pll1;
+ u64 ptppinst;
+ u64 ptppindur;
u64 ptp_control;
u64 ptpclkval;
u64 ptpclkrate;
u64 ptpclkcorp;
+ u64 ptpsyncts;
u64 ptpschtm;
u64 ptpegr_ts[SJA1105_NUM_PORTS];
u64 pad_mii_tx[SJA1105_NUM_PORTS];
@@ -56,6 +61,7 @@ struct sja1105_regs {
u64 mac[SJA1105_NUM_PORTS];
u64 mac_hl1[SJA1105_NUM_PORTS];
u64 mac_hl2[SJA1105_NUM_PORTS];
+ u64 ether_stats[SJA1105_NUM_PORTS];
u64 qlevel[SJA1105_NUM_PORTS];
};
@@ -90,6 +96,36 @@ struct sja1105_info {
const char *name;
};
+enum sja1105_rule_type {
+ SJA1105_RULE_BCAST_POLICER,
+ SJA1105_RULE_TC_POLICER,
+};
+
+struct sja1105_rule {
+ struct list_head list;
+ unsigned long cookie;
+ unsigned long port_mask;
+ enum sja1105_rule_type type;
+
+ union {
+ /* SJA1105_RULE_BCAST_POLICER */
+ struct {
+ int sharindx;
+ } bcast_pol;
+
+ /* SJA1105_RULE_TC_POLICER */
+ struct {
+ int sharindx;
+ int tc;
+ } tc_pol;
+ };
+};
+
+struct sja1105_flow_block {
+ struct list_head rules;
+ bool l2_policer_used[SJA1105_NUM_L2_POLICERS];
+};
+
struct sja1105_private {
struct sja1105_static_config static_config;
bool rgmii_rx_delay[SJA1105_NUM_PORTS];
@@ -98,6 +134,7 @@ struct sja1105_private {
struct gpio_desc *reset_gpio;
struct spi_device *spidev;
struct dsa_switch *ds;
+ struct sja1105_flow_block flow_block;
struct sja1105_port ports[SJA1105_NUM_PORTS];
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
@@ -122,6 +159,7 @@ enum sja1105_reset_reason {
SJA1105_RX_HWTSTAMPING,
SJA1105_AGEING_TIME,
SJA1105_SCHEDULING,
+ SJA1105_BEST_EFFORT_POLICING,
};
int sja1105_static_config_reload(struct sja1105_private *priv,
@@ -159,6 +197,7 @@ typedef enum {
XMII_MODE_MII = 0,
XMII_MODE_RMII = 1,
XMII_MODE_RGMII = 2,
+ XMII_MODE_SGMII = 3,
} sja1105_phy_interface_t;
typedef enum {
@@ -212,5 +251,15 @@ size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+
+/* From sja1105_flower.c */
+int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+void sja1105_flower_setup(struct dsa_switch *ds);
+void sja1105_flower_teardown(struct dsa_switch *ds);
#endif
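struct sja1105_rule keys each offloaded flower rule by cookie and discriminates the union through the type field; sja1105_flower.c looks rules up by walking the flow_block list on that cookie. The same tagged-union lookup pattern, reduced to a stand-alone sketch:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    enum rule_type { RULE_BCAST_POLICER, RULE_TC_POLICER };

    struct rule {
        struct rule *next;
        unsigned long cookie;
        enum rule_type type;
        union {
            struct { int sharindx; } bcast_pol;
            struct { int sharindx; int tc; } tc_pol;
        };
    };

    static struct rule *rules;

    static struct rule *rule_find(unsigned long cookie)
    {
        for (struct rule *r = rules; r; r = r->next)
            if (r->cookie == cookie)
                return r;
        return NULL;
    }

    int main(void)
    {
        struct rule *r = calloc(1, sizeof(*r));

        r->cookie = 42;                 /* the flower cookie */
        r->type = RULE_TC_POLICER;
        r->tc_pol.tc = 7;
        r->next = rules;
        rules = r;

        printf("cookie 42 %s\n", rule_find(42) ? "found" : "missing");
        free(r);
        return 0;
    }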
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 9082e52b55e9..0fdc2d55fff6 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -660,6 +660,10 @@ int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
case XMII_MODE_RGMII:
rc = sja1105_rgmii_clocking_setup(priv, port, role);
break;
+ case XMII_MODE_SGMII:
+ /* Nothing to do in the CGU for SGMII */
+ rc = 0;
+ break;
default:
dev_err(dev, "Invalid interface mode specified: %d\n",
phy_mode);
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 25381bd65ed7..bf9b36ff35bf 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -124,6 +124,9 @@
#define SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD \
SJA1105_SIZE_DYN_CMD
+#define SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY)
+
#define SJA1105_MAX_DYN_CMD_SIZE \
SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD
@@ -481,6 +484,18 @@ sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
return 0;
}
+static void
+sja1105pqrs_avb_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+}
+
#define OP_READ BIT(0)
#define OP_WRITE BIT(1)
#define OP_DEL BIT(2)
@@ -610,7 +625,14 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
.addr = 0x38,
},
[BLK_IDX_L2_FORWARDING_PARAMS] = {0},
- [BLK_IDX_AVB_PARAMS] = {0},
+ [BLK_IDX_AVB_PARAMS] = {
+ .entry_packing = sja1105pqrs_avb_params_entry_packing,
+ .cmd_packing = sja1105pqrs_avb_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD,
+ .addr = 0x8003,
+ },
[BLK_IDX_GENERAL_PARAMS] = {
.entry_packing = sja1105et_general_params_entry_packing,
.cmd_packing = sja1105et_general_params_cmd_packing,
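As with the other dynamic-config tables, the AVB-params buffer is the packed entry followed by a 32-bit command word whose VALID, ERRORS and RDWRSET flags sit in bits 31..29, exactly as sja1105pqrs_avb_params_cmd_packing() encodes them. A simplified model of building that buffer; the 12-byte entry size is an assumption here (SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY is defined outside this hunk), and the real code packs through sja1105_packing() rather than memcpy():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ENTRY_SIZE  12          /* assumed entry size, see note */
    #define CMD_VALID   (1u << 31)
    #define CMD_ERRORS  (1u << 30)
    #define CMD_RDWRSET (1u << 29)  /* 1 = write, 0 = read */

    int main(void)
    {
        uint8_t buf[ENTRY_SIZE + 4] = { 0 };
        uint32_t cmd = CMD_VALID | CMD_RDWRSET;

        /* entry bytes would be packed into buf[0..ENTRY_SIZE-1] */
        memcpy(buf + ENTRY_SIZE, &cmd, sizeof(cmd));

        printf("command word: 0x%08x\n", cmd);
        return 0;
    }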
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
index 064301cc7d5b..d742ffcbfce9 100644
--- a/drivers/net/dsa/sja1105/sja1105_ethtool.c
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -7,6 +7,7 @@
#define SJA1105_SIZE_HL1_AREA (0x10 * 4)
#define SJA1105_SIZE_HL2_AREA (0x4 * 4)
#define SJA1105_SIZE_QLEVEL_AREA (0x8 * 4) /* 0x4 to 0xB */
+#define SJA1105_SIZE_ETHER_AREA (0x17 * 4)
struct sja1105_port_status_mac {
u64 n_runt;
@@ -63,10 +64,37 @@ struct sja1105_port_status_hl2 {
u64 qlevel[8]; /* Only for P/Q/R/S */
};
+struct sja1105_port_status_ether {
+ u64 n_drops_nolearn;
+ u64 n_drops_noroute;
+ u64 n_drops_ill_dtag;
+ u64 n_drops_dtag;
+ u64 n_drops_sotag;
+ u64 n_drops_sitag;
+ u64 n_drops_utag;
+ u64 n_tx_bytes_1024_2047;
+ u64 n_tx_bytes_512_1023;
+ u64 n_tx_bytes_256_511;
+ u64 n_tx_bytes_128_255;
+ u64 n_tx_bytes_65_127;
+ u64 n_tx_bytes_64;
+ u64 n_tx_mcast;
+ u64 n_tx_bcast;
+ u64 n_rx_bytes_1024_2047;
+ u64 n_rx_bytes_512_1023;
+ u64 n_rx_bytes_256_511;
+ u64 n_rx_bytes_128_255;
+ u64 n_rx_bytes_65_127;
+ u64 n_rx_bytes_64;
+ u64 n_rx_mcast;
+ u64 n_rx_bcast;
+};
+
struct sja1105_port_status {
struct sja1105_port_status_mac mac;
struct sja1105_port_status_hl1 hl1;
struct sja1105_port_status_hl2 hl2;
+ struct sja1105_port_status_ether ether;
};
static void
@@ -158,6 +186,58 @@ sja1105pqrs_port_status_qlevel_unpack(void *buf,
}
}
+static void
+sja1105pqrs_port_status_ether_unpack(void *buf,
+ struct sja1105_port_status_ether *status)
+{
+ /* Make pointer arithmetic work on 4 bytes */
+ u32 *p = buf;
+
+ sja1105_unpack(p + 0x16, &status->n_drops_nolearn, 31, 0, 4);
+ sja1105_unpack(p + 0x15, &status->n_drops_noroute, 31, 0, 4);
+ sja1105_unpack(p + 0x14, &status->n_drops_ill_dtag, 31, 0, 4);
+ sja1105_unpack(p + 0x13, &status->n_drops_dtag, 31, 0, 4);
+ sja1105_unpack(p + 0x12, &status->n_drops_sotag, 31, 0, 4);
+ sja1105_unpack(p + 0x11, &status->n_drops_sitag, 31, 0, 4);
+ sja1105_unpack(p + 0x10, &status->n_drops_utag, 31, 0, 4);
+ sja1105_unpack(p + 0x0F, &status->n_tx_bytes_1024_2047, 31, 0, 4);
+ sja1105_unpack(p + 0x0E, &status->n_tx_bytes_512_1023, 31, 0, 4);
+ sja1105_unpack(p + 0x0D, &status->n_tx_bytes_256_511, 31, 0, 4);
+ sja1105_unpack(p + 0x0C, &status->n_tx_bytes_128_255, 31, 0, 4);
+ sja1105_unpack(p + 0x0B, &status->n_tx_bytes_65_127, 31, 0, 4);
+ sja1105_unpack(p + 0x0A, &status->n_tx_bytes_64, 31, 0, 4);
+ sja1105_unpack(p + 0x09, &status->n_tx_mcast, 31, 0, 4);
+ sja1105_unpack(p + 0x08, &status->n_tx_bcast, 31, 0, 4);
+ sja1105_unpack(p + 0x07, &status->n_rx_bytes_1024_2047, 31, 0, 4);
+ sja1105_unpack(p + 0x06, &status->n_rx_bytes_512_1023, 31, 0, 4);
+ sja1105_unpack(p + 0x05, &status->n_rx_bytes_256_511, 31, 0, 4);
+ sja1105_unpack(p + 0x04, &status->n_rx_bytes_128_255, 31, 0, 4);
+ sja1105_unpack(p + 0x03, &status->n_rx_bytes_65_127, 31, 0, 4);
+ sja1105_unpack(p + 0x02, &status->n_rx_bytes_64, 31, 0, 4);
+ sja1105_unpack(p + 0x01, &status->n_rx_mcast, 31, 0, 4);
+ sja1105_unpack(p + 0x00, &status->n_rx_bcast, 31, 0, 4);
+}
+
+static int
+sja1105pqrs_port_status_get_ether(struct sja1105_private *priv,
+ struct sja1105_port_status_ether *ether,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_ETHER_AREA] = {0};
+ int rc;
+
+ /* Ethernet statistics area */
+ rc = sja1105_xfer_buf(priv, SPI_READ, regs->ether_stats[port],
+ packed_buf, SJA1105_SIZE_ETHER_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105pqrs_port_status_ether_unpack(packed_buf, ether);
+
+ return 0;
+}
+
static int sja1105_port_status_get_mac(struct sja1105_private *priv,
struct sja1105_port_status_mac *status,
int port)
@@ -241,7 +321,11 @@ static int sja1105_port_status_get(struct sja1105_private *priv,
if (rc < 0)
return rc;
- return 0;
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return 0;
+
+ return sja1105pqrs_port_status_get_ether(priv, &status->ether, port);
}
static char sja1105_port_stats[][ETH_GSTRING_LEN] = {
@@ -308,6 +392,30 @@ static char sja1105pqrs_extra_port_stats[][ETH_GSTRING_LEN] = {
"qlevel_5",
"qlevel_6",
"qlevel_7",
+ /* Ether Stats */
+ "n_drops_nolearn",
+ "n_drops_noroute",
+ "n_drops_ill_dtag",
+ "n_drops_dtag",
+ "n_drops_sotag",
+ "n_drops_sitag",
+ "n_drops_utag",
+ "n_tx_bytes_1024_2047",
+ "n_tx_bytes_512_1023",
+ "n_tx_bytes_256_511",
+ "n_tx_bytes_128_255",
+ "n_tx_bytes_65_127",
+ "n_tx_bytes_64",
+ "n_tx_mcast",
+ "n_tx_bcast",
+ "n_rx_bytes_1024_2047",
+ "n_rx_bytes_512_1023",
+ "n_rx_bytes_256_511",
+ "n_rx_bytes_128_255",
+ "n_rx_bytes_65_127",
+ "n_rx_bytes_64",
+ "n_rx_mcast",
+ "n_rx_bcast",
};
void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
@@ -376,6 +484,29 @@ void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
data[k++] = status.hl2.qlevel_hwm[i];
data[k++] = status.hl2.qlevel[i];
}
+ data[k++] = status.ether.n_drops_nolearn;
+ data[k++] = status.ether.n_drops_noroute;
+ data[k++] = status.ether.n_drops_ill_dtag;
+ data[k++] = status.ether.n_drops_dtag;
+ data[k++] = status.ether.n_drops_sotag;
+ data[k++] = status.ether.n_drops_sitag;
+ data[k++] = status.ether.n_drops_utag;
+ data[k++] = status.ether.n_tx_bytes_1024_2047;
+ data[k++] = status.ether.n_tx_bytes_512_1023;
+ data[k++] = status.ether.n_tx_bytes_256_511;
+ data[k++] = status.ether.n_tx_bytes_128_255;
+ data[k++] = status.ether.n_tx_bytes_65_127;
+ data[k++] = status.ether.n_tx_bytes_64;
+ data[k++] = status.ether.n_tx_mcast;
+ data[k++] = status.ether.n_tx_bcast;
+ data[k++] = status.ether.n_rx_bytes_1024_2047;
+ data[k++] = status.ether.n_rx_bytes_512_1023;
+ data[k++] = status.ether.n_rx_bytes_256_511;
+ data[k++] = status.ether.n_rx_bytes_128_255;
+ data[k++] = status.ether.n_rx_bytes_65_127;
+ data[k++] = status.ether.n_rx_bytes_64;
+ data[k++] = status.ether.n_rx_mcast;
+ data[k++] = status.ether.n_rx_bcast;
}
void sja1105_get_strings(struct dsa_switch *ds, int port,
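The ether-stats area is fetched as a single 0x17-word SPI read, and every counter is one full 32-bit word at a fixed word offset, which is why the unpack routine above walks from p + 0x16 down to p + 0x00. A minimal model of that indexing (the counter values are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define ETHER_AREA_WORDS 0x17

    int main(void)
    {
        uint32_t area[ETHER_AREA_WORDS] = { 0 };

        area[0x00] = 7;     /* n_rx_bcast lives at word 0x00 */
        area[0x16] = 3;     /* n_drops_nolearn at word 0x16 */

        printf("n_rx_bcast = %u, n_drops_nolearn = %u\n",
               area[0x00], area[0x16]);
        return 0;
    }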
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
new file mode 100644
index 000000000000..5288a722e625
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2020, NXP Semiconductors
+ */
+#include "sja1105.h"
+
+static struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
+ unsigned long cookie)
+{
+ struct sja1105_rule *rule;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list)
+ if (rule->cookie == cookie)
+ return rule;
+
+ return NULL;
+}
+
+static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
+{
+ int i;
+
+ for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
+ if (!priv->flow_block.l2_policer_used[i])
+ return i;
+
+ return -1;
+}
+
+static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie, int port,
+ u64 rate_bytes_per_sec,
+ s64 burst)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct sja1105_l2_policing_entry *policing;
+ bool new_rule = false;
+ unsigned long p;
+ int rc;
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_BCAST_POLICER;
+ rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
+ new_rule = true;
+ }
+
+ if (rule->bcast_pol.sharindx == -1) {
+ NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[(SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port].sharindx != port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port already has a broadcast policer");
+ rc = -EEXIST;
+ goto out;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ /* Make the broadcast policers of all ports attached to this block
+ * point to the newly allocated policer
+ */
+ for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
+ int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + p;
+
+ policing[bcast].sharindx = rule->bcast_pol.sharindx;
+ }
+
+ policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
+ 512, 1000000);
+ policing[rule->bcast_pol.sharindx].smax = div_u64(rate_bytes_per_sec *
+ PSCHED_NS2TICKS(burst),
+ PSCHED_TICKS_PER_SEC);
+ /* TODO: support per-flow MTU */
+ policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
+ ETH_FCS_LEN;
+
+ rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+
+out:
+ if (rc == 0 && new_rule) {
+ priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
+ list_add(&rule->list, &priv->flow_block.rules);
+ } else if (new_rule) {
+ kfree(rule);
+ }
+
+ return rc;
+}
+
+static int sja1105_setup_tc_policer(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack,
+ unsigned long cookie, int port, int tc,
+ u64 rate_bytes_per_sec,
+ s64 burst)
+{
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct sja1105_l2_policing_entry *policing;
+ bool new_rule = false;
+ unsigned long p;
+ int rc;
+
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->cookie = cookie;
+ rule->type = SJA1105_RULE_TC_POLICER;
+ rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
+ rule->tc_pol.tc = tc;
+ new_rule = true;
+ }
+
+ if (rule->tc_pol.sharindx == -1) {
+ NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port-TC pair already has an L2 policer");
+ rc = -EEXIST;
+ goto out;
+ }
+
+ rule->port_mask |= BIT(port);
+
+ /* Make the policers for traffic class @tc of all ports attached to
+ * this block point to the newly allocated policer
+ */
+ for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
+ int index = (p * SJA1105_NUM_TC) + tc;
+
+ policing[index].sharindx = rule->tc_pol.sharindx;
+ }
+
+ policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
+ 512, 1000000);
+ policing[rule->tc_pol.sharindx].smax = div_u64(rate_bytes_per_sec *
+ PSCHED_NS2TICKS(burst),
+ PSCHED_TICKS_PER_SEC);
+ /* TODO: support per-flow MTU */
+ policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
+ ETH_FCS_LEN;
+
+ rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+
+out:
+ if (rc == 0 && new_rule) {
+ priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
+ list_add(&rule->list, &priv->flow_block.rules);
+ } else if (new_rule) {
+ kfree(rule);
+ }
+
+ return rc;
+}
+
+static int sja1105_flower_parse_policer(struct sja1105_private *priv, int port,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ u64 rate_bytes_per_sec,
+ s64 burst)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ if (match.key->n_proto) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on protocol not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!ether_addr_equal_masked(match.key->src, null,
+ match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ether_addr_equal_masked(match.key->dst, bcast,
+ match.mask->dst)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only matching on broadcast DMAC is supported");
+ return -EOPNOTSUPP;
+ }
+
+ return sja1105_setup_bcast_policer(priv, extack, cls->cookie,
+ port, rate_bytes_per_sec,
+ burst);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.key->vlan_id & match.mask->vlan_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on VID is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->vlan_priority != 0x7) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Masked matching on PCP is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ return sja1105_setup_tc_policer(priv, extack, cls->cookie, port,
+ match.key->vlan_priority,
+ rate_bytes_per_sec,
+ burst);
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
+ return -EOPNOTSUPP;
+}
+
+int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct sja1105_private *priv = ds->priv;
+ const struct flow_action_entry *act;
+ int rc = -EOPNOTSUPP, i;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ rc = sja1105_flower_parse_policer(priv, port, extack, cls,
+ act->police.rate_bytes_ps,
+ act->police.burst);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ break;
+ }
+ }
+
+ return rc;
+}
+
+int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
+ struct sja1105_l2_policing_entry *policing;
+ int old_sharindx;
+
+ if (!rule)
+ return 0;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (rule->type == SJA1105_RULE_BCAST_POLICER) {
+ int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
+
+ old_sharindx = policing[bcast].sharindx;
+ policing[bcast].sharindx = port;
+ } else if (rule->type == SJA1105_RULE_TC_POLICER) {
+ int index = (port * SJA1105_NUM_TC) + rule->tc_pol.tc;
+
+ old_sharindx = policing[index].sharindx;
+ policing[index].sharindx = port;
+ } else {
+ return -EINVAL;
+ }
+
+ rule->port_mask &= ~BIT(port);
+ if (!rule->port_mask) {
+ priv->flow_block.l2_policer_used[old_sharindx] = false;
+ list_del(&rule->list);
+ kfree(rule);
+ }
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
+void sja1105_flower_setup(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ int port;
+
+ INIT_LIST_HEAD(&priv->flow_block.rules);
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++)
+ priv->flow_block.l2_policer_used[port] = true;
+}
+
+void sja1105_flower_teardown(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_rule *rule;
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &priv->flow_block.rules) {
+ rule = list_entry(pos, struct sja1105_rule, list);
+ list_del(&rule->list);
+ kfree(rule);
+ }
+}
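
The broadcast and per-TC rules above index into a fixed stage-1 region of the policing table. A minimal userspace sketch (assuming SJA1105_NUM_PORTS = 5 and SJA1105_NUM_TC = 8, as the table diagram added to sja1105_main.c below implies) that reproduces the slot arithmetic used by sja1105_cls_flower_del:

#include <stdio.h>

#define SJA1105_NUM_PORTS	5
#define SJA1105_NUM_TC		8

int main(void)
{
	int port, tc;

	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		/* Per-{port, TC} lookup slots occupy indices 0..39 */
		for (tc = 0; tc < SJA1105_NUM_TC; tc++)
			printf("port %d tc %d -> slot %d\n", port, tc,
			       port * SJA1105_NUM_TC + tc);
		/* Broadcast lookup slots occupy indices 40..44 */
		printf("port %d bcast -> slot %d\n", port,
		       SJA1105_NUM_PORTS * SJA1105_NUM_TC + port);
	}
	return 0;
}
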
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 7edea5741a5f..472f4eb20c49 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -22,6 +22,7 @@
#include <linux/if_ether.h>
#include <linux/dsa/8021q.h>
#include "sja1105.h"
+#include "sja1105_sgmii.h"
#include "sja1105_tas.h"
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
@@ -135,6 +136,21 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
return 0;
}
+static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port)
+{
+ if (priv->info->part_no != SJA1105R_PART_NO &&
+ priv->info->part_no != SJA1105S_PART_NO)
+ return false;
+
+ if (port != SJA1105_SGMII_PORT)
+ return false;
+
+ if (dsa_is_unused_port(priv->ds, port))
+ return false;
+
+ return true;
+}
+
static int sja1105_init_mii_settings(struct sja1105_private *priv,
struct sja1105_dt_port *ports)
{
@@ -162,6 +178,9 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
mii = table->entries;
for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ if (dsa_is_unused_port(priv->ds, i))
+ continue;
+
switch (ports[i].phy_mode) {
case PHY_INTERFACE_MODE_MII:
mii->xmii_mode[i] = XMII_MODE_MII;
@@ -175,12 +194,24 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
case PHY_INTERFACE_MODE_RGMII_TXID:
mii->xmii_mode[i] = XMII_MODE_RGMII;
break;
+ case PHY_INTERFACE_MODE_SGMII:
+ if (!sja1105_supports_sgmii(priv, i))
+ return -EINVAL;
+ mii->xmii_mode[i] = XMII_MODE_SGMII;
+ break;
default:
dev_err(dev, "Unsupported PHY mode %s!\n",
phy_modes(ports[i].phy_mode));
}
- mii->phy_mac[i] = ports[i].role;
+ /* Even though the SerDes port is able to drive SGMII autoneg
+ * like a PHY would, from the perspective of the XMII tables,
+ * the SGMII port should always be put in MAC mode.
+ */
+ if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII)
+ mii->phy_mac[i] = XMII_MAC;
+ else
+ mii->phy_mac[i] = ports[i].role;
}
return 0;
}
@@ -448,23 +479,93 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
return 0;
}
-#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
-
-static void sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
- int index)
+static int sja1105_init_avb_params(struct sja1105_private *priv)
{
- policing[index].sharindx = index;
- policing[index].smax = 65535; /* Burst size in bytes */
- policing[index].rate = SJA1105_RATE_MBPS(1000);
- policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
- policing[index].partition = 0;
+ struct sja1105_avb_params_entry *avb;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
+
+ /* Discard previous AVB Parameters Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
+
+ avb = table->entries;
+
+ /* Configure the MAC addresses for meta frames */
+ avb->destmeta = SJA1105_META_DMAC;
+ avb->srcmeta = SJA1105_META_SMAC;
+ /* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
+ * default. This is because there might be boards with a hardware
+ * layout where enabling the pin as output might cause an electrical
+ * clash. On E/T the pin is always an output, which the board designers
+ * probably already knew, so even if there are going to be electrical
+ * issues, there's nothing we can do.
+ */
+ avb->cas_master = false;
+
+ return 0;
}
+/* The L2 policing table is 2-stage. The table is looked up for each frame
+ * according to the ingress port, whether it was broadcast or not, and the
+ * classified traffic class (given by VLAN PCP). This portion of the lookup is
+ * fixed, and gives access to the SHARINDX, an indirection register pointing
+ * within the policing table itself, which is used to resolve the policer that
+ * will be used for this frame.
+ *
+ * Stage 1 Stage 2
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 0 |SHARINDX| | Policer 0: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 1 |SHARINDX| | Policer 1: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 2: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 0 TC 7 |SHARINDX| | Policer 3: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 1 TC 0 |SHARINDX| | Policer 4: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 5: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 1 TC 7 |SHARINDX| | Policer 6: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * ... | Policer 7: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ * |Port 4 TC 7 |SHARINDX| ...
+ * +------------+--------+
+ * |Port 0 BCAST|SHARINDX| ...
+ * +------------+--------+
+ * |Port 1 BCAST|SHARINDX| ...
+ * +------------+--------+
+ * ... ...
+ * +------------+--------+ +---------------------------------+
+ * |Port 4 BCAST|SHARINDX| | Policer 44: Rate, Burst, MTU |
+ * +------------+--------+ +---------------------------------+
+ *
+ * In this driver, we shall use policers 0-4 as statically allocated port
+ * (matchall) policers. So we need to make the SHARINDX for all lookups
+ * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
+ * lookup) equal.
+ * The remaining policers (40) shall be dynamically allocated for flower
+ * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
+ */
+#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
+
static int sja1105_init_l2_policing(struct sja1105_private *priv)
{
struct sja1105_l2_policing_entry *policing;
struct sja1105_table *table;
- int i, j, k;
+ int port, tc;
table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
@@ -483,18 +584,29 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
policing = table->entries;
- /* k sweeps through all unicast policers (0-39).
- * bcast sweeps through policers 40-44.
- */
- for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) {
- int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i;
+	/* Set up shared indices for the matchall policers */
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
- for (j = 0; j < SJA1105_NUM_TC; j++, k++)
- sja1105_setup_policer(policing, k);
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++)
+ policing[port * SJA1105_NUM_TC + tc].sharindx = port;
- /* Set up this port's policer for broadcast traffic */
- sja1105_setup_policer(policing, bcast);
+ policing[bcast].sharindx = port;
}
+
+	/* Set up the matchall policer parameters */
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(priv->ds, port))
+ mtu += VLAN_HLEN;
+
+ policing[port].smax = 65535; /* Burst size in bytes */
+ policing[port].rate = SJA1105_RATE_MBPS(1000);
+ policing[port].maxlen = mtu;
+ policing[port].partition = 0;
+ }
+
return 0;
}
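
A quick cross-check of SJA1105_RATE_MBPS against the credit-based model documented in sja1105_port_policer_add further down: the hardware adds RATE/64 bytes of credit every 8 us, so RATE = 64000 means 1000 bytes per 8 us, i.e. 125,000,000 bytes/s, which is 1 Gbps line rate. A standalone sketch of both conversion directions:

#include <stdio.h>
#include <stdint.h>

/* RATE/64 bytes of credit every 8 us, so 125000 increments per second */
static uint64_t rate_field_to_Bps(uint64_t rate)
{
	return rate * 125000 / 64;	/* bytes per second */
}

/* Inverse, as the driver computes it: div_u64(512 * Bps, 1000000) */
static uint64_t Bps_to_rate_field(uint64_t bytes_per_sec)
{
	return 512 * bytes_per_sec / 1000000;
}

int main(void)
{
	/* SJA1105_RATE_MBPS(1000) expands to 64000 */
	printf("%llu\n", (unsigned long long)rate_field_to_Bps(64000));
	printf("%llu\n", (unsigned long long)Bps_to_rate_field(125000000));
	return 0;
}
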
@@ -538,6 +650,9 @@ static int sja1105_static_config_load(struct sja1105_private *priv,
rc = sja1105_init_general_params(priv);
if (rc < 0)
return rc;
+ rc = sja1105_init_avb_params(priv);
+ if (rc < 0)
+ return rc;
/* Send initial configuration to hardware via SPI */
return sja1105_static_config_upload(priv);
@@ -647,6 +762,85 @@ static int sja1105_parse_dt(struct sja1105_private *priv,
return rc;
}
+static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 val;
+ int rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val,
+ NULL);
+ if (rc < 0)
+ return rc;
+
+ return val;
+}
+
+static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg,
+ u16 pcs_val)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 val = pcs_val;
+ int rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val,
+ NULL);
+ if (rc < 0)
+ return rc;
+
+ return val;
+}
+
+static void sja1105_sgmii_pcs_config(struct sja1105_private *priv,
+ bool an_enabled, bool an_master)
+{
+ u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII;
+
+ /* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to
+ * stop the clock during LPI mode, make the MAC reconfigure
+ * autonomously after PCS autoneg is done, flush the internal FIFOs.
+ */
+ sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 |
+ SJA1105_DC1_CLOCK_STOP_EN |
+ SJA1105_DC1_MAC_AUTO_SW |
+ SJA1105_DC1_INIT);
+ /* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */
+ sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE);
+ /* AUTONEG_CONTROL: Use SGMII autoneg */
+ if (an_master)
+ ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK;
+ sja1105_sgmii_write(priv, SJA1105_AC, ac);
+ /* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise,
+ * sja1105_sgmii_pcs_force_speed must be called later for the link
+ * to become operational.
+ */
+ if (an_enabled)
+ sja1105_sgmii_write(priv, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART);
+}
+
+static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv,
+ int speed)
+{
+ int pcs_speed;
+
+ switch (speed) {
+ case SPEED_1000:
+ pcs_speed = BMCR_SPEED1000;
+ break;
+ case SPEED_100:
+ pcs_speed = BMCR_SPEED100;
+ break;
+ case SPEED_10:
+ pcs_speed = BMCR_SPEED10;
+ break;
+ default:
+ dev_err(priv->ds->dev, "Invalid speed %d\n", speed);
+ return;
+ }
+ sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX);
+}
+
/* Convert link speed from SJA1105 to ethtool encoding */
static int sja1105_speed[] = {
[SJA1105_SPEED_AUTO] = SPEED_UNKNOWN,
@@ -704,8 +898,13 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* table, since this will be used for the clocking setup, and we no
* longer need to store it in the static config (already told hardware
* we want auto during upload phase).
+ * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
+ * we need to configure the PCS only (if even that).
*/
- mac[port].speed = speed;
+ if (sja1105_supports_sgmii(priv, port))
+ mac[port].speed = SJA1105_SPEED_1000MBPS;
+ else
+ mac[port].speed = speed;
/* Write to the dynamic reconfiguration tables */
rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
@@ -754,26 +953,34 @@ static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
return (phy_mode != XMII_MODE_RGMII);
+ case PHY_INTERFACE_MODE_SGMII:
+ return (phy_mode != XMII_MODE_SGMII);
default:
return true;
}
}
static void sja1105_mac_config(struct dsa_switch *ds, int port,
- unsigned int link_an_mode,
+ unsigned int mode,
const struct phylink_link_state *state)
{
struct sja1105_private *priv = ds->priv;
+ bool is_sgmii = sja1105_supports_sgmii(priv, port);
- if (sja1105_phy_mode_mismatch(priv, port, state->interface))
+ if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
+ dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
+ phy_modes(state->interface));
return;
+ }
- if (link_an_mode == MLO_AN_INBAND) {
+ if (phylink_autoneg_inband(mode) && !is_sgmii) {
dev_err(ds->dev, "In-band AN not supported!\n");
return;
}
- sja1105_adjust_port_config(priv, port, state->speed);
+ if (is_sgmii)
+ sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode),
+ false);
}
static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
@@ -786,9 +993,18 @@ static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
- sja1105_inhibit_tx(ds->priv, BIT(port), false);
+ struct sja1105_private *priv = ds->priv;
+
+ sja1105_adjust_port_config(priv, port, speed);
+
+ if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
+ sja1105_sgmii_pcs_force_speed(priv, speed);
+
+ sja1105_inhibit_tx(priv, BIT(port), false);
}
static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
@@ -822,7 +1038,9 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, MII);
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Full);
- if (mii->xmii_mode[port] == XMII_MODE_RGMII)
+ phylink_set(mask, 100baseT1_Full);
+ if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
+ mii->xmii_mode[port] == XMII_MODE_SGMII)
phylink_set(mask, 1000baseT_Full);
bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -830,6 +1048,38 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
+static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct sja1105_private *priv = ds->priv;
+ int ais;
+
+ /* Read the vendor-specific AUTONEG_INTR_STATUS register */
+ ais = sja1105_sgmii_read(priv, SJA1105_AIS);
+ if (ais < 0)
+ return ais;
+
+ switch (SJA1105_AIS_SPEED(ais)) {
+ case 0:
+ state->speed = SPEED_10;
+ break;
+ case 1:
+ state->speed = SPEED_100;
+ break;
+ case 2:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
+ SJA1105_AIS_SPEED(ais));
+ }
+ state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
+ state->an_complete = SJA1105_AIS_COMPLETE(ais);
+ state->link = SJA1105_AIS_LINK_STATUS(ais);
+
+ return 0;
+}
+
static int
sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
const struct sja1105_l2_lookup_entry *requested)
@@ -1338,6 +1588,7 @@ static const char * const sja1105_reset_reasons[] = {
[SJA1105_RX_HWTSTAMPING] = "RX timestamping",
[SJA1105_AGEING_TIME] = "Ageing time",
[SJA1105_SCHEDULING] = "Time-aware scheduling",
+ [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
};
/* For situations where we need to change a setting at runtime that is only
@@ -1356,6 +1607,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
struct dsa_switch *ds = priv->ds;
s64 t1, t2, t3, t4;
s64 t12, t34;
+ u16 bmcr = 0;
int rc, i;
s64 now;
@@ -1373,6 +1625,9 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
mac[i].speed = SJA1105_SPEED_AUTO;
}
+ if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT))
+ bmcr = sja1105_sgmii_read(priv, MII_BMCR);
+
/* No PTP operations can run right now */
mutex_lock(&priv->ptp_data.lock);
@@ -1422,6 +1677,25 @@ out_unlock_ptp:
if (rc < 0)
goto out;
}
+
+ if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) {
+ bool an_enabled = !!(bmcr & BMCR_ANENABLE);
+
+ sja1105_sgmii_pcs_config(priv, an_enabled, false);
+
+ if (!an_enabled) {
+ int speed = SPEED_UNKNOWN;
+
+ if (bmcr & BMCR_SPEED1000)
+ speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ speed = SPEED_100;
+ else if (bmcr & BMCR_SPEED10)
+ speed = SPEED_10;
+
+ sja1105_sgmii_pcs_force_speed(priv, speed);
+ }
+ }
out:
mutex_unlock(&priv->mgmt_lock);
@@ -1723,6 +1997,8 @@ static int sja1105_setup(struct dsa_switch *ds)
/* Advertise the 8 egress queues */
ds->num_tx_queues = SJA1105_NUM_TC;
+ ds->mtu_enforcement_ingress = true;
+
/* The DSA/switchdev model brings up switch ports in standalone mode by
* default, and that means vlan_filtering is 0 since they're not under
* a bridge, so it's safe to set up switch tagging at this time.
@@ -1745,6 +2021,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
kthread_destroy_worker(sp->xmit_worker);
}
+ sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
sja1105_ptp_clock_unregister(ds);
sja1105_static_config_free(&priv->static_config);
@@ -1891,6 +2168,31 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
}
+static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(ds, port))
+ new_mtu += VLAN_HLEN;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ if (policing[port].maxlen == new_mtu)
+ return 0;
+
+ policing[port].maxlen = new_mtu;
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
+static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
+
static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
@@ -1981,12 +2283,49 @@ static void sja1105_mirror_del(struct dsa_switch *ds, int port,
mirror->ingress, false);
}
+static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_policer_tc_entry *policer)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ /* In hardware, every 8 microseconds the credit level is incremented by
+ * the value of RATE bytes divided by 64, up to a maximum of SMAX
+ * bytes.
+ */
+ policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
+ 1000000);
+ policing[port].smax = div_u64(policer->rate_bytes_per_sec *
+ PSCHED_NS2TICKS(policer->burst),
+ PSCHED_TICKS_PER_SEC);
+
+ return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
+static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_private *priv = ds->priv;
+
+ policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
+
+ policing[port].rate = SJA1105_RATE_MBPS(1000);
+ policing[port].smax = 65535;
+
+ sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
+}
+
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
.setup = sja1105_setup,
.teardown = sja1105_teardown,
.set_ageing_time = sja1105_set_ageing_time,
+ .port_change_mtu = sja1105_change_mtu,
+ .port_max_mtu = sja1105_get_max_mtu,
.phylink_validate = sja1105_phylink_validate,
+ .phylink_mac_link_state = sja1105_mac_pcs_get_state,
.phylink_mac_config = sja1105_mac_config,
.phylink_mac_link_up = sja1105_mac_link_up,
.phylink_mac_link_down = sja1105_mac_link_down,
@@ -2016,6 +2355,10 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_setup_tc = sja1105_port_setup_tc,
.port_mirror_add = sja1105_mirror_add,
.port_mirror_del = sja1105_mirror_del,
+ .port_policer_add = sja1105_port_policer_add,
+ .port_policer_del = sja1105_port_policer_del,
+ .cls_flower_add = sja1105_cls_flower_add,
+ .cls_flower_del = sja1105_cls_flower_del,
};
static int sja1105_check_device_id(struct sja1105_private *priv)
@@ -2119,6 +2462,7 @@ static int sja1105_probe(struct spi_device *spi)
mutex_init(&priv->mgmt_lock);
sja1105_tas_setup(ds);
+ sja1105_flower_setup(ds);
rc = dsa_register_switch(priv->ds);
if (rc)
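
The MTU callbacks added above convert between the L2 MTU that the DSA core tracks and the full frame length that the L2 policing MAXLEN field checks. Assuming the standard kernel header sizes (VLAN_ETH_HLEN = 18, ETH_FCS_LEN = 4, and VLAN_HLEN = 4 of extra room for the DSA 802.1Q tag on the CPU port), the arithmetic works out as in this sketch:

#include <stdio.h>

#define VLAN_ETH_HLEN	18	/* DMAC + SMAC + VLAN tag + ethertype */
#define ETH_FCS_LEN	4
#define VLAN_HLEN	4	/* extra room for the DSA 802.1Q tag */

int main(void)
{
	int max_frame = 2043;	/* hardware MAXLEN limit */
	int user_mtu = 1500;

	printf("port_max_mtu: %d\n",
	       max_frame - VLAN_ETH_HLEN - ETH_FCS_LEN);		/* 2021 */
	printf("user port MAXLEN: %d\n",
	       user_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN);		/* 1522 */
	printf("CPU port MAXLEN:  %d\n",
	       user_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); /* 1526 */
	return 0;
}
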
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index a836fc38c4a4..a22f8e3fc06b 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -14,6 +14,17 @@
#define SJA1105_MAX_ADJ_PPB 32000000
#define SJA1105_SIZE_PTP_CMD 4
+/* PTPSYNCTS has no interrupt or update mechanism, because the intended
+ * hardware use case is for the timestamp to be collected synchronously,
+ * immediately after the CAS_MASTER SJA1105 switch has triggered a CASSYNC
+ * pulse on the PTP_CLK pin. When used as a generic extts source, it needs
+ * polling and a comparison with the old value. The polling interval is just
+ * the Nyquist rate of a canonical PPS input (e.g. from a GPS module).
+ * Anything of higher frequency than 1 Hz will be lost, since there is no
+ * timestamp FIFO.
+ */
+#define SJA1105_EXTTS_INTERVAL (HZ / 2)
+
/* This range is actually +/- SJA1105_MAX_ADJ_PPB
* divided by 1000 (ppb -> ppm) and with a 16-bit
* "fractional" part (actually fixed point).
@@ -39,44 +50,13 @@ enum sja1105_ptp_clk_mode {
PTP_SET_MODE = 0,
};
+#define extts_to_data(d) \
+ container_of((d), struct sja1105_ptp_data, extts_work)
#define ptp_caps_to_data(d) \
container_of((d), struct sja1105_ptp_data, caps)
#define ptp_data_to_sja1105(d) \
container_of((d), struct sja1105_private, ptp_data)
-static int sja1105_init_avb_params(struct sja1105_private *priv,
- bool on)
-{
- struct sja1105_avb_params_entry *avb;
- struct sja1105_table *table;
-
- table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
-
- /* Discard previous AVB Parameters Table */
- if (table->entry_count) {
- kfree(table->entries);
- table->entry_count = 0;
- }
-
- /* Configure the reception of meta frames only if requested */
- if (!on)
- return 0;
-
- table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
- table->ops->unpacked_entry_size, GFP_KERNEL);
- if (!table->entries)
- return -ENOMEM;
-
- table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
-
- avb = table->entries;
-
- avb->destmeta = SJA1105_META_DMAC;
- avb->srcmeta = SJA1105_META_SMAC;
-
- return 0;
-}
-
/* Must be called only with priv->tagger_data.state bit
* SJA1105_HWTS_RX_EN cleared
*/
@@ -86,17 +66,12 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct sja1105_general_params_entry *general_params;
struct sja1105_table *table;
- int rc;
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
general_params->send_meta1 = on;
general_params->send_meta0 = on;
- rc = sja1105_init_avb_params(priv, on);
- if (rc < 0)
- return rc;
-
/* Initialize the meta state machine to a known state */
if (priv->tagger_data.stampable_skb) {
kfree_skb(priv->tagger_data.stampable_skb);
@@ -206,6 +181,8 @@ void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
sja1105_packing(buf, &valid, 31, 31, size, op);
sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->startptpcp, 28, 28, size, op);
+ sja1105_packing(buf, &cmd->stopptpcp, 27, 27, size, op);
sja1105_packing(buf, &cmd->resptp, 2, 2, size, op);
sja1105_packing(buf, &cmd->corrclk4ts, 1, 1, size, op);
sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
@@ -221,6 +198,8 @@ void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
sja1105_packing(buf, &valid, 31, 31, size, op);
sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op);
sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op);
+ sja1105_packing(buf, &cmd->startptpcp, 28, 28, size, op);
+ sja1105_packing(buf, &cmd->stopptpcp, 27, 27, size, op);
sja1105_packing(buf, &cmd->resptp, 3, 3, size, op);
sja1105_packing(buf, &cmd->corrclk4ts, 2, 2, size, op);
sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op);
@@ -615,6 +594,227 @@ static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return rc;
}
+static void sja1105_ptp_extts_work(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct sja1105_ptp_data *ptp_data = extts_to_data(dw);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct ptp_clock_event event;
+ u64 ptpsyncts = 0;
+ int rc;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_xfer_u64(priv, SPI_READ, regs->ptpsyncts, &ptpsyncts,
+ NULL);
+ if (rc < 0)
+ dev_err_ratelimited(priv->ds->dev,
+ "Failed to read PTPSYNCTS: %d\n", rc);
+
+ if (ptpsyncts && ptp_data->ptpsyncts != ptpsyncts) {
+ event.index = 0;
+ event.type = PTP_CLOCK_EXTTS;
+ event.timestamp = ns_to_ktime(sja1105_ticks_to_ns(ptpsyncts));
+ ptp_clock_event(ptp_data->clock, &event);
+
+ ptp_data->ptpsyncts = ptpsyncts;
+ }
+
+ mutex_unlock(&ptp_data->lock);
+
+ schedule_delayed_work(&ptp_data->extts_work, SJA1105_EXTTS_INTERVAL);
+}
+
+static int sja1105_change_ptp_clk_pin_func(struct sja1105_private *priv,
+ enum ptp_pin_function func)
+{
+ struct sja1105_avb_params_entry *avb;
+ enum ptp_pin_function old_func;
+
+ avb = priv->static_config.tables[BLK_IDX_AVB_PARAMS].entries;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID ||
+ avb->cas_master)
+ old_func = PTP_PF_PEROUT;
+ else
+ old_func = PTP_PF_EXTTS;
+
+ if (func == old_func)
+ return 0;
+
+ avb->cas_master = (func == PTP_PF_PEROUT);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_AVB_PARAMS, 0, avb,
+ true);
+}
+
+/* The PTP_CLK pin may be configured to toggle with a 50% duty cycle and a
+ * frequency f:
+ *
+ * NSEC_PER_SEC
+ * f = ----------------------
+ * (PTPPINDUR * 8 ns) * 2
+ */
+static int sja1105_per_out_enable(struct sja1105_private *priv,
+ struct ptp_perout_request *perout,
+ bool on)
+{
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_ptp_cmd cmd = ptp_data->cmd;
+ int rc;
+
+ /* We only support one channel */
+ if (perout->index != 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (perout->flags)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptp_data->lock);
+
+ rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_PEROUT);
+ if (rc)
+ goto out;
+
+ if (on) {
+ struct timespec64 pin_duration_ts = {
+ .tv_sec = perout->period.sec,
+ .tv_nsec = perout->period.nsec,
+ };
+ struct timespec64 pin_start_ts = {
+ .tv_sec = perout->start.sec,
+ .tv_nsec = perout->start.nsec,
+ };
+ u64 pin_duration = timespec64_to_ns(&pin_duration_ts);
+ u64 pin_start = timespec64_to_ns(&pin_start_ts);
+ u32 pin_duration32;
+ u64 now;
+
+ /* ptppindur: 32 bit register which holds the interval between
+ * 2 edges on PTP_CLK. So check for truncation which happens
+ * at periods larger than around 68.7 seconds.
+ */
+ pin_duration = ns_to_sja1105_ticks(pin_duration / 2);
+ if (pin_duration > U32_MAX) {
+ rc = -ERANGE;
+ goto out;
+ }
+ pin_duration32 = pin_duration;
+
+		/* ptppinst: 64 bit register which needs to hold a PTP time
+ * larger than the current time, otherwise the startptpcp
+ * command won't do anything. So advance the current time
+ * by a number of periods in a way that won't alter the
+ * phase offset.
+ */
+ rc = __sja1105_ptp_gettimex(priv->ds, &now, NULL);
+ if (rc < 0)
+ goto out;
+
+ pin_start = future_base_time(pin_start, pin_duration,
+ now + 1ull * NSEC_PER_SEC);
+ pin_start = ns_to_sja1105_ticks(pin_start);
+
+ rc = sja1105_xfer_u64(priv, SPI_WRITE, regs->ptppinst,
+ &pin_start, NULL);
+ if (rc < 0)
+ goto out;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptppindur,
+ &pin_duration32, NULL);
+ if (rc < 0)
+ goto out;
+ }
+
+ if (on)
+ cmd.startptpcp = true;
+ else
+ cmd.stopptpcp = true;
+
+ rc = sja1105_ptp_commit(priv->ds, &cmd, SPI_WRITE);
+
+out:
+ mutex_unlock(&ptp_data->lock);
+
+ return rc;
+}
+
+static int sja1105_extts_enable(struct sja1105_private *priv,
+ struct ptp_extts_request *extts,
+ bool on)
+{
+ int rc;
+
+ /* We only support one channel */
+ if (extts->index != 0)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (extts->flags)
+ return -EOPNOTSUPP;
+
+ rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_EXTTS);
+ if (rc)
+ return rc;
+
+ if (on)
+ schedule_delayed_work(&priv->ptp_data.extts_work,
+ SJA1105_EXTTS_INTERVAL);
+ else
+ cancel_delayed_work_sync(&priv->ptp_data.extts_work);
+
+ return 0;
+}
+
+static int sja1105_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *req, int on)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+ int rc = -EOPNOTSUPP;
+
+ if (req->type == PTP_CLK_REQ_PEROUT)
+ rc = sja1105_per_out_enable(priv, &req->perout, on);
+ else if (req->type == PTP_CLK_REQ_EXTTS)
+ rc = sja1105_extts_enable(priv, &req->extts, on);
+
+ return rc;
+}
+
+static int sja1105_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data);
+
+ if (chan != 0 || pin != 0)
+ return -1;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ case PTP_PF_EXTTS:
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return -1;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+static struct ptp_pin_desc sja1105_ptp_pin = {
+ .name = "ptp_clk",
+ .index = 0,
+ .func = PTP_PF_NONE,
+};
+
int sja1105_ptp_clock_register(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
@@ -628,8 +828,14 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
.adjtime = sja1105_ptp_adjtime,
.gettimex64 = sja1105_ptp_gettimex,
.settime64 = sja1105_ptp_settime,
+ .enable = sja1105_ptp_enable,
+ .verify = sja1105_ptp_verify_pin,
.do_aux_work = sja1105_rxtstamp_work,
.max_adj = SJA1105_MAX_ADJ_PPB,
+ .pin_config = &sja1105_ptp_pin,
+ .n_pins = 1,
+ .n_ext_ts = 1,
+ .n_per_out = 1,
};
skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
@@ -642,6 +848,8 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
ptp_data->cmd.corrclk4ts = true;
ptp_data->cmd.ptpclkadd = PTP_SET_MODE;
+ INIT_DELAYED_WORK(&ptp_data->extts_work, sja1105_ptp_extts_work);
+
return sja1105_ptp_reset(ds);
}
@@ -653,6 +861,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
if (IS_ERR_OR_NULL(ptp_data->clock))
return;
+ cancel_delayed_work_sync(&ptp_data->extts_work);
ptp_cancel_worker_sync(ptp_data->clock);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
ptp_clock_unregister(ptp_data->clock);
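
The -ERANGE check in sja1105_per_out_enable follows from PTPPINDUR being a 32-bit count of ticks that holds half the period. A small sketch of the resulting limit, assuming the 8 ns tick from the frequency formula above (this is where the "around 68.7 seconds" in the comment comes from):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tick_ns = 8;	/* one PTPPINDUR tick */
	uint64_t max_half_period_ns = (uint64_t)UINT32_MAX * tick_ns;
	double max_period_s = 2.0 * max_half_period_ns / 1e9;

	printf("max PTP_CLK period: %.1f s\n", max_period_s); /* ~68.7 s */
	return 0;
}
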
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 6f4a19eec709..43480b24f1f0 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -21,7 +21,36 @@ static inline s64 sja1105_ticks_to_ns(s64 ticks)
return ticks * SJA1105_TICK_NS;
}
+/* Calculate the first base_time in the future that satisfies this
+ * relationship:
+ *
+ * future_base_time = base_time + N x cycle_time >= now, or
+ *
+ * now - base_time
+ * N >= ---------------
+ * cycle_time
+ *
+ * Because N is an integer, the ceiling value of the above "a / b" ratio
+ * is in fact precisely the floor value of "(a + b - 1) / b", which is
+ * easier to calculate only having integer division tools.
+ */
+static inline s64 future_base_time(s64 base_time, s64 cycle_time, s64 now)
+{
+ s64 a, b, n;
+
+ if (base_time >= now)
+ return base_time;
+
+ a = now - base_time;
+ b = cycle_time;
+ n = div_s64(a + b - 1, b);
+
+ return base_time + n * cycle_time;
+}
+
struct sja1105_ptp_cmd {
+ u64 startptpcp; /* start toggling PTP_CLK pin */
+ u64 stopptpcp; /* stop toggling PTP_CLK pin */
u64 ptpstrtsch; /* start schedule */
u64 ptpstopsch; /* stop schedule */
u64 resptp; /* reset */
@@ -30,12 +59,14 @@ struct sja1105_ptp_cmd {
};
struct sja1105_ptp_data {
+ struct delayed_work extts_work;
struct sk_buff_head skb_rxtstamp_queue;
struct ptp_clock_info caps;
struct ptp_clock *clock;
struct sja1105_ptp_cmd cmd;
/* Serializes all operations on the PTP hardware clock */
struct mutex lock;
+ u64 ptpsyncts;
};
int sja1105_ptp_clock_register(struct dsa_switch *ds);
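
A worked example of the future_base_time helper moved into this header: with base_time = 1 s, cycle_time = 3 s and now = 10 s, N = ceil(9 / 3) = 3 and the first valid base time is 1 + 3 * 3 = 10 s. A userspace re-statement with plain integer division (div_s64 swapped out) confirms it:

#include <stdio.h>
#include <stdint.h>

/* Same ceiling-division identity as the kernel helper */
static int64_t future_base_time(int64_t base_time, int64_t cycle_time,
				int64_t now)
{
	if (base_time >= now)
		return base_time;
	return base_time +
	       (now - base_time + cycle_time - 1) / cycle_time * cycle_time;
}

int main(void)
{
	int64_t ns = 1000000000;

	/* Prints 10000000000, i.e. exactly "now", since 10 = 1 + 3 * 3 */
	printf("%lld\n", (long long)future_base_time(1 * ns, 3 * ns, 10 * ns));
	return 0;
}
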
diff --git a/drivers/net/dsa/sja1105/sja1105_sgmii.h b/drivers/net/dsa/sja1105/sja1105_sgmii.h
new file mode 100644
index 000000000000..24d9bc046e70
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_sgmii.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright 2020, NXP Semiconductors
+ */
+#ifndef _SJA1105_SGMII_H
+#define _SJA1105_SGMII_H
+
+#define SJA1105_SGMII_PORT 4
+
+/* DIGITAL_CONTROL_1 (address 1f8000h) */
+#define SJA1105_DC1 0x8000
+#define SJA1105_DC1_VS_RESET BIT(15)
+#define SJA1105_DC1_REMOTE_LOOPBACK BIT(14)
+#define SJA1105_DC1_EN_VSMMD1 BIT(13)
+#define SJA1105_DC1_POWER_SAVE BIT(11)
+#define SJA1105_DC1_CLOCK_STOP_EN BIT(10)
+#define SJA1105_DC1_MAC_AUTO_SW BIT(9)
+#define SJA1105_DC1_INIT BIT(8)
+#define SJA1105_DC1_TX_DISABLE BIT(4)
+#define SJA1105_DC1_AUTONEG_TIMER_OVRR BIT(3)
+#define SJA1105_DC1_BYP_POWERUP BIT(1)
+#define SJA1105_DC1_PHY_MODE_CONTROL BIT(0)
+
+/* DIGITAL_CONTROL_2 register (address 1f80E1h) */
+#define SJA1105_DC2 0x80e1
+#define SJA1105_DC2_TX_POL_INV_DISABLE BIT(4)
+#define SJA1105_DC2_RX_POL_INV BIT(0)
+
+/* DIGITAL_ERROR_CNT register (address 1f80E2h) */
+#define SJA1105_DEC 0x80e2
+#define SJA1105_DEC_ICG_EC_ENA BIT(4)
+#define SJA1105_DEC_CLEAR_ON_READ BIT(0)
+
+/* AUTONEG_CONTROL register (address 1f8001h) */
+#define SJA1105_AC 0x8001
+#define SJA1105_AC_MII_CONTROL BIT(8)
+#define SJA1105_AC_SGMII_LINK BIT(4)
+#define SJA1105_AC_PHY_MODE BIT(3)
+#define SJA1105_AC_AUTONEG_MODE(x) (((x) << 1) & GENMASK(2, 1))
+#define SJA1105_AC_AUTONEG_MODE_SGMII SJA1105_AC_AUTONEG_MODE(2)
+
+/* AUTONEG_INTR_STATUS register (address 1f8002h) */
+#define SJA1105_AIS 0x8002
+#define SJA1105_AIS_LINK_STATUS(x) (!!((x) & BIT(4)))
+#define SJA1105_AIS_SPEED(x) (((x) & GENMASK(3, 2)) >> 2)
+#define SJA1105_AIS_DUPLEX_MODE(x) (!!((x) & BIT(1)))
+#define SJA1105_AIS_COMPLETE(x) (!!((x) & BIT(0)))
+
+/* DEBUG_CONTROL register (address 1f8005h) */
+#define SJA1105_DC 0x8005
+#define SJA1105_DC_SUPPRESS_LOS BIT(4)
+#define SJA1105_DC_RESTART_SYNC BIT(0)
+
+#endif
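
To show the AUTONEG_INTR_STATUS field macros in action, here is a decode of a hypothetical readback value of 0x1a (chosen purely for illustration), with the BIT()/GENMASK() helpers expanded so the sketch builds in userspace:

#include <stdio.h>

#define SJA1105_AIS_LINK_STATUS(x)	(!!((x) & (1 << 4)))
#define SJA1105_AIS_SPEED(x)		(((x) & 0xc) >> 2)
#define SJA1105_AIS_DUPLEX_MODE(x)	(!!((x) & (1 << 1)))
#define SJA1105_AIS_COMPLETE(x)		(!!((x) & (1 << 0)))

int main(void)
{
	unsigned int ais = 0x1a;	/* 0b11010 */

	/* link=1, speed_code=2 (1000 Mbps), duplex=1, an_complete=0 */
	printf("link=%u speed_code=%u duplex=%u an_complete=%u\n",
	       SJA1105_AIS_LINK_STATUS(ais), SJA1105_AIS_SPEED(ais),
	       SJA1105_AIS_DUPLEX_MODE(ais), SJA1105_AIS_COMPLETE(ais));
	return 0;
}
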
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 29b127f3bf9c..04bdb72ae6b6 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -458,6 +458,8 @@ static struct sja1105_regs sja1105et_regs = {
.rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
.ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
.ptpschtm = 0x12, /* Spans 0x12 to 0x13 */
+ .ptppinst = 0x14,
+ .ptppindur = 0x16,
.ptp_control = 0x17,
.ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
.ptpclkrate = 0x1A,
@@ -474,11 +476,13 @@ static struct sja1105_regs sja1105pqrs_regs = {
/* UM10944.pdf, Table 86, ACU Register overview */
.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
.pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
+ .sgmii = 0x1F0000,
.rmii_pll1 = 0x10000A,
.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
.mac = {0x200, 0x202, 0x204, 0x206, 0x208},
.mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
.mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
+ .ether_stats = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460},
/* UM11040.pdf, Table 114 */
.mii_tx_clk = {0x100013, 0x100019, 0x10001F, 0x100025, 0x10002B},
.mii_rx_clk = {0x100014, 0x10001A, 0x100020, 0x100026, 0x10002C},
@@ -490,10 +494,13 @@ static struct sja1105_regs sja1105pqrs_regs = {
.qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
.ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
.ptpschtm = 0x13, /* Spans 0x13 to 0x14 */
+ .ptppinst = 0x15,
+ .ptppindur = 0x17,
.ptp_control = 0x18,
.ptpclkval = 0x19,
.ptpclkrate = 0x1B,
.ptpclkcorp = 0x1E,
+ .ptpsyncts = 0x1F,
};
struct sja1105_info sja1105e_info = {
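
The PCS register offsets in sja1105_sgmii.h are relative to the .sgmii base registered here, so the absolute addresses quoted in that header fall out by simple addition; for example, DIGITAL_CONTROL_1:

#include <stdio.h>

int main(void)
{
	unsigned int sgmii_base = 0x1f0000;	/* sja1105pqrs_regs.sgmii */
	unsigned int dc1_offset = 0x8000;	/* SJA1105_DC1 */

	/* Matches the "address 1f8000h" comment in sja1105_sgmii.h */
	printf("0x%x\n", sgmii_base + dc1_offset);
	return 0;
}
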
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index 63d2311817c4..bbfe034910a0 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -102,12 +102,13 @@ static size_t sja1105et_avb_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
-static size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
- enum packing_op op)
+size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
{
const size_t size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY;
struct sja1105_avb_params_entry *entry = entry_ptr;
+ sja1105_packing(buf, &entry->cas_master, 126, 126, size, op);
sja1105_packing(buf, &entry->destmeta, 125, 78, size, op);
sja1105_packing(buf, &entry->srcmeta, 77, 30, size, op);
return size;
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
index f4a5c5c04311..8afafb6aef12 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -230,6 +230,7 @@ struct sja1105_l2_policing_entry {
};
struct sja1105_avb_params_entry {
+ u64 cas_master;
u64 destmeta;
u64 srcmeta;
};
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
index fa6750d973d7..77e547b4cd89 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.c
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -28,33 +28,6 @@ static s64 sja1105_delta_to_ns(s64 delta)
return delta * 200;
}
-/* Calculate the first base_time in the future that satisfies this
- * relationship:
- *
- * future_base_time = base_time + N x cycle_time >= now, or
- *
- * now - base_time
- * N >= ---------------
- * cycle_time
- *
- * Because N is an integer, the ceiling value of the above "a / b" ratio
- * is in fact precisely the floor value of "(a + b - 1) / b", which is
- * easier to calculate only having integer division tools.
- */
-static s64 future_base_time(s64 base_time, s64 cycle_time, s64 now)
-{
- s64 a, b, n;
-
- if (base_time >= now)
- return base_time;
-
- a = now - base_time;
- b = cycle_time;
- n = div_s64(a + b - 1, b);
-
- return base_time + n * cycle_time;
-}
-
static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
{
struct sja1105_tas_data *tas_data = &priv->tas_data;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 6e21a2a5cf01..19ce4aa0973b 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -664,16 +664,6 @@ static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
VSC73XX_MAC_CFG_TX_EN |
VSC73XX_MAC_CFG_RX_EN);
- /* Max length, we can do up to 9.6 KiB, so allow that.
- * According to application not "VSC7398 Jumbo Frames" setting
- * up the MTU to 9.6 KB does not affect the performance on standard
- * frames, so just enable it. It is clear from the application note
- * that "9.6 kilobytes" == 9600 bytes.
- */
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
- port,
- VSC73XX_MAXLEN, 9600);
-
/* Flow control for the CPU port:
* Use a zero delay pause frame when pause condition is left
* Obey pause control frames
@@ -1030,6 +1020,24 @@ static void vsc73xx_get_ethtool_stats(struct dsa_switch *ds, int port,
}
}
+static int vsc73xx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ return vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAXLEN, new_mtu);
+}
+
+/* According to the application note "VSC7398 Jumbo Frames", setting
+ * the MTU to 9.6 KB does not affect the performance on standard
+ * frames. It is clear from the application note that
+ * "9.6 kilobytes" == 9600 bytes.
+ */
+static int vsc73xx_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return 9600;
+}
+
static const struct dsa_switch_ops vsc73xx_ds_ops = {
.get_tag_protocol = vsc73xx_get_tag_protocol,
.setup = vsc73xx_setup,
@@ -1041,6 +1049,8 @@ static const struct dsa_switch_ops vsc73xx_ds_ops = {
.get_sset_count = vsc73xx_get_sset_count,
.port_enable = vsc73xx_port_enable,
.port_disable = vsc73xx_port_disable,
+ .port_change_mtu = vsc73xx_change_mtu,
+ .port_max_mtu = vsc73xx_get_max_mtu,
};
static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 3031a5fc5427..bab3a9bb5e6f 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -42,7 +42,6 @@
#include <linux/u64_stats_sync.h>
#define DRV_NAME "dummy"
-#define DRV_VERSION "1.0"
static int numdummies = 1;
@@ -104,7 +103,6 @@ static void dummy_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops dummy_ethtool_ops = {
@@ -212,4 +210,3 @@ module_init(dummy_init_module);
module_exit(dummy_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 8cafd06ff0c4..b762176a1406 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -60,8 +60,6 @@
*/
#define DRV_NAME "3c509"
-#define DRV_VERSION "1.20"
-#define DRV_RELDATE "04Feb2008"
/* A few values that may be tweaked. */
@@ -87,13 +85,12 @@
#include <linux/device.h>
#include <linux/eisa.h>
#include <linux/bitops.h>
+#include <linux/vermagic.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
-static char version[] = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
-
#ifdef EL3_DEBUG
static int el3_debug = EL3_DEBUG;
#else
@@ -547,8 +544,6 @@ static int el3_common_init(struct net_device *dev)
dev->name, dev->base_addr, if_names[(dev->if_port & 0x03)],
dev->dev_addr, dev->irq);
- if (el3_debug > 0)
- pr_info("%s", version);
return 0;
}
@@ -1143,7 +1138,6 @@ el3_netdev_set_ecmd(struct net_device *dev,
static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int el3_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 1e233e2f0a5a..90312fcd6319 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -22,12 +22,8 @@
*/
+#include <linux/vermagic.h>
#define DRV_NAME "3c515"
-#define DRV_VERSION "0.99t-ac"
-#define DRV_RELDATE "28-Oct-2002"
-
-static char *version =
-DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " becker@scyld.com and others\n";
#define CORKSCREW 1
@@ -84,7 +80,6 @@ static int max_interrupt_work = 20;
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("3Com 3c515 Corkscrew driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
/* "Knobs" for adjusting internal parameters. */
/* Put out somewhat more debugging messages. (0 - no msg, 1 minimal msgs). */
@@ -418,8 +413,6 @@ int init_module(void)
int found = 0;
if (debug >= 0)
corkscrew_debug = debug;
- if (corkscrew_debug)
- pr_debug("%s", version);
while (corkscrew_scan(-1))
found++;
return found ? 0 : -ENODEV;
@@ -429,16 +422,10 @@ int init_module(void)
struct net_device *tc515_probe(int unit)
{
struct net_device *dev = corkscrew_scan(unit);
- static int printed;
if (!dev)
return ERR_PTR(-ENODEV);
- if (corkscrew_debug > 0 && !printed) {
- printed = 1;
- pr_debug("%s", version);
- }
-
return dev;
}
#endif /* not MODULE */
@@ -1540,7 +1527,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "ISA 0x%lx",
dev->base_addr);
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index d47cde6c5f08..09816e84314d 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -23,7 +23,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "3c589_cs"
-#define DRV_VERSION "1.162-ac"
#include <linux/module.h>
#include <linux/kernel.h>
@@ -482,7 +481,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 14fce6658106..5ed33c2c4742 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -127,7 +127,6 @@ static const int multicast_filter_limit = 32;
#include "typhoon.h"
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
-MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
@@ -311,7 +310,7 @@ enum state_values {
* cannot pass a read, so this forces current writes to post.
*/
#define typhoon_post_pci_writes(x) \
- do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
+ do { if (likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while (0)
/* We'll wait up to six seconds for a reset, and half a second normally.
*/
@@ -381,7 +380,7 @@ typhoon_reset(void __iomem *ioaddr, int wait_type)
int i, err = 0;
int timeout;
- if(wait_type == WaitNoSleep)
+ if (wait_type == WaitNoSleep)
timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
else
timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
@@ -394,13 +393,13 @@ typhoon_reset(void __iomem *ioaddr, int wait_type)
udelay(1);
iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
- if(wait_type != NoWait) {
- for(i = 0; i < timeout; i++) {
- if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
+ if (wait_type != NoWait) {
+ for (i = 0; i < timeout; i++) {
+ if (ioread32(ioaddr + TYPHOON_REG_STATUS) ==
TYPHOON_STATUS_WAITING_FOR_HOST)
goto out;
- if(wait_type == WaitSleep)
+ if (wait_type == WaitSleep)
schedule_timeout_uninterruptible(1);
else
udelay(TYPHOON_UDELAY);
@@ -423,7 +422,7 @@ out:
* which should be enough (I've see it work well at 100us, but still
* saw occasional problems.)
*/
- if(wait_type == WaitSleep)
+ if (wait_type == WaitSleep)
msleep(5);
else
udelay(500);
@@ -435,8 +434,8 @@ typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
{
int i, err = 0;
- for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
- if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
+ for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
+ if (ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
goto out;
udelay(TYPHOON_UDELAY);
}
@@ -450,7 +449,7 @@ out:
static inline void
typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
{
- if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
+ if (resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
netif_carrier_off(dev);
else
netif_carrier_on(dev);
@@ -466,7 +465,7 @@ typhoon_hello(struct typhoon *tp)
* card in a long while. If the lock is held, then we're in the
* process of issuing a command, so we don't need to respond.
*/
- if(spin_trylock(&tp->command_lock)) {
+ if (spin_trylock(&tp->command_lock)) {
cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
typhoon_inc_cmd_index(&ring->lastWrite, 1);
@@ -490,32 +489,32 @@ typhoon_process_response(struct typhoon *tp, int resp_size,
cleared = le32_to_cpu(indexes->respCleared);
ready = le32_to_cpu(indexes->respReady);
- while(cleared != ready) {
+ while (cleared != ready) {
resp = (struct resp_desc *)(base + cleared);
count = resp->numDesc + 1;
- if(resp_save && resp->seqNo) {
- if(count > resp_size) {
+ if (resp_save && resp->seqNo) {
+ if (count > resp_size) {
resp_save->flags = TYPHOON_RESP_ERROR;
goto cleanup;
}
wrap_len = 0;
len = count * sizeof(*resp);
- if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
+ if (unlikely(cleared + len > RESPONSE_RING_SIZE)) {
wrap_len = cleared + len - RESPONSE_RING_SIZE;
len = RESPONSE_RING_SIZE - cleared;
}
memcpy(resp_save, resp, len);
- if(unlikely(wrap_len)) {
+ if (unlikely(wrap_len)) {
resp_save += len / sizeof(*resp);
memcpy(resp_save, base, wrap_len);
}
resp_save = NULL;
- } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
+ } else if (resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
typhoon_media_status(tp->dev, resp);
- } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
+ } else if (resp->cmd == TYPHOON_CMD_HELLO_RESP) {
typhoon_hello(tp);
} else {
netdev_err(tp->dev,
@@ -589,19 +588,19 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
freeCmd = typhoon_num_free_cmd(tp);
freeResp = typhoon_num_free_resp(tp);
- if(freeCmd < num_cmd || freeResp < num_resp) {
+ if (freeCmd < num_cmd || freeResp < num_resp) {
netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
freeCmd, num_cmd, freeResp, num_resp);
err = -ENOMEM;
goto out;
}
- if(cmd->flags & TYPHOON_CMD_RESPOND) {
+ if (cmd->flags & TYPHOON_CMD_RESPOND) {
/* If we're expecting a response, but the caller hasn't given
* us a place to put it, we'll provide one.
*/
tp->awaiting_resp = 1;
- if(resp == NULL) {
+ if (resp == NULL) {
resp = &local_resp;
num_resp = 1;
}
@@ -609,13 +608,13 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
wrap_len = 0;
len = num_cmd * sizeof(*cmd);
- if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
+ if (unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
len = COMMAND_RING_SIZE - ring->lastWrite;
}
memcpy(ring->ringBase + ring->lastWrite, cmd, len);
- if(unlikely(wrap_len)) {
+ if (unlikely(wrap_len)) {
struct cmd_desc *wrap_ptr = cmd;
wrap_ptr += len / sizeof(*cmd);
memcpy(ring->ringBase, wrap_ptr, wrap_len);
@@ -629,7 +628,7 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
typhoon_post_pci_writes(tp->ioaddr);
- if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
+ if ((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
goto out;
/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
@@ -649,14 +648,14 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
* wait here.
*/
got_resp = 0;
- for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
- if(indexes->respCleared != indexes->respReady)
+ for (i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
+ if (indexes->respCleared != indexes->respReady)
got_resp = typhoon_process_response(tp, num_resp,
resp);
udelay(TYPHOON_UDELAY);
}
- if(!got_resp) {
+ if (!got_resp) {
err = -ETIMEDOUT;
goto out;
}
@@ -664,11 +663,11 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
/* Collect the error response even if we don't care about the
* rest of the response
*/
- if(resp->flags & TYPHOON_RESP_ERROR)
+ if (resp->flags & TYPHOON_RESP_ERROR)
err = -EIO;
out:
- if(tp->awaiting_resp) {
+ if (tp->awaiting_resp) {
tp->awaiting_resp = 0;
smp_wmb();
@@ -679,7 +678,7 @@ out:
* time. So, check for it, and interrupt ourselves if this
* is the case.
*/
- if(indexes->respCleared != indexes->respReady)
+ if (indexes->respCleared != indexes->respReady)
iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
}
@@ -749,7 +748,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
* between marking the queue awake and updating the cleared index.
* Just loop and it will appear. This comes from the acenic driver.
*/
- while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
+ while (unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
smp_rmb();
first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
@@ -761,7 +760,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
first_txd->tx_addr = (u64)((unsigned long) skb);
first_txd->processFlags = 0;
- if(skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
/* The 3XP will figure out if this is UDP/TCP */
first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
@@ -789,7 +788,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
/* No need to worry about padding packet -- the firmware pads
* it with zeros to ETH_ZLEN for us.
*/
- if(skb_shinfo(skb)->nr_frags == 0) {
+ if (skb_shinfo(skb)->nr_frags == 0) {
skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
@@ -841,14 +840,14 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
*/
numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
- if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
+ if (typhoon_num_free_tx(txRing) < (numDesc + 2)) {
netif_stop_queue(dev);
/* A Tx complete IRQ could have gotten between, making
* the ring free again. Only need to recheck here, since
* Tx is serialized.
*/
- if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
+ if (typhoon_num_free_tx(txRing) >= (numDesc + 2))
netif_wake_queue(dev);
}
@@ -864,7 +863,7 @@ typhoon_set_rx_mode(struct net_device *dev)
__le16 filter;
filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
- if(dev->flags & IFF_PROMISC) {
+ if (dev->flags & IFF_PROMISC) {
filter |= TYPHOON_RX_FILTER_PROMISCOUS;
} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
@@ -906,7 +905,7 @@ typhoon_do_get_stats(struct typhoon *tp)
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
- if(err < 0)
+ if (err < 0)
return err;
/* 3Com's Linux driver uses txMultipleCollisions as it's
@@ -954,10 +953,10 @@ typhoon_get_stats(struct net_device *dev)
struct net_device_stats *saved = &tp->stats_saved;
smp_rmb();
- if(tp->card_state == Sleeping)
+ if (tp->card_state == Sleeping)
return saved;
- if(typhoon_do_get_stats(tp) < 0) {
+ if (typhoon_do_get_stats(tp) < 0) {
netdev_err(dev, "error getting stats\n");
return saved;
}
@@ -974,12 +973,12 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct resp_desc xp_resp[3];
smp_rmb();
- if(tp->card_state == Sleeping) {
+ if (tp->card_state == Sleeping) {
strlcpy(info->fw_version, "Sleep image",
sizeof(info->fw_version));
} else {
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
- if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
+ if (typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
strlcpy(info->fw_version, "Unknown runtime",
sizeof(info->fw_version));
} else {
@@ -1026,7 +1025,7 @@ typhoon_get_link_ksettings(struct net_device *dev,
break;
}
- if(tp->capabilities & TYPHOON_FIBER) {
+ if (tp->capabilities & TYPHOON_FIBER) {
supported |= SUPPORTED_FIBRE;
advertising |= ADVERTISED_FIBRE;
cmd->base.port = PORT_FIBRE;
@@ -1043,7 +1042,7 @@ typhoon_get_link_ksettings(struct net_device *dev,
cmd->base.speed = tp->speed;
cmd->base.duplex = tp->duplex;
cmd->base.phy_address = 0;
- if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
+ if (tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
cmd->base.autoneg = AUTONEG_ENABLE;
else
cmd->base.autoneg = AUTONEG_DISABLE;
@@ -1091,7 +1090,7 @@ typhoon_set_link_ksettings(struct net_device *dev,
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
xp_cmd.parm1 = xcvr;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto out;
tp->xcvr_select = xcvr;
@@ -1114,9 +1113,9 @@ typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
wol->supported = WAKE_PHY | WAKE_MAGIC;
wol->wolopts = 0;
- if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
+ if (tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
wol->wolopts |= WAKE_PHY;
- if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+ if (tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
wol->wolopts |= WAKE_MAGIC;
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
@@ -1126,13 +1125,13 @@ typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct typhoon *tp = netdev_priv(dev);
- if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
return -EINVAL;
tp->wol_events = 0;
- if(wol->wolopts & WAKE_PHY)
+ if (wol->wolopts & WAKE_PHY)
tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
- if(wol->wolopts & WAKE_MAGIC)
+ if (wol->wolopts & WAKE_MAGIC)
tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
return 0;
@@ -1163,8 +1162,8 @@ typhoon_wait_interrupt(void __iomem *ioaddr)
{
int i, err = 0;
- for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
- if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
+ for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
+ if (ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
TYPHOON_INTR_BOOTCMD)
goto out;
udelay(TYPHOON_UDELAY);
@@ -1356,7 +1355,7 @@ typhoon_download_firmware(struct typhoon *tp)
*/
err = -ENOMEM;
dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
- if(!dpage) {
+ if (!dpage) {
netdev_err(tp->dev, "no DMA mem for firmware\n");
goto err_out;
}
@@ -1369,7 +1368,7 @@ typhoon_download_firmware(struct typhoon *tp)
ioaddr + TYPHOON_REG_INTR_MASK);
err = -ETIMEDOUT;
- if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
netdev_err(tp->dev, "card ready timeout\n");
goto err_out_irq;
}
@@ -1398,16 +1397,16 @@ typhoon_download_firmware(struct typhoon *tp)
* last write to the command register to post, so
* we don't need a typhoon_post_pci_writes() after it.
*/
- for(i = 0; i < numSections; i++) {
+ for (i = 0; i < numSections; i++) {
sHdr = (struct typhoon_section_header *) image_data;
image_data += sizeof(struct typhoon_section_header);
load_addr = le32_to_cpu(sHdr->startAddr);
section_len = le32_to_cpu(sHdr->len);
- while(section_len) {
+ while (section_len) {
len = min_t(u32, section_len, PAGE_SIZE);
- if(typhoon_wait_interrupt(ioaddr) < 0 ||
+ if (typhoon_wait_interrupt(ioaddr) < 0 ||
ioread32(ioaddr + TYPHOON_REG_STATUS) !=
TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
netdev_err(tp->dev, "segment ready timeout\n");
@@ -1440,7 +1439,7 @@ typhoon_download_firmware(struct typhoon *tp)
}
}
- if(typhoon_wait_interrupt(ioaddr) < 0 ||
+ if (typhoon_wait_interrupt(ioaddr) < 0 ||
ioread32(ioaddr + TYPHOON_REG_STATUS) !=
TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
netdev_err(tp->dev, "final segment ready timeout\n");
@@ -1449,7 +1448,7 @@ typhoon_download_firmware(struct typhoon *tp)
iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
- if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
+ if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
ioread32(ioaddr + TYPHOON_REG_STATUS));
goto err_out_irq;
@@ -1472,7 +1471,7 @@ typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
void __iomem *ioaddr = tp->ioaddr;
- if(typhoon_wait_status(ioaddr, initial_status) < 0) {
+ if (typhoon_wait_status(ioaddr, initial_status) < 0) {
netdev_err(tp->dev, "boot ready timeout\n");
goto out_timeout;
}
@@ -1483,7 +1482,7 @@ typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
ioaddr + TYPHOON_REG_COMMAND);
- if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
+ if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
ioread32(ioaddr + TYPHOON_REG_STATUS));
goto out_timeout;
@@ -1513,17 +1512,17 @@ typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
int dma_len;
int type;
- while(lastRead != le32_to_cpu(*index)) {
+ while (lastRead != le32_to_cpu(*index)) {
tx = (struct tx_desc *) (txRing->ringBase + lastRead);
type = tx->flags & TYPHOON_TYPE_MASK;
- if(type == TYPHOON_TX_DESC) {
+ if (type == TYPHOON_TX_DESC) {
/* This tx_desc describes a packet.
*/
unsigned long ptr = tx->tx_addr;
struct sk_buff *skb = (struct sk_buff *) ptr;
dev_kfree_skb_irq(skb);
- } else if(type == TYPHOON_FRAG_DESC) {
+ } else if (type == TYPHOON_FRAG_DESC) {
/* This tx_desc describes a memory mapping. Free it.
*/
skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
@@ -1548,7 +1547,7 @@ typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
/* This will need changing if we start to use the Hi Tx ring. */
lastRead = typhoon_clean_tx(tp, txRing, index);
- if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
+ if (netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
lastRead, TXLO_ENTRIES) > (numDesc + 2))
netif_wake_queue(tp->dev);
@@ -1564,7 +1563,7 @@ typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
struct basic_ring *ring = &tp->rxBuffRing;
struct rx_free *r;
- if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
+ if ((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
le32_to_cpu(indexes->rxBuffCleared)) {
/* no room in ring, just drop the skb
*/
@@ -1595,12 +1594,12 @@ typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
rxb->skb = NULL;
- if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
+ if ((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
le32_to_cpu(indexes->rxBuffCleared))
return -ENOMEM;
skb = netdev_alloc_skb(tp->dev, PKT_BUF_SZ);
- if(!skb)
+ if (!skb)
return -ENOMEM;
#if 0
@@ -1647,7 +1646,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
received = 0;
local_ready = le32_to_cpu(*ready);
rxaddr = le32_to_cpu(*cleared);
- while(rxaddr != local_ready && budget > 0) {
+ while (rxaddr != local_ready && budget > 0) {
rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
idx = rx->addr;
rxb = &tp->rxbuffers[idx];
@@ -1656,14 +1655,14 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
typhoon_inc_rx_index(&rxaddr, 1);
- if(rx->flags & TYPHOON_RX_ERROR) {
+ if (rx->flags & TYPHOON_RX_ERROR) {
typhoon_recycle_rx_skb(tp, idx);
continue;
}
pkt_len = le16_to_cpu(rx->frameLen);
- if(pkt_len < rx_copybreak &&
+ if (pkt_len < rx_copybreak &&
(new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) {
skb_reserve(new_skb, 2);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
@@ -1685,7 +1684,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
new_skb->protocol = eth_type_trans(new_skb, tp->dev);
csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
- if(csum_bits ==
+ if (csum_bits ==
(TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
csum_bits ==
(TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
@@ -1711,11 +1710,11 @@ typhoon_fill_free_ring(struct typhoon *tp)
{
u32 i;
- for(i = 0; i < RXENT_ENTRIES; i++) {
+ for (i = 0; i < RXENT_ENTRIES; i++) {
struct rxbuff_ent *rxb = &tp->rxbuffers[i];
- if(rxb->skb)
+ if (rxb->skb)
continue;
- if(typhoon_alloc_rx_skb(tp, i) < 0)
+ if (typhoon_alloc_rx_skb(tp, i) < 0)
break;
}
}
@@ -1728,25 +1727,25 @@ typhoon_poll(struct napi_struct *napi, int budget)
int work_done;
rmb();
- if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
+ if (!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
typhoon_process_response(tp, 0, NULL);
- if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
+ if (le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
work_done = 0;
- if(indexes->rxHiCleared != indexes->rxHiReady) {
+ if (indexes->rxHiCleared != indexes->rxHiReady) {
work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
&indexes->rxHiCleared, budget);
}
- if(indexes->rxLoCleared != indexes->rxLoReady) {
+ if (indexes->rxLoCleared != indexes->rxLoReady) {
work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
&indexes->rxLoCleared, budget - work_done);
}
- if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
+ if (le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
/* rxBuff ring is empty, try to fill it. */
typhoon_fill_free_ring(tp);
}
@@ -1770,7 +1769,7 @@ typhoon_interrupt(int irq, void *dev_instance)
u32 intr_status;
intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
- if(!(intr_status & TYPHOON_INTR_HOST_INT))
+ if (!(intr_status & TYPHOON_INTR_HOST_INT))
return IRQ_NONE;
iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
@@ -1790,9 +1789,9 @@ typhoon_free_rx_rings(struct typhoon *tp)
{
u32 i;
- for(i = 0; i < RXENT_ENTRIES; i++) {
+ for (i = 0; i < RXENT_ENTRIES; i++) {
struct rxbuff_ent *rxb = &tp->rxbuffers[i];
- if(rxb->skb) {
+ if (rxb->skb) {
pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(rxb->skb);
@@ -1812,7 +1811,7 @@ typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
xp_cmd.parm1 = events;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0) {
+ if (err < 0) {
netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
err);
return err;
@@ -1820,12 +1819,12 @@ typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0) {
+ if (err < 0) {
netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
return err;
}
- if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
+ if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
return -ETIMEDOUT;
/* Since we cannot monitor the status of the link while sleeping,
@@ -1852,7 +1851,7 @@ typhoon_wakeup(struct typhoon *tp, int wait_type)
* the old firmware pay for the reset.
*/
iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
- if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
+ if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
return typhoon_reset(ioaddr, wait_type);
@@ -1871,12 +1870,12 @@ typhoon_start_runtime(struct typhoon *tp)
typhoon_fill_free_ring(tp);
err = typhoon_download_firmware(tp);
- if(err < 0) {
+ if (err < 0) {
netdev_err(tp->dev, "cannot load runtime on 3XP\n");
goto error_out;
}
- if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
+ if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
netdev_err(tp->dev, "cannot boot 3XP\n");
err = -EIO;
goto error_out;
@@ -1885,14 +1884,14 @@ typhoon_start_runtime(struct typhoon *tp)
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
/* Disable IRQ coalescing -- we can reenable it when 3Com gives
@@ -1901,38 +1900,38 @@ typhoon_start_runtime(struct typhoon *tp)
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
xp_cmd.parm1 = 0;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
xp_cmd.parm1 = tp->xcvr_select;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
xp_cmd.parm2 = tp->offload;
xp_cmd.parm3 = tp->offload;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
typhoon_set_rx_mode(dev);
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
+ if (err < 0)
goto error_out;
tp->card_state = Running;
@@ -1972,13 +1971,13 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
/* Wait 1/2 sec for any outstanding transmits to occur
* We'll cleanup after the reset if this times out.
*/
- for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
- if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
+ for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
+ if (indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
break;
udelay(TYPHOON_UDELAY);
}
- if(i == TYPHOON_WAIT_TIMEOUT)
+ if (i == TYPHOON_WAIT_TIMEOUT)
netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
@@ -1995,16 +1994,16 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
+ if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");
- if(typhoon_reset(ioaddr, wait_type) < 0) {
+ if (typhoon_reset(ioaddr, wait_type) < 0) {
netdev_err(tp->dev, "unable to reset 3XP\n");
return -ETIMEDOUT;
}
/* cleanup any outstanding Tx packets */
- if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
+ if (indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
}
@@ -2017,7 +2016,7 @@ typhoon_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct typhoon *tp = netdev_priv(dev);
- if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
+ if (typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
netdev_warn(dev, "could not reset in tx timeout\n");
goto truly_dead;
}
@@ -2026,7 +2025,7 @@ typhoon_tx_timeout(struct net_device *dev, unsigned int txqueue)
typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
typhoon_free_rx_rings(tp);
- if(typhoon_start_runtime(tp) < 0) {
+ if (typhoon_start_runtime(tp) < 0) {
netdev_err(dev, "could not start runtime in tx timeout\n");
goto truly_dead;
}
@@ -2051,20 +2050,20 @@ typhoon_open(struct net_device *dev)
goto out;
err = typhoon_wakeup(tp, WaitSleep);
- if(err < 0) {
+ if (err < 0) {
netdev_err(dev, "unable to wakeup device\n");
goto out_sleep;
}
err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
dev->name, dev);
- if(err < 0)
+ if (err < 0)
goto out_sleep;
napi_enable(&tp->napi);
err = typhoon_start_runtime(tp);
- if(err < 0) {
+ if (err < 0) {
napi_disable(&tp->napi);
goto out_irq;
}
@@ -2076,13 +2075,13 @@ out_irq:
free_irq(dev->irq, dev);
out_sleep:
- if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
netdev_err(dev, "unable to reboot into sleep img\n");
typhoon_reset(tp->ioaddr, NoWait);
goto out;
}
- if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
+ if (typhoon_sleep(tp, PCI_D3hot, 0) < 0)
netdev_err(dev, "unable to go back to sleep\n");
out:
@@ -2097,7 +2096,7 @@ typhoon_close(struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&tp->napi);
- if(typhoon_stop_runtime(tp, WaitSleep) < 0)
+ if (typhoon_stop_runtime(tp, WaitSleep) < 0)
netdev_err(dev, "unable to stop runtime\n");
/* Make sure there is no irq handler running on a different CPU. */
@@ -2106,10 +2105,10 @@ typhoon_close(struct net_device *dev)
typhoon_free_rx_rings(tp);
typhoon_init_rings(tp);
- if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
+ if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
netdev_err(dev, "unable to boot sleep image\n");
- if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
+ if (typhoon_sleep(tp, PCI_D3hot, 0) < 0)
netdev_err(dev, "unable to put card to sleep\n");
return 0;
@@ -2124,15 +2123,15 @@ typhoon_resume(struct pci_dev *pdev)
/* If we're down, resume when we are upped.
*/
- if(!netif_running(dev))
+ if (!netif_running(dev))
return 0;
- if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
+ if (typhoon_wakeup(tp, WaitNoSleep) < 0) {
netdev_err(dev, "critical: could not wake up in resume\n");
goto reset;
}
- if(typhoon_start_runtime(tp) < 0) {
+ if (typhoon_start_runtime(tp) < 0) {
netdev_err(dev, "critical: could not start runtime in resume\n");
goto reset;
}
@@ -2154,16 +2153,16 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
/* If we're down, we're already suspended.
*/
- if(!netif_running(dev))
+ if (!netif_running(dev))
return 0;
/* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
- if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+ if (tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
netif_device_detach(dev);
- if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
+ if (typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
netdev_err(dev, "unable to stop runtime\n");
goto need_resume;
}
@@ -2171,7 +2170,7 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
typhoon_free_rx_rings(tp);
typhoon_init_rings(tp);
- if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+ if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
netdev_err(dev, "unable to boot sleep image\n");
goto need_resume;
}
@@ -2179,19 +2178,19 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
- if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
+ if (typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
netdev_err(dev, "unable to set mac address in suspend\n");
goto need_resume;
}
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
- if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
+ if (typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
netdev_err(dev, "unable to set rx filter in suspend\n");
goto need_resume;
}
- if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
+ if (typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
netdev_err(dev, "unable to put card to sleep\n");
goto need_resume;
}
@@ -2211,10 +2210,10 @@ typhoon_test_mmio(struct pci_dev *pdev)
int mode = 0;
u32 val;
- if(!ioaddr)
+ if (!ioaddr)
goto out;
- if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
+ if (ioread32(ioaddr + TYPHOON_REG_STATUS) !=
TYPHOON_STATUS_WAITING_FOR_HOST)
goto out_unmap;
@@ -2227,12 +2226,12 @@ typhoon_test_mmio(struct pci_dev *pdev)
* The 50usec delay is arbitrary -- it could probably be smaller.
*/
val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
- if((val & TYPHOON_INTR_SELF) == 0) {
+ if ((val & TYPHOON_INTR_SELF) == 0) {
iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
udelay(50);
val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
- if(val & TYPHOON_INTR_SELF)
+ if (val & TYPHOON_INTR_SELF)
mode = 1;
}
@@ -2245,7 +2244,7 @@ out_unmap:
pci_iounmap(pdev, ioaddr);
out:
- if(!mode)
+ if (!mode)
pr_info("%s: falling back to port IO\n", pci_name(pdev));
return mode;
}
@@ -2276,7 +2275,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
const char *err_msg;
dev = alloc_etherdev(sizeof(*tp));
- if(dev == NULL) {
+ if (dev == NULL) {
err_msg = "unable to alloc new net device";
err = -ENOMEM;
goto error_out;
@@ -2284,55 +2283,55 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_NETDEV_DEV(dev, &pdev->dev);
err = pci_enable_device(pdev);
- if(err < 0) {
+ if (err < 0) {
err_msg = "unable to enable device";
goto error_out_dev;
}
err = pci_set_mwi(pdev);
- if(err < 0) {
+ if (err < 0) {
err_msg = "unable to set MWI";
goto error_out_disable;
}
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if(err < 0) {
+ if (err < 0) {
err_msg = "No usable DMA configuration";
goto error_out_mwi;
}
/* sanity checks on IO and MMIO BARs
*/
- if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
err_msg = "region #1 not a PCI IO resource, aborting";
err = -ENODEV;
goto error_out_mwi;
}
- if(pci_resource_len(pdev, 0) < 128) {
+ if (pci_resource_len(pdev, 0) < 128) {
err_msg = "Invalid PCI IO region size, aborting";
err = -ENODEV;
goto error_out_mwi;
}
- if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
err_msg = "region #1 not a PCI MMIO resource, aborting";
err = -ENODEV;
goto error_out_mwi;
}
- if(pci_resource_len(pdev, 1) < 128) {
+ if (pci_resource_len(pdev, 1) < 128) {
err_msg = "Invalid PCI MMIO region size, aborting";
err = -ENODEV;
goto error_out_mwi;
}
err = pci_request_regions(pdev, KBUILD_MODNAME);
- if(err < 0) {
+ if (err < 0) {
err_msg = "could not request regions";
goto error_out_mwi;
}
/* map our registers
*/
- if(use_mmio != 0 && use_mmio != 1)
+ if (use_mmio != 0 && use_mmio != 1)
use_mmio = typhoon_test_mmio(pdev);
ioaddr = pci_iomap(pdev, use_mmio, 128);
@@ -2346,7 +2345,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
&shared_dma);
- if(!shared) {
+ if (!shared) {
err_msg = "could not allocate DMA memory";
err = -ENOMEM;
goto error_out_remap;
@@ -2426,7 +2425,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* seem to need a little extra help to get started. Since we don't
* know how to nudge it along, just kick it.
*/
- if(xp_resp[0].numDesc != 0)
+ if (xp_resp[0].numDesc != 0)
tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
err = typhoon_sleep(tp, PCI_D3hot, 0);
@@ -2471,14 +2470,14 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* xp_resp still contains the response to the READ_VERSIONS command.
* For debugging, let the user know what version he has.
*/
- if(xp_resp[0].numDesc == 0) {
+ if (xp_resp[0].numDesc == 0) {
/* This is the Typhoon 1.0 type Sleep Image, last 16 bits
* of version is Month/Day of build.
*/
u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
monthday >> 8, monthday & 0xff);
- } else if(xp_resp[0].numDesc == 2) {
+ } else if (xp_resp[0].numDesc == 2) {
/* This is the Typhoon 1.1+ type Sleep Image
*/
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
diff --git a/drivers/net/ethernet/3com/typhoon.h b/drivers/net/ethernet/3com/typhoon.h
index 88187fc84aa3..2f634c64d5d1 100644
--- a/drivers/net/ethernet/3com/typhoon.h
+++ b/drivers/net/ethernet/3com/typhoon.h
@@ -366,7 +366,7 @@ struct resp_desc {
memset(_ptr, 0, sizeof(struct cmd_desc)); \
_ptr->flags = TYPHOON_CMD_DESC | TYPHOON_DESC_VALID; \
_ptr->cmd = command; \
- } while(0)
+ } while (0)
/* We set seqNo to 1 if we're expecting a response from this command */
#define INIT_COMMAND_WITH_RESPONSE(x, command) \
@@ -376,7 +376,7 @@ struct resp_desc {
_ptr->flags |= TYPHOON_DESC_VALID; \
_ptr->cmd = command; \
_ptr->seqNo = 1; \
- } while(0)
+ } while (0)
/* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1)
*/
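The INIT_COMMAND_* helpers in typhoon.h keep their do { ... } while (0) wrapper; only the spacing of the trailing while changes. The wrapper is the standard idiom for making a multi-statement macro behave as a single C statement so it composes safely with if/else. A sketch with a hypothetical INIT_FOO() macro:

/* If INIT_FOO() expanded to a bare { ... } block, the semicolon in
 * "INIT_FOO(ptr);" below would terminate the if statement and orphan
 * the else; wrapped in do/while (0), it is exactly one statement.
 */
#define INIT_FOO(_ptr)				\
	do {					\
		(_ptr)->flags = 0;		\
		(_ptr)->cmd = 0;		\
	} while (0)

	if (need_init)
		INIT_FOO(ptr);		/* else still pairs correctly */
	else
		cleanup(ptr);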
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 165d18405b0c..2db42211329f 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -27,8 +27,6 @@
*/
#define DRV_NAME "starfire"
-#define DRV_VERSION "2.1"
-#define DRV_RELDATE "July 6, 2008"
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -47,6 +45,7 @@
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <linux/uaccess.h>
#include <asm/io.h>
+#include <linux/vermagic.h>
/*
* The current frame processor firmware fails to checksum a fragment
@@ -165,15 +164,9 @@ static int rx_copybreak /* = 0 */;
#define FIRMWARE_RX "adaptec/starfire_rx.bin"
#define FIRMWARE_TX "adaptec/starfire_tx.bin"
-/* These identify the driver base version and may not be removed. */
-static const char version[] =
-KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
-" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_RX);
MODULE_FIRMWARE(FIRMWARE_TX);
@@ -654,13 +647,6 @@ static int starfire_init_one(struct pci_dev *pdev,
int drv_flags, io_size;
int boguscnt;
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(version);
-#endif
-
if (pci_enable_device (pdev))
return -EIO;
@@ -1853,7 +1839,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
@@ -2073,8 +2058,6 @@ static int __init starfire_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
- printk(version);
-
printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
#endif
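starfire is the first of many drivers below (greth, et131x, slicoss, sun4i-emac, acenic, altera_tse, ena, amd8111e, au1000, nmclan_cs, pcnet32, sunlance, xgbe, xgene, atlantic) to drop its hand-maintained DRV_VERSION/MODULE_VERSION strings; per-driver version numbers had long stopped tracking real changes, and the ethtool core can report the running kernel release instead. After this style of cleanup a typical get_drvinfo reduces to identity and bus location. A sketch (hypothetical foo_ names):

static void foo_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct foo_priv *fp = netdev_priv(dev);

	/* no info->version write: the core supplies a default */
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(fp->pdev),
		sizeof(info->bus_info));
}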
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 2a9f8643629c..bf546118dbc6 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1114,9 +1114,7 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
strlcpy(info->driver, dev_driver_string(greth->dev),
sizeof(info->driver));
- strlcpy(info->version, "revision: 1.0", sizeof(info->version));
strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
- strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index cb6a761d5c11..1b19385ad8a9 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2958,7 +2958,6 @@ static void et131x_get_drvinfo(struct net_device *netdev,
struct et131x_adapter *adapter = netdev_priv(netdev);
strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/agere/et131x.h b/drivers/net/ethernet/agere/et131x.h
index be9a11c02526..d0e922584d8a 100644
--- a/drivers/net/ethernet/agere/et131x.h
+++ b/drivers/net/ethernet/agere/et131x.h
@@ -46,7 +46,6 @@
*/
#define DRIVER_NAME "et131x"
-#define DRIVER_VERSION "v2.0"
/* EEPROM registers */
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 9daef4c8feef..6234fcd844ee 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -26,7 +26,6 @@
#include "slic.h"
#define DRV_NAME "slicoss"
-#define DRV_VERSION "1.0"
static const struct pci_device_id slic_id_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH,
@@ -1533,7 +1532,6 @@ static void slic_get_drvinfo(struct net_device *dev,
struct slic_device *sdev = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
}
@@ -1852,4 +1850,3 @@ module_pci_driver(slic_driver);
MODULE_DESCRIPTION("Alacritech non-accelerated SLIC driver");
MODULE_AUTHOR("Lino Sanfilippo <LinoSanfilippo@gmx.de>");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 22cadfbeedfb..18d3b4340bd4 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -33,7 +33,6 @@
#include "sun4i-emac.h"
#define DRV_NAME "sun4i-emac"
-#define DRV_VERSION "1.02"
#define EMAC_MAX_FRAME_LEN 0x0600
@@ -212,7 +211,6 @@ static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index f366faf88eee..5d192d551623 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2699,9 +2699,8 @@ static void ace_get_drvinfo(struct net_device *dev,
struct ace_private *ap = netdev_priv(dev);
strlcpy(info->driver, "acenic", sizeof(info->driver));
- snprintf(info->version, sizeof(info->version), "%i.%i.%i",
- ap->firmware_major, ap->firmware_minor,
- ap->firmware_fix);
+ snprintf(info->fw_version, sizeof(info->fw_version), "%i.%i.%i",
+ ap->firmware_major, ap->firmware_minor, ap->firmware_fix);
if (ap->pdev)
strlcpy(info->bus_info, pci_name(ap->pdev),
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 23823464f2e7..4299f1301149 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -67,7 +67,6 @@ static void tse_get_drvinfo(struct net_device *dev,
u32 rev = ioread32(&priv->mac_dev->megacore_revision);
strcpy(info->driver, "altera_tse");
- strcpy(info->version, "v8.0");
snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);
sprintf(info->bus_info, "platform");
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 1fb58f9ad80b..a250046b8e18 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1067,18 +1067,14 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_feature_rss_flow_hash_control *hash_key;
struct ena_admin_get_feat_resp get_resp;
int rc;
- hash_key = (ena_dev->rss).hash_key;
-
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_FUNCTION,
ena_dev->rss.hash_key_dma_addr,
sizeof(ena_dev->rss.hash_key), 0);
if (unlikely(rc)) {
- hash_key = NULL;
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index ced1d577b62a..9cc28b4b2627 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -221,7 +221,7 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
snprintf(*data, ETH_GSTRING_LEN,
"queue_%u_tx_%s", i, ena_stats->name);
- (*data) += ETH_GSTRING_LEN;
+ (*data) += ETH_GSTRING_LEN;
}
/* Rx stats */
for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
@@ -404,7 +404,6 @@ static void ena_get_drvinfo(struct net_device *dev,
struct ena_adapter *adapter = netdev_priv(dev);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -674,7 +673,6 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
* supports getting/setting the hash function.
*/
rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
-
if (rc) {
if (rc == -EOPNOTSUPP) {
key = NULL;
@@ -685,9 +683,6 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return rc;
}
- if (rc)
- return rc;
-
switch (ena_func) {
case ENA_ADMIN_TOEPLITZ:
func = ETH_RSS_HASH_TOP;
@@ -831,6 +826,8 @@ static int ena_set_tunable(struct net_device *netdev,
}
static const struct ethtool_ops ena_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_link_ksettings = ena_get_link_ksettings,
.get_drvinfo = ena_get_drvinfo,
.get_msglevel = ena_get_msglevel,
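The new .supported_coalesce_params member lets the ethtool core validate "ethtool -C" requests centrally: any coalesce field outside the advertised mask is rejected with -EOPNOTSUPP before the driver callback runs, which is why the long hand-rolled parameter checks are deleted from xgbe_set_coalesce() and aq_ethtool_set_coalesce() further down. A sketch of the pattern (hypothetical foo_ ops; flag names from include/linux/ethtool.h):

static const struct ethtool_ops foo_ethtool_ops = {
	/* core filters struct ethtool_coalesce against this mask,
	 * so foo_set_coalesce() only sees parameters it supports
	 */
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_coalesce	= foo_get_coalesce,
	.set_coalesce	= foo_set_coalesce,
};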
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index cada6e7e30f4..2cc765df8da3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -49,12 +49,9 @@
#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"
-static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";
-
MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5 * HZ)
@@ -460,10 +457,9 @@ static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}
-void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
- struct bpf_prog *prog,
- int first,
- int count)
+static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+ struct bpf_prog *prog,
+ int first, int count)
{
struct ena_ring *rx_ring;
int i = 0;
@@ -481,8 +477,8 @@ void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
}
}
-void ena_xdp_exchange_program(struct ena_adapter *adapter,
- struct bpf_prog *prog)
+static void ena_xdp_exchange_program(struct ena_adapter *adapter,
+ struct bpf_prog *prog)
{
struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
@@ -1555,7 +1551,7 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
}
}
-int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
struct ena_rx_buffer *rx_info;
int ret;
@@ -3104,9 +3100,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev,
strncpy(host_info->os_dist_str, utsname()->release,
sizeof(host_info->os_dist_str) - 1);
host_info->driver_version =
- (DRV_MODULE_VER_MAJOR) |
- (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
- (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
+ (DRV_MODULE_GEN_MAJOR) |
+ (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
+ (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
host_info->num_cpus = num_online_cpus();
@@ -3486,10 +3482,8 @@ static int ena_restore_device(struct ena_adapter *adapter)
netif_carrier_on(adapter->netdev);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
+ dev_err(&pdev->dev, "Device reset completed successfully\n");
adapter->last_keep_alive_jiffies = jiffies;
- dev_err(&pdev->dev,
- "Device reset completed successfully, Driver info: %s\n",
- version);
return rc;
err_disable_msix:
@@ -4127,8 +4121,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_dbg(&pdev->dev, "%s\n", __func__);
- dev_info_once(&pdev->dev, "%s", version);
-
rc = pci_enable_device_mem(pdev);
if (rc) {
dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
@@ -4471,8 +4463,6 @@ static struct pci_driver ena_pci_driver = {
static int __init ena_init(void)
{
- pr_info("%s", version);
-
ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
if (!ena_wq) {
pr_err("Failed to create workqueue\n");
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 8795e0b1dc3c..97dfd0c67e84 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,16 +45,16 @@
#include "ena_com.h"
#include "ena_eth_com.h"
-#define DRV_MODULE_VER_MAJOR 2
-#define DRV_MODULE_VER_MINOR 1
-#define DRV_MODULE_VER_SUBMINOR 0
+#define DRV_MODULE_GEN_MAJOR 2
+#define DRV_MODULE_GEN_MINOR 1
+#define DRV_MODULE_GEN_SUBMINOR 0
#define DRV_MODULE_NAME "ena"
-#ifndef DRV_MODULE_VERSION
-#define DRV_MODULE_VERSION \
- __stringify(DRV_MODULE_VER_MAJOR) "." \
- __stringify(DRV_MODULE_VER_MINOR) "." \
- __stringify(DRV_MODULE_VER_SUBMINOR) "K"
+#ifndef DRV_MODULE_GENERATION
+#define DRV_MODULE_GENERATION \
+ __stringify(DRV_MODULE_GEN_MAJOR) "." \
+ __stringify(DRV_MODULE_GEN_MINOR) "." \
+ __stringify(DRV_MODULE_GEN_SUBMINOR) "K"
#endif
#define DEVICE_NAME "Elastic Network Adapter (ENA)"
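The s/VER/GEN/ rename keeps ena's three-part generation number while the user-visible DRV_MODULE_VERSION string goes away; ena_config_host_info() still packs the parts into the single u32 the device expects, shifting minor and sub-minor into their bitfields. A worked sketch of the packing (the shift values here are assumed for illustration, not taken from ena_admin_defs.h):

	/* e.g. with MINOR_SHIFT == 8 and SUB_MINOR_SHIFT == 16 (assumed),
	 * generation 2.1.0 packs to 2 | (1 << 8) | (0 << 16) == 0x0102
	 */
	u32 drv_ver = DRV_MODULE_GEN_MAJOR |
		      (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		      (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);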
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 0f3b743425e8..7a1286f8e983 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -84,9 +84,8 @@ Revision History:
#include "amd8111e.h"
#define MODULE_NAME "amd8111e"
-#define MODULE_VERS "3.0.7"
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
-MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS);
+MODULE_DESCRIPTION("AMD8111 based 10/100 Ethernet Controller.");
MODULE_LICENSE("GPL");
module_param_array(speed_duplex, int, NULL, 0);
MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
@@ -1366,7 +1365,6 @@ static void amd8111e_get_drvinfo(struct net_device *dev,
struct amd8111e_priv *lp = netdev_priv(dev);
struct pci_dev *pci_dev = lp->pci_dev;
strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, MODULE_VERS, sizeof(info->version));
snprintf(info->fw_version, sizeof(info->fw_version),
"%u", chip_version);
strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
@@ -1875,7 +1873,6 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
/* display driver and device information */
chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
- dev_info(&pdev->dev, "AMD-8111e Driver Version: %s\n", MODULE_VERS);
dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
chip_version, dev->dev_addr);
if (lp->ext_phy_id)
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 089a4fbc61a0..9f6e3cc2ce80 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -63,14 +63,12 @@ static int au1000_debug = 3;
NETIF_MSG_LINK)
#define DRV_NAME "au1000_eth"
-#define DRV_VERSION "1.7"
#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
#define DRV_DESC "Au1xxx on-chip Ethernet driver"
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
/* AU1000 MAC registers and bits */
#define MAC_CONTROL 0x0
@@ -656,7 +654,6 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct au1000_private *aup = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
aup->mac_id);
}
@@ -1290,8 +1287,6 @@ static int au1000_probe(struct platform_device *pdev)
netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
(unsigned long)base->start, irq);
- pr_info_once("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
-
return 0;
err_out:
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 023aecf6ab30..11c0b13edd30 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -114,8 +114,6 @@ Log: nmclan_cs.c,v
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "nmclan_cs"
-#define DRV_VERSION "0.16"
-
/* ----------------------------------------------------------------------------
Conditional Compilation Options
@@ -367,7 +365,7 @@ typedef struct _mace_private {
char tx_free_frames; /* Number of free transmit frame buffers */
char tx_irq_disabled; /* MACE TX interrupt disabled */
-
+
spinlock_t bank_lock; /* Must be held if you step off bank 0 */
} mace_private;
@@ -444,7 +442,7 @@ static int nmclan_probe(struct pcmcia_device *link)
lp = netdev_priv(dev);
lp->p_dev = link;
link->priv = dev;
-
+
spin_lock_init(&lp->bank_lock);
link->resource[0]->end = 32;
link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
@@ -817,7 +815,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
@@ -1110,7 +1107,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
if (pkt_len & 1)
*(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV);
skb->protocol = eth_type_trans(skb, dev);
-
+
netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
dev->stats.rx_packets++;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index dc7d88227e76..07e8211eea51 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -24,13 +24,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "pcnet32"
-#define DRV_VERSION "1.35"
#define DRV_RELDATE "21.Apr.2008"
#define PFX DRV_NAME ": "
-static const char *const version =
- DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -809,7 +805,6 @@ static void pcnet32_get_drvinfo(struct net_device *dev,
struct pcnet32_private *lp = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
if (lp->pci_dev)
strlcpy(info->bus_info, pci_name(lp->pci_dev),
sizeof(info->bus_info));
@@ -3006,8 +3001,6 @@ MODULE_LICENSE("GPL");
static int __init pcnet32_init_module(void)
{
- pr_info("%s", version);
-
pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index b00e00881253..a21b2e60157e 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -105,14 +105,9 @@ static char lancestr[] = "LANCE";
#include <asm/irq.h>
#define DRV_NAME "sunlance"
-#define DRV_VERSION "2.02"
#define DRV_RELDATE "8/24/03"
#define DRV_AUTHOR "Miguel de Icaza (miguel@nuclecu.unam.mx)"
-static char version[] =
- DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
-
-MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun Lance ethernet driver");
MODULE_LICENSE("GPL");
@@ -1282,7 +1277,6 @@ static void lance_free_hwresources(struct lance_private *lp)
static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strlcpy(info->driver, "sunlance", sizeof(info->driver));
- strlcpy(info->version, "2.02", sizeof(info->version));
}
static const struct ethtool_ops sparc_lance_ethtool_ops = {
@@ -1305,7 +1299,6 @@ static int sparc_lance_probe_one(struct platform_device *op,
struct platform_device *lebuffer)
{
struct device_node *dp = op->dev.of_node;
- static unsigned version_printed;
struct lance_private *lp;
struct net_device *dev;
int i;
@@ -1316,9 +1309,6 @@ static int sparc_lance_probe_one(struct platform_device *op,
lp = netdev_priv(dev);
- if (sparc_lance_debug && version_printed++ == 0)
- printk (KERN_INFO "%s", version);
-
spin_lock_init(&lp->lock);
/* Copy the IDPROM ethernet address to the device structure, later we
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 8083173f1a8f..61f39a0e04f9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -405,7 +405,6 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
@@ -451,30 +450,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
unsigned int rx_frames, rx_riwt, rx_usecs;
unsigned int tx_frames;
- /* Check for not supported parameters */
- if ((ec->rx_coalesce_usecs_irq) ||
- (ec->rx_max_coalesced_frames_irq) ||
- (ec->tx_coalesce_usecs) ||
- (ec->tx_coalesce_usecs_irq) ||
- (ec->tx_max_coalesced_frames_irq) ||
- (ec->stats_block_coalesce_usecs) ||
- (ec->use_adaptive_rx_coalesce) ||
- (ec->use_adaptive_tx_coalesce) ||
- (ec->pkt_rate_low) ||
- (ec->rx_coalesce_usecs_low) ||
- (ec->rx_max_coalesced_frames_low) ||
- (ec->tx_coalesce_usecs_low) ||
- (ec->tx_max_coalesced_frames_low) ||
- (ec->pkt_rate_high) ||
- (ec->rx_coalesce_usecs_high) ||
- (ec->rx_max_coalesced_frames_high) ||
- (ec->tx_coalesce_usecs_high) ||
- (ec->tx_max_coalesced_frames_high) ||
- (ec->rate_sample_interval)) {
- netdev_err(netdev, "unsupported coalescing parameter\n");
- return -EOPNOTSUPP;
- }
-
rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
rx_usecs = ec->rx_coalesce_usecs;
rx_frames = ec->rx_max_coalesced_frames;
@@ -838,6 +813,8 @@ out:
}
static const struct ethtool_ops xgbe_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = xgbe_get_drvinfo,
.get_msglevel = xgbe_get_msglevel,
.set_msglevel = xgbe_set_msglevel,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 7ce9c69e9c44..2a70714a791d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -127,7 +127,6 @@
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);
static int debug = -1;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 47bcbcf58048..5897e46faca5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -135,7 +135,6 @@
#include <linux/list.h>
#define XGBE_DRV_NAME "amd-xgbe"
-#define XGBE_DRV_VERSION "1.0.3"
#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
/* Descriptor related defines */
diff --git a/drivers/net/ethernet/apm/xgene-v2/ethtool.c b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
index a58250c1b57a..b78d1a99fe81 100644
--- a/drivers/net/ethernet/apm/xgene-v2/ethtool.c
+++ b/drivers/net/ethernet/apm/xgene-v2/ethtool.c
@@ -89,8 +89,6 @@ static void xge_get_drvinfo(struct net_device *ndev,
struct platform_device *pdev = pdata->pdev;
strcpy(info->driver, "xgene-enet-v2");
- strcpy(info->version, XGENE_ENET_V2_VERSION);
- snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "N/A");
sprintf(info->bus_info, "%s", pdev->name);
}
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index c48f60996761..860c18fb7aae 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -741,5 +741,4 @@ module_platform_driver(xge_driver);
MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
-MODULE_VERSION(XGENE_ENET_V2_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.h b/drivers/net/ethernet/apm/xgene-v2/main.h
index d41439d2709d..b3985a7be59d 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.h
+++ b/drivers/net/ethernet/apm/xgene-v2/main.h
@@ -28,7 +28,6 @@
#include "ring.h"
#include "ethtool.h"
-#define XGENE_ENET_V2_VERSION "v1.0"
#define XGENE_ENET_STD_MTU 1536
#define XGENE_ENET_MIN_FRAME 60
#define IRQ_ID_SIZE 16
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 246dec27140d..ada70425b48c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -103,8 +103,6 @@ static void xgene_get_drvinfo(struct net_device *ndev,
struct platform_device *pdev = pdata->pdev;
strcpy(info->driver, "xgene_enet");
- strcpy(info->version, XGENE_DRV_VERSION);
- snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "N/A");
sprintf(info->bus_info, "%s", pdev->name);
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 6aee2f0fc0db..5f1fc6582d74 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -2179,7 +2179,6 @@ static struct platform_driver xgene_enet_driver = {
module_platform_driver(xgene_enet_driver);
MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
-MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 18f4923b1723..d35a338120cf 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -28,7 +28,6 @@
#include "xgene_enet_ring2.h"
#include "../../../phy/mdio-xgene.h"
-#define XGENE_DRV_VERSION "v1.0"
#define ETHER_MIN_PACKET 64
#define ETHER_STD_PACKET 1518
#define XGENE_ENET_STD_MTU 1536
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index 350a48e4f124..76a44b2546ff 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -20,6 +20,7 @@ config AQTION
tristate "aQuantia AQtion(tm) Support"
depends on PCI
depends on X86_64 || ARM64 || COMPILE_TEST
+ depends on MACSEC || MACSEC=n
---help---
This enables the support for the aQuantia AQtion(tm) Ethernet card.
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 6e0a6e234483..8b555665a33a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -8,6 +8,8 @@
obj-$(CONFIG_AQTION) += atlantic.o
+ccflags-y += -I$(src)
+
atlantic-objs := aq_main.o \
aq_nic.o \
aq_pci_func.o \
@@ -22,6 +24,9 @@ atlantic-objs := aq_main.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
hw_atl/hw_atl_utils_fw2x.o \
- hw_atl/hw_atl_llh.o
+ hw_atl/hw_atl_llh.o \
+ macsec/macsec_api.o
+
+atlantic-$(CONFIG_MACSEC) += aq_macsec.o
atlantic-$(CONFIG_PTP_1588_CLOCK) += aq_ptp.o
\ No newline at end of file
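The Makefile builds macsec/macsec_api.o unconditionally but adds aq_macsec.o only when CONFIG_MACSEC is set; the C side mirrors this with IS_ENABLED(CONFIG_MACSEC) guards, as in the aq_ethtool.c hunks below. IS_ENABLED() is true for both built-in (=y) and modular (=m) configurations, so one source file serves all three cases. A minimal sketch (hypothetical foo_ types):

#include <linux/kconfig.h>

struct foo_nic {
	void *macsec_cfg;	/* hypothetical; NULL when unused */
};

#if IS_ENABLED(CONFIG_MACSEC)
static bool foo_macsec_active(const struct foo_nic *nic)
{
	return nic->macsec_cfg != NULL;	/* real path for =y and =m */
}
#else
static bool foo_macsec_active(const struct foo_nic *nic)
{
	return false;	/* MACsec support compiled out */
}
#endif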
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index f0c41f7408e5..7560f5506e55 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -9,8 +9,6 @@
#ifndef AQ_CFG_H
#define AQ_CFG_H
-#include <generated/utsrelease.h>
-
#define AQ_CFG_VECS_DEF 8U
#define AQ_CFG_TCS_DEF 1U
@@ -85,7 +83,5 @@
#define AQ_CFG_DRV_AUTHOR "aQuantia"
#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver"
#define AQ_CFG_DRV_NAME "atlantic"
-#define AQ_CFG_DRV_VERSION UTS_RELEASE \
- AQ_CFG_DRV_VERSION_SUFFIX
#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 42ea8d8daa46..c8c402b013bb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -12,7 +12,6 @@
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/if_vlan.h>
-#include "ver.h"
#include "aq_cfg.h"
#include "aq_utils.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 7b55633d2cb9..7241cf92b43a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -11,6 +11,7 @@
#include "aq_vec.h"
#include "aq_ptp.h"
#include "aq_filters.h"
+#include "aq_macsec.h"
#include <linux/ptp_clock_kernel.h>
@@ -96,6 +97,62 @@ static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
"Queue[%d] InErrors",
};
+#if IS_ENABLED(CONFIG_MACSEC)
+static const char aq_macsec_stat_names[][ETH_GSTRING_LEN] = {
+ "MACSec InCtlPackets",
+ "MACSec InTaggedMissPackets",
+ "MACSec InUntaggedMissPackets",
+ "MACSec InNotagPackets",
+ "MACSec InUntaggedPackets",
+ "MACSec InBadTagPackets",
+ "MACSec InNoSciPackets",
+ "MACSec InUnknownSciPackets",
+ "MACSec InCtrlPortPassPackets",
+ "MACSec InUnctrlPortPassPackets",
+ "MACSec InCtrlPortFailPackets",
+ "MACSec InUnctrlPortFailPackets",
+ "MACSec InTooLongPackets",
+ "MACSec InIgpocCtlPackets",
+ "MACSec InEccErrorPackets",
+ "MACSec InUnctrlHitDropRedir",
+ "MACSec OutCtlPackets",
+ "MACSec OutUnknownSaPackets",
+ "MACSec OutUntaggedPackets",
+ "MACSec OutTooLong",
+ "MACSec OutEccErrorPackets",
+ "MACSec OutUnctrlHitDropRedir",
+};
+
+static const char *aq_macsec_txsc_stat_names[] = {
+ "MACSecTXSC%d ProtectedPkts",
+ "MACSecTXSC%d EncryptedPkts",
+ "MACSecTXSC%d ProtectedOctets",
+ "MACSecTXSC%d EncryptedOctets",
+};
+
+static const char *aq_macsec_txsa_stat_names[] = {
+ "MACSecTXSC%dSA%d HitDropRedirect",
+ "MACSecTXSC%dSA%d Protected2Pkts",
+ "MACSecTXSC%dSA%d ProtectedPkts",
+ "MACSecTXSC%dSA%d EncryptedPkts",
+};
+
+static const char *aq_macsec_rxsa_stat_names[] = {
+ "MACSecRXSC%dSA%d UntaggedHitPkts",
+ "MACSecRXSC%dSA%d CtrlHitDrpRedir",
+ "MACSecRXSC%dSA%d NotUsingSa",
+ "MACSecRXSC%dSA%d UnusedSa",
+ "MACSecRXSC%dSA%d NotValidPkts",
+ "MACSecRXSC%dSA%d InvalidPkts",
+ "MACSecRXSC%dSA%d OkPkts",
+ "MACSecRXSC%dSA%d LatePkts",
+ "MACSecRXSC%dSA%d DelayedPkts",
+ "MACSecRXSC%dSA%d UncheckedPkts",
+ "MACSecRXSC%dSA%d ValidatedOctets",
+ "MACSecRXSC%dSA%d DecryptedOctets",
+};
+#endif
+
static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = {
"DMASystemLoopback",
"PKTSystemLoopback",
@@ -104,18 +161,38 @@ static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = {
"PHYExternalLoopback",
};
+static u32 aq_ethtool_n_stats(struct net_device *ndev)
+{
+ struct aq_nic_s *nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic);
+ u32 n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
+ ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ if (nic->macsec_cfg) {
+ n_stats += ARRAY_SIZE(aq_macsec_stat_names) +
+ ARRAY_SIZE(aq_macsec_txsc_stat_names) *
+ aq_macsec_tx_sc_cnt(nic) +
+ ARRAY_SIZE(aq_macsec_txsa_stat_names) *
+ aq_macsec_tx_sa_cnt(nic) +
+ ARRAY_SIZE(aq_macsec_rxsa_stat_names) *
+ aq_macsec_rx_sa_cnt(nic);
+ }
+#endif
+
+ return n_stats;
+}
+
static void aq_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg;
-
- cfg = aq_nic_get_cfg(aq_nic);
- memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
- ARRAY_SIZE(aq_ethtool_queue_stat_names) *
- cfg->vecs) * sizeof(u64));
- aq_nic_get_stats(aq_nic, data);
+ memset(data, 0, aq_ethtool_n_stats(ndev) * sizeof(u64));
+ data = aq_nic_get_stats(aq_nic, data);
+#if IS_ENABLED(CONFIG_MACSEC)
+ data = aq_macsec_get_stats(aq_nic, data);
+#endif
}
static void aq_ethtool_get_drvinfo(struct net_device *ndev,
@@ -123,16 +200,13 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
{
struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg;
u32 firmware_version;
u32 regs_count;
- cfg = aq_nic_get_cfg(aq_nic);
firmware_version = aq_nic_get_fw_version(aq_nic);
regs_count = aq_nic_get_regs_count(aq_nic);
strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver));
- strlcat(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u", firmware_version >> 24,
@@ -140,8 +214,7 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
- cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
+ drvinfo->n_stats = aq_ethtool_n_stats(ndev);
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = regs_count;
drvinfo->eedump_len = 0;
@@ -154,6 +227,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
struct aq_nic_cfg_s *cfg;
u8 *p = data;
int i, si;
+#if IS_ENABLED(CONFIG_MACSEC)
+ int sa;
+#endif
cfg = aq_nic_get_cfg(aq_nic);
@@ -171,6 +247,60 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
p += ETH_GSTRING_LEN;
}
}
+#if IS_ENABLED(CONFIG_MACSEC)
+ if (!aq_nic->macsec_cfg)
+ break;
+
+ memcpy(p, aq_macsec_stat_names, sizeof(aq_macsec_stat_names));
+ p = p + sizeof(aq_macsec_stat_names);
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ struct aq_macsec_txsc *aq_txsc;
+
+ if (!(test_bit(i, &aq_nic->macsec_cfg->txsc_idx_busy)))
+ continue;
+
+ for (si = 0;
+ si < ARRAY_SIZE(aq_macsec_txsc_stat_names);
+ si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_macsec_txsc_stat_names[si], i);
+ p += ETH_GSTRING_LEN;
+ }
+ aq_txsc = &aq_nic->macsec_cfg->aq_txsc[i];
+ for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
+ if (!(test_bit(sa, &aq_txsc->tx_sa_idx_busy)))
+ continue;
+ for (si = 0;
+ si < ARRAY_SIZE(aq_macsec_txsa_stat_names);
+ si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_macsec_txsa_stat_names[si],
+ i, sa);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ struct aq_macsec_rxsc *aq_rxsc;
+
+ if (!(test_bit(i, &aq_nic->macsec_cfg->rxsc_idx_busy)))
+ continue;
+
+ aq_rxsc = &aq_nic->macsec_cfg->aq_rxsc[i];
+ for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
+ if (!(test_bit(sa, &aq_rxsc->rx_sa_idx_busy)))
+ continue;
+ for (si = 0;
+ si < ARRAY_SIZE(aq_macsec_rxsa_stat_names);
+ si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_macsec_rxsa_stat_names[si],
+ i, sa);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ }
+#endif
break;
case ETH_SS_PRIV_FLAGS:
memcpy(p, aq_ethtool_priv_flag_names,
@@ -210,16 +340,11 @@ static int aq_ethtool_set_phys_id(struct net_device *ndev,
static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
{
- struct aq_nic_s *aq_nic = netdev_priv(ndev);
- struct aq_nic_cfg_s *cfg;
int ret = 0;
- cfg = aq_nic_get_cfg(aq_nic);
-
switch (stringset) {
case ETH_SS_STATS:
- ret = ARRAY_SIZE(aq_ethtool_stat_names) +
- cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
+ ret = aq_ethtool_n_stats(ndev);
break;
case ETH_SS_PRIV_FLAGS:
ret = ARRAY_SIZE(aq_ethtool_priv_flag_names);
@@ -387,21 +512,10 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev,
cfg = aq_nic_get_cfg(aq_nic);
- /* This is not yet supported
- */
- if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce)
- return -EOPNOTSUPP;
-
/* Atlantic only supports timing based coalescing
*/
if (coal->rx_max_coalesced_frames > 1 ||
- coal->rx_coalesce_usecs_irq ||
- coal->rx_max_coalesced_frames_irq)
- return -EOPNOTSUPP;
-
- if (coal->tx_max_coalesced_frames > 1 ||
- coal->tx_coalesce_usecs_irq ||
- coal->tx_max_coalesced_frames_irq)
+ coal->tx_max_coalesced_frames > 1)
return -EOPNOTSUPP;
/* We do not support frame counting. Check this
@@ -743,6 +857,8 @@ static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags)
}
const struct ethtool_ops aq_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_link = aq_ethtool_get_link,
.get_regs_len = aq_ethtool_get_regs_len,
.get_regs = aq_ethtool_get_regs,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 251767c31f7e..7d71bc7dc500 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -343,6 +343,12 @@ struct aq_fw_ops {
int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
u32 *supported_rates);
+
+ u32 (*get_link_capabilities)(struct aq_hw_s *self);
+
+ int (*send_macsec_req)(struct aq_hw_s *self,
+ struct macsec_msg_fw_request *msg,
+ struct macsec_msg_fw_response *resp);
};
#endif /* AQ_HW_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
new file mode 100644
index 000000000000..0b3e234a54aa
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "aq_macsec.h"
+#include "aq_nic.h"
+#include <linux/rtnetlink.h>
+
+#include "macsec/macsec_api.h"
+#define AQ_MACSEC_KEY_LEN_128_BIT 16
+#define AQ_MACSEC_KEY_LEN_192_BIT 24
+#define AQ_MACSEC_KEY_LEN_256_BIT 32
+
+enum aq_clear_type {
+ /* update HW configuration */
+ AQ_CLEAR_HW = BIT(0),
+ /* update SW configuration (busy bits, pointers) */
+ AQ_CLEAR_SW = BIT(1),
+ /* update both HW and SW configuration */
+ AQ_CLEAR_ALL = AQ_CLEAR_HW | AQ_CLEAR_SW,
+};
+
+static int aq_clear_txsc(struct aq_nic_s *nic, const int txsc_idx,
+ enum aq_clear_type clear_type);
+static int aq_clear_txsa(struct aq_nic_s *nic, struct aq_macsec_txsc *aq_txsc,
+ const int sa_num, enum aq_clear_type clear_type);
+static int aq_clear_rxsc(struct aq_nic_s *nic, const int rxsc_idx,
+ enum aq_clear_type clear_type);
+static int aq_clear_rxsa(struct aq_nic_s *nic, struct aq_macsec_rxsc *aq_rxsc,
+ const int sa_num, enum aq_clear_type clear_type);
+static int aq_clear_secy(struct aq_nic_s *nic, const struct macsec_secy *secy,
+ enum aq_clear_type clear_type);
+static int aq_apply_macsec_cfg(struct aq_nic_s *nic);
+static int aq_apply_secy_cfg(struct aq_nic_s *nic,
+ const struct macsec_secy *secy);
+
+static void aq_ether_addr_to_mac(u32 mac[2], unsigned char *emac)
+{
+ u32 tmp[2] = { 0 };
+
+ memcpy(((u8 *)tmp) + 2, emac, ETH_ALEN);
+
+ mac[0] = swab32(tmp[1]);
+ mac[1] = swab32(tmp[0]);
+}
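+
+/* Editorial example (assuming a little-endian host): for the address
+ * 00:17:b6:01:02:03 the helper above yields mac[0] = 0xb6010203 and
+ * mac[1] = 0x00000017, i.e. the octets packed big-endian across the
+ * two words with the leading pair in mac[1].
+ */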
+
+/* There's a 1:1 mapping between SecY and TX SC */
+static int aq_get_txsc_idx_from_secy(struct aq_macsec_cfg *macsec_cfg,
+ const struct macsec_secy *secy)
+{
+ int i;
+
+ if (unlikely(!secy))
+ return -1;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (macsec_cfg->aq_txsc[i].sw_secy == secy)
+ return i;
+ }
+ return -1;
+}
+
+static int aq_get_rxsc_idx_from_rxsc(struct aq_macsec_cfg *macsec_cfg,
+ const struct macsec_rx_sc *rxsc)
+{
+ int i;
+
+ if (unlikely(!rxsc))
+ return -1;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (macsec_cfg->aq_rxsc[i].sw_rxsc == rxsc)
+ return i;
+ }
+
+ return -1;
+}
+
+static int aq_get_txsc_idx_from_sc_idx(const enum aq_macsec_sc_sa sc_sa,
+ const int sc_idx)
+{
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ return sc_idx >> 2;
+ case aq_macsec_sa_sc_2sa_16sc:
+ return sc_idx >> 1;
+ case aq_macsec_sa_sc_1sa_32sc:
+ return sc_idx;
+ default:
+ WARN_ONCE(true, "Invalid sc_sa");
+ }
+ return -1;
+}
+
+/* Rotate keys u32[8] */
+static void aq_rotate_keys(u32 (*key)[8], const int key_len)
+{
+ u32 tmp[8] = { 0 };
+
+ memcpy(&tmp, key, sizeof(tmp));
+ memset(*key, 0, sizeof(*key));
+
+ if (key_len == AQ_MACSEC_KEY_LEN_128_BIT) {
+ (*key)[0] = swab32(tmp[3]);
+ (*key)[1] = swab32(tmp[2]);
+ (*key)[2] = swab32(tmp[1]);
+ (*key)[3] = swab32(tmp[0]);
+ } else if (key_len == AQ_MACSEC_KEY_LEN_192_BIT) {
+ (*key)[0] = swab32(tmp[5]);
+ (*key)[1] = swab32(tmp[4]);
+ (*key)[2] = swab32(tmp[3]);
+ (*key)[3] = swab32(tmp[2]);
+ (*key)[4] = swab32(tmp[1]);
+ (*key)[5] = swab32(tmp[0]);
+ } else if (key_len == AQ_MACSEC_KEY_LEN_256_BIT) {
+ (*key)[0] = swab32(tmp[7]);
+ (*key)[1] = swab32(tmp[6]);
+ (*key)[2] = swab32(tmp[5]);
+ (*key)[3] = swab32(tmp[4]);
+ (*key)[4] = swab32(tmp[3]);
+ (*key)[5] = swab32(tmp[2]);
+ (*key)[6] = swab32(tmp[1]);
+ (*key)[7] = swab32(tmp[0]);
+ } else {
+ pr_warn("Rotate_keys: invalid key_len\n");
+ }
+}
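+
+/* Editorial example (assuming a little-endian host): the rotation
+ * amounts to reversing the key buffer byte by byte; a 16-byte key
+ * 00 01 ... 0e 0f becomes 0f 0e ... 01 00, presumably the byte order
+ * the MACsec engine expects.
+ */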
+
+#define STATS_2x32_TO_64(stat_field) \
+ (((u64)(stat_field)[1] << 32) | (stat_field)[0])
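+
+/* Editorial example: for u32 ctr[2] = { 0x89abcdef, 0x01234567 },
+ * STATS_2x32_TO_64(ctr) evaluates to 0x0123456789abcdef; ctr[1]
+ * supplies the high 32 bits and ctr[0] the low 32 bits.
+ */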
+
+static int aq_get_macsec_common_stats(struct aq_hw_s *hw,
+ struct aq_macsec_common_stats *stats)
+{
+ struct aq_mss_ingress_common_counters ingress_counters;
+ struct aq_mss_egress_common_counters egress_counters;
+ int ret;
+
+ /* MACSEC counters */
+ ret = aq_mss_get_ingress_common_counters(hw, &ingress_counters);
+ if (unlikely(ret))
+ return ret;
+
+ stats->in.ctl_pkts = STATS_2x32_TO_64(ingress_counters.ctl_pkts);
+ stats->in.tagged_miss_pkts =
+ STATS_2x32_TO_64(ingress_counters.tagged_miss_pkts);
+ stats->in.untagged_miss_pkts =
+ STATS_2x32_TO_64(ingress_counters.untagged_miss_pkts);
+ stats->in.notag_pkts = STATS_2x32_TO_64(ingress_counters.notag_pkts);
+ stats->in.untagged_pkts =
+ STATS_2x32_TO_64(ingress_counters.untagged_pkts);
+ stats->in.bad_tag_pkts =
+ STATS_2x32_TO_64(ingress_counters.bad_tag_pkts);
+ stats->in.no_sci_pkts = STATS_2x32_TO_64(ingress_counters.no_sci_pkts);
+ stats->in.unknown_sci_pkts =
+ STATS_2x32_TO_64(ingress_counters.unknown_sci_pkts);
+ stats->in.ctrl_prt_pass_pkts =
+ STATS_2x32_TO_64(ingress_counters.ctrl_prt_pass_pkts);
+ stats->in.unctrl_prt_pass_pkts =
+ STATS_2x32_TO_64(ingress_counters.unctrl_prt_pass_pkts);
+ stats->in.ctrl_prt_fail_pkts =
+ STATS_2x32_TO_64(ingress_counters.ctrl_prt_fail_pkts);
+ stats->in.unctrl_prt_fail_pkts =
+ STATS_2x32_TO_64(ingress_counters.unctrl_prt_fail_pkts);
+ stats->in.too_long_pkts =
+ STATS_2x32_TO_64(ingress_counters.too_long_pkts);
+ stats->in.igpoc_ctl_pkts =
+ STATS_2x32_TO_64(ingress_counters.igpoc_ctl_pkts);
+ stats->in.ecc_error_pkts =
+ STATS_2x32_TO_64(ingress_counters.ecc_error_pkts);
+ stats->in.unctrl_hit_drop_redir =
+ STATS_2x32_TO_64(ingress_counters.unctrl_hit_drop_redir);
+
+ ret = aq_mss_get_egress_common_counters(hw, &egress_counters);
+ if (unlikely(ret))
+ return ret;
+ stats->out.ctl_pkts = STATS_2x32_TO_64(egress_counters.ctl_pkt);
+ stats->out.unknown_sa_pkts =
+ STATS_2x32_TO_64(egress_counters.unknown_sa_pkts);
+ stats->out.untagged_pkts =
+ STATS_2x32_TO_64(egress_counters.untagged_pkts);
+ stats->out.too_long = STATS_2x32_TO_64(egress_counters.too_long);
+ stats->out.ecc_error_pkts =
+ STATS_2x32_TO_64(egress_counters.ecc_error_pkts);
+ stats->out.unctrl_hit_drop_redir =
+ STATS_2x32_TO_64(egress_counters.unctrl_hit_drop_redir);
+
+ return 0;
+}
+
+static int aq_get_rxsa_stats(struct aq_hw_s *hw, const int sa_idx,
+ struct aq_macsec_rx_sa_stats *stats)
+{
+ struct aq_mss_ingress_sa_counters i_sa_counters;
+ int ret;
+
+ ret = aq_mss_get_ingress_sa_counters(hw, &i_sa_counters, sa_idx);
+ if (unlikely(ret))
+ return ret;
+
+ stats->untagged_hit_pkts =
+ STATS_2x32_TO_64(i_sa_counters.untagged_hit_pkts);
+ stats->ctrl_hit_drop_redir_pkts =
+ STATS_2x32_TO_64(i_sa_counters.ctrl_hit_drop_redir_pkts);
+ stats->not_using_sa = STATS_2x32_TO_64(i_sa_counters.not_using_sa);
+ stats->unused_sa = STATS_2x32_TO_64(i_sa_counters.unused_sa);
+ stats->not_valid_pkts = STATS_2x32_TO_64(i_sa_counters.not_valid_pkts);
+ stats->invalid_pkts = STATS_2x32_TO_64(i_sa_counters.invalid_pkts);
+ stats->ok_pkts = STATS_2x32_TO_64(i_sa_counters.ok_pkts);
+ stats->late_pkts = STATS_2x32_TO_64(i_sa_counters.late_pkts);
+ stats->delayed_pkts = STATS_2x32_TO_64(i_sa_counters.delayed_pkts);
+ stats->unchecked_pkts = STATS_2x32_TO_64(i_sa_counters.unchecked_pkts);
+ stats->validated_octets =
+ STATS_2x32_TO_64(i_sa_counters.validated_octets);
+ stats->decrypted_octets =
+ STATS_2x32_TO_64(i_sa_counters.decrypted_octets);
+
+ return 0;
+}
+
+static int aq_get_txsa_stats(struct aq_hw_s *hw, const int sa_idx,
+ struct aq_macsec_tx_sa_stats *stats)
+{
+ struct aq_mss_egress_sa_counters e_sa_counters;
+ int ret;
+
+ ret = aq_mss_get_egress_sa_counters(hw, &e_sa_counters, sa_idx);
+ if (unlikely(ret))
+ return ret;
+
+ stats->sa_hit_drop_redirect =
+ STATS_2x32_TO_64(e_sa_counters.sa_hit_drop_redirect);
+ stats->sa_protected2_pkts =
+ STATS_2x32_TO_64(e_sa_counters.sa_protected2_pkts);
+ stats->sa_protected_pkts =
+ STATS_2x32_TO_64(e_sa_counters.sa_protected_pkts);
+ stats->sa_encrypted_pkts =
+ STATS_2x32_TO_64(e_sa_counters.sa_encrypted_pkts);
+
+ return 0;
+}
+
+static int aq_get_txsa_next_pn(struct aq_hw_s *hw, const int sa_idx, u32 *pn)
+{
+ struct aq_mss_egress_sa_record sa_rec;
+ int ret;
+
+ ret = aq_mss_get_egress_sa_record(hw, &sa_rec, sa_idx);
+ if (likely(!ret))
+ *pn = sa_rec.next_pn;
+
+ return ret;
+}
+
+static int aq_get_rxsa_next_pn(struct aq_hw_s *hw, const int sa_idx, u32 *pn)
+{
+ struct aq_mss_ingress_sa_record sa_rec;
+ int ret;
+
+ ret = aq_mss_get_ingress_sa_record(hw, &sa_rec, sa_idx);
+ if (likely(!ret))
+ *pn = (!sa_rec.sat_nextpn) ? sa_rec.next_pn : 0;
+
+ return ret;
+}
+
+static int aq_get_txsc_stats(struct aq_hw_s *hw, const int sc_idx,
+ struct aq_macsec_tx_sc_stats *stats)
+{
+ struct aq_mss_egress_sc_counters e_sc_counters;
+ int ret;
+
+ ret = aq_mss_get_egress_sc_counters(hw, &e_sc_counters, sc_idx);
+ if (unlikely(ret))
+ return ret;
+
+ stats->sc_protected_pkts =
+ STATS_2x32_TO_64(e_sc_counters.sc_protected_pkts);
+ stats->sc_encrypted_pkts =
+ STATS_2x32_TO_64(e_sc_counters.sc_encrypted_pkts);
+ stats->sc_protected_octets =
+ STATS_2x32_TO_64(e_sc_counters.sc_protected_octets);
+ stats->sc_encrypted_octets =
+ STATS_2x32_TO_64(e_sc_counters.sc_encrypted_octets);
+
+ return 0;
+}
+
+static int aq_mdo_dev_open(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ int ret = 0;
+
+ if (ctx->prepare)
+ return 0;
+
+ if (netif_carrier_ok(nic->ndev))
+ ret = aq_apply_secy_cfg(nic, ctx->secy);
+
+ return ret;
+}
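+
+/* Editorial note: each mdo_* callback in this offload API is invoked
+ * twice by the MACsec core, first with ctx->prepare set (a dry run
+ * that must only validate and may not touch hardware) and then with
+ * it clear to commit; hence the early "if (ctx->prepare) return 0;"
+ * guard near the top of every handler here.
+ */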
+
+static int aq_mdo_dev_stop(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ int i;
+
+ if (ctx->prepare)
+ return 0;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (nic->macsec_cfg->txsc_idx_busy & BIT(i))
+ aq_clear_secy(nic, nic->macsec_cfg->aq_txsc[i].sw_secy,
+ AQ_CLEAR_HW);
+ }
+
+ return 0;
+}
+
+static int aq_set_txsc(struct aq_nic_s *nic, const int txsc_idx)
+{
+ struct aq_macsec_txsc *aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
+ struct aq_mss_egress_class_record tx_class_rec = { 0 };
+ const struct macsec_secy *secy = aq_txsc->sw_secy;
+ struct aq_mss_egress_sc_record sc_rec = { 0 };
+ unsigned int sc_idx = aq_txsc->hw_sc_idx;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ aq_ether_addr_to_mac(tx_class_rec.mac_sa, secy->netdev->dev_addr);
+
+ put_unaligned_be64((__force u64)secy->sci, tx_class_rec.sci);
+ tx_class_rec.sci_mask = 0;
+
+ tx_class_rec.sa_mask = 0x3f;
+
+ tx_class_rec.action = 0; /* forward to SA/SC table */
+ tx_class_rec.valid = 1;
+
+ tx_class_rec.sc_idx = sc_idx;
+
+ tx_class_rec.sc_sa = nic->macsec_cfg->sc_sa;
+
+ ret = aq_mss_set_egress_class_record(hw, &tx_class_rec, txsc_idx);
+ if (ret)
+ return ret;
+
+ sc_rec.protect = secy->protect_frames;
+ if (secy->tx_sc.encrypt)
+ sc_rec.tci |= BIT(1);
+ if (secy->tx_sc.scb)
+ sc_rec.tci |= BIT(2);
+ if (secy->tx_sc.send_sci)
+ sc_rec.tci |= BIT(3);
+ if (secy->tx_sc.end_station)
+ sc_rec.tci |= BIT(4);
+ /* The C bit is clear if and only if the Secure Data is
+ * exactly the same as the User Data and the ICV is 16 octets long.
+ */
+ if (!(secy->icv_len == 16 && !secy->tx_sc.encrypt))
+ sc_rec.tci |= BIT(0);
+
+ sc_rec.an_roll = 0;
+
+ switch (secy->key_len) {
+ case AQ_MACSEC_KEY_LEN_128_BIT:
+ sc_rec.sak_len = 0;
+ break;
+ case AQ_MACSEC_KEY_LEN_192_BIT:
+ sc_rec.sak_len = 1;
+ break;
+ case AQ_MACSEC_KEY_LEN_256_BIT:
+ sc_rec.sak_len = 2;
+ break;
+ default:
+ WARN_ONCE(true, "Invalid sc_sa");
+ return -EINVAL;
+ }
+
+ sc_rec.curr_an = secy->tx_sc.encoding_sa;
+ sc_rec.valid = 1;
+ sc_rec.fresh = 1;
+
+ return aq_mss_set_egress_sc_record(hw, &sc_rec, sc_idx);
+}
+
+static u32 aq_sc_idx_max(const enum aq_macsec_sc_sa sc_sa)
+{
+ u32 result = 0;
+
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ result = 8;
+ break;
+ case aq_macsec_sa_sc_2sa_16sc:
+ result = 16;
+ break;
+ case aq_macsec_sa_sc_1sa_32sc:
+ result = 32;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static u32 aq_to_hw_sc_idx(const u32 sc_idx, const enum aq_macsec_sc_sa sc_sa)
+{
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ return sc_idx << 2;
+ case aq_macsec_sa_sc_2sa_16sc:
+ return sc_idx << 1;
+ case aq_macsec_sa_sc_1sa_32sc:
+ return sc_idx;
+ default:
+ WARN_ONCE(true, "Invalid sc_sa");
+ }
+
+ return sc_idx;
+}
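+
+/* Editorial example: in aq_macsec_sa_sc_4sa_8sc mode, SC 3 maps to
+ * hw_sc_idx = 3 << 2 = 12 and its SA with AN 2 to sa_idx = 12 | 2 = 14;
+ * aq_sc_idx_from_sa_idx() and aq_sa_from_sa_idx() below recover 12 and
+ * 2, and aq_get_txsc_idx_from_sc_idx() maps 12 back to SC 3.
+ */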
+
+static enum aq_macsec_sc_sa sc_sa_from_num_an(const int num_an)
+{
+ enum aq_macsec_sc_sa sc_sa = aq_macsec_sa_sc_not_used;
+
+ switch (num_an) {
+ case 4:
+ sc_sa = aq_macsec_sa_sc_4sa_8sc;
+ break;
+ case 2:
+ sc_sa = aq_macsec_sa_sc_2sa_16sc;
+ break;
+ case 1:
+ sc_sa = aq_macsec_sa_sc_1sa_32sc;
+ break;
+ default:
+ break;
+ }
+
+ return sc_sa;
+}
+
+static int aq_mdo_add_secy(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const struct macsec_secy *secy = ctx->secy;
+ enum aq_macsec_sc_sa sc_sa;
+ u32 txsc_idx;
+ int ret = 0;
+
+ if (secy->xpn)
+ return -EOPNOTSUPP;
+
+ sc_sa = sc_sa_from_num_an(MACSEC_NUM_AN);
+ if (sc_sa == aq_macsec_sa_sc_not_used)
+ return -EINVAL;
+
+ if (hweight32(cfg->txsc_idx_busy) >= aq_sc_idx_max(sc_sa))
+ return -ENOSPC;
+
+ txsc_idx = ffz(cfg->txsc_idx_busy);
+ if (txsc_idx == AQ_MACSEC_MAX_SC)
+ return -ENOSPC;
+
+ if (ctx->prepare)
+ return 0;
+
+ cfg->sc_sa = sc_sa;
+ cfg->aq_txsc[txsc_idx].hw_sc_idx = aq_to_hw_sc_idx(txsc_idx, sc_sa);
+ cfg->aq_txsc[txsc_idx].sw_secy = secy;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_set_txsc(nic, txsc_idx);
+
+ set_bit(txsc_idx, &cfg->txsc_idx_busy);
+
+ return ret;
+}
+
+static int aq_mdo_upd_secy(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ const struct macsec_secy *secy = ctx->secy;
+ int txsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
+ if (txsc_idx < 0)
+ return -ENOENT;
+
+ if (ctx->prepare)
+ return 0;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_set_txsc(nic, txsc_idx);
+
+ return ret;
+}
+
+static int aq_clear_txsc(struct aq_nic_s *nic, const int txsc_idx,
+ enum aq_clear_type clear_type)
+{
+ struct aq_macsec_txsc *tx_sc = &nic->macsec_cfg->aq_txsc[txsc_idx];
+ struct aq_mss_egress_class_record tx_class_rec = { 0 };
+ struct aq_mss_egress_sc_record sc_rec = { 0 };
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+ int sa_num;
+
+ for_each_set_bit(sa_num, &tx_sc->tx_sa_idx_busy, AQ_MACSEC_MAX_SA) {
+ ret = aq_clear_txsa(nic, tx_sc, sa_num, clear_type);
+ if (ret)
+ return ret;
+ }
+
+ if (clear_type & AQ_CLEAR_HW) {
+ ret = aq_mss_set_egress_class_record(hw, &tx_class_rec,
+ txsc_idx);
+ if (ret)
+ return ret;
+
+ sc_rec.fresh = 1;
+ ret = aq_mss_set_egress_sc_record(hw, &sc_rec,
+ tx_sc->hw_sc_idx);
+ if (ret)
+ return ret;
+ }
+
+ if (clear_type & AQ_CLEAR_SW) {
+ clear_bit(txsc_idx, &nic->macsec_cfg->txsc_idx_busy);
+ nic->macsec_cfg->aq_txsc[txsc_idx].sw_secy = NULL;
+ }
+
+ return ret;
+}
+
+static int aq_mdo_del_secy(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ int ret = 0;
+
+ if (ctx->prepare)
+ return 0;
+
+ if (!nic->macsec_cfg)
+ return 0;
+
+ ret = aq_clear_secy(nic, ctx->secy, AQ_CLEAR_ALL);
+
+ return ret;
+}
+
+static int aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx,
+ const struct macsec_secy *secy,
+ const struct macsec_tx_sa *tx_sa,
+ const unsigned char *key, const unsigned char an)
+{
+ const u32 next_pn = tx_sa->next_pn_halves.lower;
+ struct aq_mss_egress_sakey_record key_rec;
+ const unsigned int sa_idx = sc_idx | an;
+ struct aq_mss_egress_sa_record sa_rec;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ memset(&sa_rec, 0, sizeof(sa_rec));
+ sa_rec.valid = tx_sa->active;
+ sa_rec.fresh = 1;
+ sa_rec.next_pn = next_pn;
+
+ ret = aq_mss_set_egress_sa_record(hw, &sa_rec, sa_idx);
+ if (ret)
+ return ret;
+
+ if (!key)
+ return ret;
+
+ memset(&key_rec, 0, sizeof(key_rec));
+ memcpy(&key_rec.key, key, secy->key_len);
+
+ aq_rotate_keys(&key_rec.key, secy->key_len);
+
+ ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
+
+ return ret;
+}
+
+static int aq_mdo_add_txsa(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const struct macsec_secy *secy = ctx->secy;
+ struct aq_macsec_txsc *aq_txsc;
+ int txsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(cfg, secy);
+ if (txsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_txsc = &cfg->aq_txsc[txsc_idx];
+ set_bit(ctx->sa.assoc_num, &aq_txsc->tx_sa_idx_busy);
+
+ memcpy(aq_txsc->tx_sa_key[ctx->sa.assoc_num], ctx->sa.key,
+ secy->key_len);
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
+ ctx->sa.tx_sa, ctx->sa.key,
+ ctx->sa.assoc_num);
+
+ return ret;
+}
+
+static int aq_mdo_upd_txsa(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const struct macsec_secy *secy = ctx->secy;
+ struct aq_macsec_txsc *aq_txsc;
+ int txsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(cfg, secy);
+ if (txsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_txsc = &cfg->aq_txsc[txsc_idx];
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
+ ctx->sa.tx_sa, NULL, ctx->sa.assoc_num);
+
+ return ret;
+}
+
+static int aq_clear_txsa(struct aq_nic_s *nic, struct aq_macsec_txsc *aq_txsc,
+ const int sa_num, enum aq_clear_type clear_type)
+{
+ const int sa_idx = aq_txsc->hw_sc_idx | sa_num;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ if (clear_type & AQ_CLEAR_SW)
+ clear_bit(sa_num, &aq_txsc->tx_sa_idx_busy);
+
+ if ((clear_type & AQ_CLEAR_HW) && netif_carrier_ok(nic->ndev)) {
+ struct aq_mss_egress_sakey_record key_rec;
+ struct aq_mss_egress_sa_record sa_rec;
+
+ memset(&sa_rec, 0, sizeof(sa_rec));
+ sa_rec.fresh = 1;
+
+ ret = aq_mss_set_egress_sa_record(hw, &sa_rec, sa_idx);
+ if (ret)
+ return ret;
+
+ memset(&key_rec, 0, sizeof(key_rec));
+ return aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
+ }
+
+ return 0;
+}
+
+static int aq_mdo_del_txsa(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ int txsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(cfg, ctx->secy);
+ if (txsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ ret = aq_clear_txsa(nic, &cfg->aq_txsc[txsc_idx], ctx->sa.assoc_num,
+ AQ_CLEAR_ALL);
+
+ return ret;
+}
+
+static int aq_rxsc_validate_frames(const enum macsec_validation_type validate)
+{
+ switch (validate) {
+ case MACSEC_VALIDATE_DISABLED:
+ return 2;
+ case MACSEC_VALIDATE_CHECK:
+ return 1;
+ case MACSEC_VALIDATE_STRICT:
+ return 0;
+ default:
+ WARN_ONCE(true, "Invalid validation type");
+ }
+
+ return 0;
+}
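+
+/* Editorial note: the hardware encoding runs opposite to the uapi
+ * enum order (strict = 0, check = 1, disabled = 2), hence the explicit
+ * translation instead of a plain cast.
+ */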
+
+static int aq_set_rxsc(struct aq_nic_s *nic, const u32 rxsc_idx)
+{
+ const struct aq_macsec_rxsc *aq_rxsc =
+ &nic->macsec_cfg->aq_rxsc[rxsc_idx];
+ struct aq_mss_ingress_preclass_record pre_class_record;
+ const struct macsec_rx_sc *rx_sc = aq_rxsc->sw_rxsc;
+ const struct macsec_secy *secy = aq_rxsc->sw_secy;
+ const u32 hw_sc_idx = aq_rxsc->hw_sc_idx;
+ struct aq_mss_ingress_sc_record sc_record;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ memset(&pre_class_record, 0, sizeof(pre_class_record));
+ put_unaligned_be64((__force u64)rx_sc->sci, pre_class_record.sci);
+ pre_class_record.sci_mask = 0xff;
+ /* match all MACSEC ethertype packets */
+ pre_class_record.eth_type = ETH_P_MACSEC;
+ pre_class_record.eth_type_mask = 0x3;
+
+ aq_ether_addr_to_mac(pre_class_record.mac_sa, (char *)&rx_sc->sci);
+ pre_class_record.sa_mask = 0x3f;
+
+ pre_class_record.an_mask = nic->macsec_cfg->sc_sa;
+ pre_class_record.sc_idx = hw_sc_idx;
+ /* strip SecTAG & forward for decryption */
+ pre_class_record.action = 0x0;
+ pre_class_record.valid = 1;
+
+ ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
+ 2 * rxsc_idx + 1);
+ if (ret)
+ return ret;
+
+ /* If SCI is absent, then match by SA alone */
+ pre_class_record.sci_mask = 0;
+ pre_class_record.sci_from_table = 1;
+
+ ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
+ 2 * rxsc_idx);
+ if (ret)
+ return ret;
+
+ memset(&sc_record, 0, sizeof(sc_record));
+ sc_record.validate_frames =
+ aq_rxsc_validate_frames(secy->validate_frames);
+ if (secy->replay_protect) {
+ sc_record.replay_protect = 1;
+ sc_record.anti_replay_window = secy->replay_window;
+ }
+ sc_record.valid = 1;
+ sc_record.fresh = 1;
+
+ ret = aq_mss_set_ingress_sc_record(hw, &sc_record, hw_sc_idx);
+ if (ret)
+ return ret;
+
+ return ret;
+}
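+
+/* Editorial note: each RX SC occupies two pre-classifier rows; row
+ * 2 * rxsc_idx + 1 matches on the full SCI, while row 2 * rxsc_idx
+ * repeats the match with sci_mask = 0 and sci_from_table = 1 so that
+ * frames whose SecTAG carries no explicit SCI still resolve to the
+ * same SC via the source address.
+ */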
+
+static int aq_mdo_add_rxsc(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const u32 rxsc_idx_max = aq_sc_idx_max(cfg->sc_sa);
+ u32 rxsc_idx;
+ int ret = 0;
+
+ if (hweight32(cfg->rxsc_idx_busy) >= rxsc_idx_max)
+ return -ENOSPC;
+
+ rxsc_idx = ffz(cfg->rxsc_idx_busy);
+ if (rxsc_idx >= rxsc_idx_max)
+ return -ENOSPC;
+
+ if (ctx->prepare)
+ return 0;
+
+ cfg->aq_rxsc[rxsc_idx].hw_sc_idx = aq_to_hw_sc_idx(rxsc_idx,
+ cfg->sc_sa);
+ cfg->aq_rxsc[rxsc_idx].sw_secy = ctx->secy;
+ cfg->aq_rxsc[rxsc_idx].sw_rxsc = ctx->rx_sc;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
+ ret = aq_set_rxsc(nic, rxsc_idx);
+
+ if (ret < 0)
+ return ret;
+
+ set_bit(rxsc_idx, &cfg->rxsc_idx_busy);
+
+ return 0;
+}
+
+static int aq_mdo_upd_rxsc(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, ctx->rx_sc);
+ if (rxsc_idx < 0)
+ return -ENOENT;
+
+ if (ctx->prepare)
+ return 0;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
+ ret = aq_set_rxsc(nic, rxsc_idx);
+
+ return ret;
+}
+
+static int aq_clear_rxsc(struct aq_nic_s *nic, const int rxsc_idx,
+ enum aq_clear_type clear_type)
+{
+ struct aq_macsec_rxsc *rx_sc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+ int sa_num;
+
+ for_each_set_bit(sa_num, &rx_sc->rx_sa_idx_busy, AQ_MACSEC_MAX_SA) {
+ ret = aq_clear_rxsa(nic, rx_sc, sa_num, clear_type);
+ if (ret)
+ return ret;
+ }
+
+ if (clear_type & AQ_CLEAR_HW) {
+ struct aq_mss_ingress_preclass_record pre_class_record;
+ struct aq_mss_ingress_sc_record sc_record;
+
+ memset(&pre_class_record, 0, sizeof(pre_class_record));
+ memset(&sc_record, 0, sizeof(sc_record));
+
+ ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
+ 2 * rxsc_idx);
+ if (ret)
+ return ret;
+
+ ret = aq_mss_set_ingress_preclass_record(hw, &pre_class_record,
+ 2 * rxsc_idx + 1);
+ if (ret)
+ return ret;
+
+ sc_record.fresh = 1;
+ ret = aq_mss_set_ingress_sc_record(hw, &sc_record,
+ rx_sc->hw_sc_idx);
+ if (ret)
+ return ret;
+ }
+
+ if (clear_type & AQ_CLEAR_SW) {
+ clear_bit(rxsc_idx, &nic->macsec_cfg->rxsc_idx_busy);
+ rx_sc->sw_secy = NULL;
+ rx_sc->sw_rxsc = NULL;
+ }
+
+ return ret;
+}
+
+static int aq_mdo_del_rxsc(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ enum aq_clear_type clear_type = AQ_CLEAR_SW;
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, ctx->rx_sc);
+ if (rxsc_idx < 0)
+ return -ENOENT;
+
+ if (ctx->prepare)
+ return 0;
+
+ if (netif_carrier_ok(nic->ndev))
+ clear_type = AQ_CLEAR_ALL;
+
+ ret = aq_clear_rxsc(nic, rxsc_idx, clear_type);
+
+ return ret;
+}
+
+static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx,
+ const struct macsec_secy *secy,
+ const struct macsec_rx_sa *rx_sa,
+ const unsigned char *key, const unsigned char an)
+{
+ struct aq_mss_ingress_sakey_record sa_key_record;
+ const u32 next_pn = rx_sa->next_pn_halves.lower;
+ struct aq_mss_ingress_sa_record sa_record;
+ struct aq_hw_s *hw = nic->aq_hw;
+ const int sa_idx = sc_idx | an;
+ int ret = 0;
+
+ memset(&sa_record, 0, sizeof(sa_record));
+ sa_record.valid = rx_sa->active;
+ sa_record.fresh = 1;
+ sa_record.next_pn = next_pn;
+
+ ret = aq_mss_set_ingress_sa_record(hw, &sa_record, sa_idx);
+ if (ret)
+ return ret;
+
+ if (!key)
+ return ret;
+
+ memset(&sa_key_record, 0, sizeof(sa_key_record));
+ memcpy(&sa_key_record.key, key, secy->key_len);
+
+ switch (secy->key_len) {
+ case AQ_MACSEC_KEY_LEN_128_BIT:
+ sa_key_record.key_len = 0;
+ break;
+ case AQ_MACSEC_KEY_LEN_192_BIT:
+ sa_key_record.key_len = 1;
+ break;
+ case AQ_MACSEC_KEY_LEN_256_BIT:
+ sa_key_record.key_len = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ aq_rotate_keys(&sa_key_record.key, secy->key_len);
+
+ ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx);
+
+ return ret;
+}
+
+static int aq_mdo_add_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ const struct macsec_secy *secy = ctx->secy;
+ struct aq_macsec_rxsc *aq_rxsc;
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
+ if (rxsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
+ set_bit(ctx->sa.assoc_num, &aq_rxsc->rx_sa_idx_busy);
+
+ memcpy(aq_rxsc->rx_sa_key[ctx->sa.assoc_num], ctx->sa.key,
+ secy->key_len);
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_update_rxsa(nic, aq_rxsc->hw_sc_idx, secy,
+ ctx->sa.rx_sa, ctx->sa.key,
+ ctx->sa.assoc_num);
+
+ return ret;
+}
+
+static int aq_mdo_upd_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ const struct macsec_secy *secy = ctx->secy;
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, rx_sc);
+ if (rxsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
+ ret = aq_update_rxsa(nic, cfg->aq_rxsc[rxsc_idx].hw_sc_idx,
+ secy, ctx->sa.rx_sa, NULL,
+ ctx->sa.assoc_num);
+
+ return ret;
+}
+
+static int aq_clear_rxsa(struct aq_nic_s *nic, struct aq_macsec_rxsc *aq_rxsc,
+ const int sa_num, enum aq_clear_type clear_type)
+{
+ int sa_idx = aq_rxsc->hw_sc_idx | sa_num;
+ struct aq_hw_s *hw = nic->aq_hw;
+ int ret = 0;
+
+ if (clear_type & AQ_CLEAR_SW)
+ clear_bit(sa_num, &aq_rxsc->rx_sa_idx_busy);
+
+ if ((clear_type & AQ_CLEAR_HW) && netif_carrier_ok(nic->ndev)) {
+ struct aq_mss_ingress_sakey_record sa_key_record;
+ struct aq_mss_ingress_sa_record sa_record;
+
+ memset(&sa_key_record, 0, sizeof(sa_key_record));
+ memset(&sa_record, 0, sizeof(sa_record));
+ sa_record.fresh = 1;
+ ret = aq_mss_set_ingress_sa_record(hw, &sa_record, sa_idx);
+ if (ret)
+ return ret;
+
+ return aq_mss_set_ingress_sakey_record(hw, &sa_key_record,
+ sa_idx);
+ }
+
+ return ret;
+}
+
+static int aq_mdo_del_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sc *rx_sc = ctx->sa.rx_sa->sc;
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ int rxsc_idx;
+ int ret = 0;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, rx_sc);
+ if (rxsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ ret = aq_clear_rxsa(nic, &cfg->aq_rxsc[rxsc_idx], ctx->sa.assoc_num,
+ AQ_CLEAR_ALL);
+
+ return ret;
+}
+
+static int aq_mdo_get_dev_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_common_stats *stats = &nic->macsec_cfg->stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_get_macsec_common_stats(hw, stats);
+
+ ctx->stats.dev_stats->OutPktsUntagged = stats->out.untagged_pkts;
+ ctx->stats.dev_stats->InPktsUntagged = stats->in.untagged_pkts;
+ ctx->stats.dev_stats->OutPktsTooLong = stats->out.too_long;
+ ctx->stats.dev_stats->InPktsNoTag = stats->in.notag_pkts;
+ ctx->stats.dev_stats->InPktsBadTag = stats->in.bad_tag_pkts;
+ ctx->stats.dev_stats->InPktsUnknownSCI = stats->in.unknown_sci_pkts;
+ ctx->stats.dev_stats->InPktsNoSCI = stats->in.no_sci_pkts;
+ ctx->stats.dev_stats->InPktsOverrun = 0;
+
+ return 0;
+}
+
+static int aq_mdo_get_tx_sc_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_tx_sc_stats *stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_txsc *aq_txsc;
+ int txsc_idx;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, ctx->secy);
+ if (txsc_idx < 0)
+ return -ENOENT;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
+ stats = &aq_txsc->stats;
+ aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx, stats);
+
+ ctx->stats.tx_sc_stats->OutPktsProtected = stats->sc_protected_pkts;
+ ctx->stats.tx_sc_stats->OutPktsEncrypted = stats->sc_encrypted_pkts;
+ ctx->stats.tx_sc_stats->OutOctetsProtected = stats->sc_protected_octets;
+ ctx->stats.tx_sc_stats->OutOctetsEncrypted = stats->sc_encrypted_octets;
+
+ return 0;
+}
+
+static int aq_mdo_get_tx_sa_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_macsec_tx_sa_stats *stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+ const struct macsec_secy *secy;
+ struct aq_macsec_txsc *aq_txsc;
+ struct macsec_tx_sa *tx_sa;
+ unsigned int sa_idx;
+ int txsc_idx;
+ u32 next_pn;
+ int ret;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(cfg, ctx->secy);
+ if (txsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_txsc = &cfg->aq_txsc[txsc_idx];
+ sa_idx = aq_txsc->hw_sc_idx | ctx->sa.assoc_num;
+ stats = &aq_txsc->tx_sa_stats[ctx->sa.assoc_num];
+ ret = aq_get_txsa_stats(hw, sa_idx, stats);
+ if (ret)
+ return ret;
+
+ ctx->stats.tx_sa_stats->OutPktsProtected = stats->sa_protected_pkts;
+ ctx->stats.tx_sa_stats->OutPktsEncrypted = stats->sa_encrypted_pkts;
+
+ secy = aq_txsc->sw_secy;
+ tx_sa = rcu_dereference_bh(secy->tx_sc.sa[ctx->sa.assoc_num]);
+ ret = aq_get_txsa_next_pn(hw, sa_idx, &next_pn);
+ if (ret == 0) {
+ spin_lock_bh(&tx_sa->lock);
+ tx_sa->next_pn = next_pn;
+ spin_unlock_bh(&tx_sa->lock);
+ }
+
+ return ret;
+}
+
+static int aq_mdo_get_rx_sc_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_macsec_rx_sa_stats *stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_rxsc *aq_rxsc;
+ unsigned int sa_idx;
+ int rxsc_idx;
+ int ret = 0;
+ int i;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, ctx->rx_sc);
+ if (rxsc_idx < 0)
+ return -ENOENT;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
+ for (i = 0; i < MACSEC_NUM_AN; i++) {
+ if (!test_bit(i, &aq_rxsc->rx_sa_idx_busy))
+ continue;
+
+ stats = &aq_rxsc->rx_sa_stats[i];
+ sa_idx = aq_rxsc->hw_sc_idx | i;
+ ret = aq_get_rxsa_stats(hw, sa_idx, stats);
+ if (ret)
+ break;
+
+ ctx->stats.rx_sc_stats->InOctetsValidated +=
+ stats->validated_octets;
+ ctx->stats.rx_sc_stats->InOctetsDecrypted +=
+ stats->decrypted_octets;
+ ctx->stats.rx_sc_stats->InPktsUnchecked +=
+ stats->unchecked_pkts;
+ ctx->stats.rx_sc_stats->InPktsDelayed += stats->delayed_pkts;
+ ctx->stats.rx_sc_stats->InPktsOK += stats->ok_pkts;
+ ctx->stats.rx_sc_stats->InPktsInvalid += stats->invalid_pkts;
+ ctx->stats.rx_sc_stats->InPktsLate += stats->late_pkts;
+ ctx->stats.rx_sc_stats->InPktsNotValid += stats->not_valid_pkts;
+ ctx->stats.rx_sc_stats->InPktsNotUsingSA += stats->not_using_sa;
+ ctx->stats.rx_sc_stats->InPktsUnusedSA += stats->unused_sa;
+ }
+
+ return ret;
+}
+
+static int aq_mdo_get_rx_sa_stats(struct macsec_context *ctx)
+{
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev);
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_macsec_rx_sa_stats *stats;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_rxsc *aq_rxsc;
+ struct macsec_rx_sa *rx_sa;
+ unsigned int sa_idx;
+ int rxsc_idx;
+ u32 next_pn;
+ int ret;
+
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(cfg, ctx->rx_sc);
+ if (rxsc_idx < 0)
+ return -EINVAL;
+
+ if (ctx->prepare)
+ return 0;
+
+ aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
+ stats = &aq_rxsc->rx_sa_stats[ctx->sa.assoc_num];
+ sa_idx = aq_rxsc->hw_sc_idx | ctx->sa.assoc_num;
+ ret = aq_get_rxsa_stats(hw, sa_idx, stats);
+ if (ret)
+ return ret;
+
+ ctx->stats.rx_sa_stats->InPktsOK = stats->ok_pkts;
+ ctx->stats.rx_sa_stats->InPktsInvalid = stats->invalid_pkts;
+ ctx->stats.rx_sa_stats->InPktsNotValid = stats->not_valid_pkts;
+ ctx->stats.rx_sa_stats->InPktsNotUsingSA = stats->not_using_sa;
+ ctx->stats.rx_sa_stats->InPktsUnusedSA = stats->unused_sa;
+
+ rx_sa = rcu_dereference_bh(aq_rxsc->sw_rxsc->sa[ctx->sa.assoc_num]);
+ ret = aq_get_rxsa_next_pn(hw, sa_idx, &next_pn);
+ if (ret == 0) {
+ spin_lock_bh(&rx_sa->lock);
+ rx_sa->next_pn = next_pn;
+ spin_unlock_bh(&rx_sa->lock);
+ }
+
+ return ret;
+}
+
+static int apply_txsc_cfg(struct aq_nic_s *nic, const int txsc_idx)
+{
+ struct aq_macsec_txsc *aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
+ const struct macsec_secy *secy = aq_txsc->sw_secy;
+ struct macsec_tx_sa *tx_sa;
+ int ret = 0;
+ int i;
+
+ if (!netif_running(secy->netdev))
+ return ret;
+
+ ret = aq_set_txsc(nic, txsc_idx);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < MACSEC_NUM_AN; i++) {
+ tx_sa = rcu_dereference_bh(secy->tx_sc.sa[i]);
+ if (tx_sa) {
+ ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
+ tx_sa, aq_txsc->tx_sa_key[i], i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int apply_rxsc_cfg(struct aq_nic_s *nic, const int rxsc_idx)
+{
+ struct aq_macsec_rxsc *aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
+ const struct macsec_secy *secy = aq_rxsc->sw_secy;
+ struct macsec_rx_sa *rx_sa;
+ int ret = 0;
+ int i;
+
+ if (!netif_running(secy->netdev))
+ return ret;
+
+ ret = aq_set_rxsc(nic, rxsc_idx);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < MACSEC_NUM_AN; i++) {
+ rx_sa = rcu_dereference_bh(aq_rxsc->sw_rxsc->sa[i]);
+ if (rx_sa) {
+ ret = aq_update_rxsa(nic, aq_rxsc->hw_sc_idx, secy,
+ rx_sa, aq_rxsc->rx_sa_key[i], i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int aq_clear_secy(struct aq_nic_s *nic, const struct macsec_secy *secy,
+ enum aq_clear_type clear_type)
+{
+ struct macsec_rx_sc *rx_sc;
+ int txsc_idx;
+ int rxsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
+ if (txsc_idx >= 0) {
+ ret = aq_clear_txsc(nic, txsc_idx, clear_type);
+ if (ret)
+ return ret;
+ }
+
+ for (rx_sc = rcu_dereference_bh(secy->rx_sc); rx_sc;
+ rx_sc = rcu_dereference_bh(rx_sc->next)) {
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
+ if (rxsc_idx < 0)
+ continue;
+
+ ret = aq_clear_rxsc(nic, rxsc_idx, clear_type);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int aq_apply_secy_cfg(struct aq_nic_s *nic,
+ const struct macsec_secy *secy)
+{
+ struct macsec_rx_sc *rx_sc;
+ int txsc_idx;
+ int rxsc_idx;
+ int ret = 0;
+
+ txsc_idx = aq_get_txsc_idx_from_secy(nic->macsec_cfg, secy);
+ if (txsc_idx >= 0)
+ apply_txsc_cfg(nic, txsc_idx);
+
+ for (rx_sc = rcu_dereference_bh(secy->rx_sc); rx_sc && rx_sc->active;
+ rx_sc = rcu_dereference_bh(rx_sc->next)) {
+ rxsc_idx = aq_get_rxsc_idx_from_rxsc(nic->macsec_cfg, rx_sc);
+ if (unlikely(rxsc_idx < 0))
+ continue;
+
+ ret = apply_rxsc_cfg(nic, rxsc_idx);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int aq_apply_macsec_cfg(struct aq_nic_s *nic)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (nic->macsec_cfg->txsc_idx_busy & BIT(i)) {
+ ret = apply_txsc_cfg(nic, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (nic->macsec_cfg->rxsc_idx_busy & BIT(i)) {
+ ret = apply_rxsc_cfg(nic, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int aq_sa_from_sa_idx(const enum aq_macsec_sc_sa sc_sa, const int sa_idx)
+{
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ return sa_idx & 3;
+ case aq_macsec_sa_sc_2sa_16sc:
+ return sa_idx & 1;
+ case aq_macsec_sa_sc_1sa_32sc:
+ return 0;
+ default:
+ WARN_ONCE(true, "Invalid sc_sa");
+ }
+ return -EINVAL;
+}
+
+static int aq_sc_idx_from_sa_idx(const enum aq_macsec_sc_sa sc_sa,
+ const int sa_idx)
+{
+ switch (sc_sa) {
+ case aq_macsec_sa_sc_4sa_8sc:
+ return sa_idx & ~3;
+ case aq_macsec_sa_sc_2sa_16sc:
+ return sa_idx & ~1;
+ case aq_macsec_sa_sc_1sa_32sc:
+ return sa_idx;
+ default:
+ WARN_ONCE(true, "Invalid sc_sa");
+ }
+ return -EINVAL;
+}
+
+static void aq_check_txsa_expiration(struct aq_nic_s *nic)
+{
+ u32 egress_sa_expired, egress_sa_threshold_expired;
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_txsc *aq_txsc;
+ const struct macsec_secy *secy;
+ int sc_idx = 0, txsc_idx = 0;
+ enum aq_macsec_sc_sa sc_sa;
+ struct macsec_tx_sa *tx_sa;
+ unsigned char an = 0;
+ int ret;
+ int i;
+
+ sc_sa = cfg->sc_sa;
+
+ ret = aq_mss_get_egress_sa_expired(hw, &egress_sa_expired);
+ if (unlikely(ret))
+ return;
+
+ ret = aq_mss_get_egress_sa_threshold_expired(hw,
+ &egress_sa_threshold_expired);
+
+ for (i = 0; i < AQ_MACSEC_MAX_SA; i++) {
+ if (egress_sa_expired & BIT(i)) {
+ an = aq_sa_from_sa_idx(sc_sa, i);
+ sc_idx = aq_sc_idx_from_sa_idx(sc_sa, i);
+ txsc_idx = aq_get_txsc_idx_from_sc_idx(sc_sa, sc_idx);
+ if (txsc_idx < 0)
+ continue;
+
+ aq_txsc = &cfg->aq_txsc[txsc_idx];
+ if (!(cfg->txsc_idx_busy & BIT(txsc_idx))) {
+ netdev_warn(nic->ndev,
+ "PN threshold expired on invalid TX SC");
+ continue;
+ }
+
+ secy = aq_txsc->sw_secy;
+ if (!netif_running(secy->netdev)) {
+ netdev_warn(nic->ndev,
+ "PN threshold expired on down TX SC");
+ continue;
+ }
+
+ if (unlikely(!(aq_txsc->tx_sa_idx_busy & BIT(an)))) {
+ netdev_warn(nic->ndev,
+ "PN threshold expired on invalid TX SA");
+ continue;
+ }
+
+ tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
+ macsec_pn_wrapped((struct macsec_secy *)secy, tx_sa);
+ }
+ }
+
+ aq_mss_set_egress_sa_expired(hw, egress_sa_expired);
+ if (likely(!ret))
+ aq_mss_set_egress_sa_threshold_expired(hw,
+ egress_sa_threshold_expired);
+}
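+
+/* Editorial note: the periodic service path polls the egress SA
+ * expired bits, maps each expired hardware SA index back to its
+ * (TX SC, AN) pair through aq_sc_idx_from_sa_idx() and
+ * aq_sa_from_sa_idx(), and reports the packet-number wrap to the
+ * MACsec core via macsec_pn_wrapped() so the stack can retire the SA
+ * and trigger a rekey.
+ */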
+
+const struct macsec_ops aq_macsec_ops = {
+ .mdo_dev_open = aq_mdo_dev_open,
+ .mdo_dev_stop = aq_mdo_dev_stop,
+ .mdo_add_secy = aq_mdo_add_secy,
+ .mdo_upd_secy = aq_mdo_upd_secy,
+ .mdo_del_secy = aq_mdo_del_secy,
+ .mdo_add_rxsc = aq_mdo_add_rxsc,
+ .mdo_upd_rxsc = aq_mdo_upd_rxsc,
+ .mdo_del_rxsc = aq_mdo_del_rxsc,
+ .mdo_add_rxsa = aq_mdo_add_rxsa,
+ .mdo_upd_rxsa = aq_mdo_upd_rxsa,
+ .mdo_del_rxsa = aq_mdo_del_rxsa,
+ .mdo_add_txsa = aq_mdo_add_txsa,
+ .mdo_upd_txsa = aq_mdo_upd_txsa,
+ .mdo_del_txsa = aq_mdo_del_txsa,
+ .mdo_get_dev_stats = aq_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats,
+};
+
+int aq_macsec_init(struct aq_nic_s *nic)
+{
+ struct aq_macsec_cfg *cfg;
+ u32 caps_lo;
+
+ if (!nic->aq_fw_ops->get_link_capabilities)
+ return 0;
+
+ caps_lo = nic->aq_fw_ops->get_link_capabilities(nic->aq_hw);
+
+ if (!(caps_lo & BIT(CAPS_LO_MACSEC)))
+ return 0;
+
+ nic->macsec_cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!nic->macsec_cfg)
+ return -ENOMEM;
+
+ nic->ndev->features |= NETIF_F_HW_MACSEC;
+ nic->ndev->macsec_ops = &aq_macsec_ops;
+
+ return 0;
+}
+
+void aq_macsec_free(struct aq_nic_s *nic)
+{
+ kfree(nic->macsec_cfg);
+ nic->macsec_cfg = NULL;
+}
+
+int aq_macsec_enable(struct aq_nic_s *nic)
+{
+ u32 ctl_ether_types[1] = { ETH_P_PAE };
+ struct macsec_msg_fw_response resp = { 0 };
+ struct macsec_msg_fw_request msg = { 0 };
+ struct aq_hw_s *hw = nic->aq_hw;
+ int num_ctl_ether_types = 0;
+ int index = 0, tbl_idx;
+ int ret;
+
+ if (!nic->macsec_cfg)
+ return 0;
+
+ rtnl_lock();
+
+ if (nic->aq_fw_ops->send_macsec_req) {
+ struct macsec_cfg_request cfg = { 0 };
+
+ cfg.enabled = 1;
+ cfg.egress_threshold = 0xffffffff;
+ cfg.ingress_threshold = 0xffffffff;
+ cfg.interrupts_enabled = 1;
+
+ msg.msg_type = macsec_cfg_msg;
+ msg.cfg = cfg;
+
+ ret = nic->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
+ if (ret)
+ goto unlock;
+ }
+
+ /* Init Ethertype bypass filters */
+ for (index = 0; index < ARRAY_SIZE(ctl_ether_types); index++) {
+ struct aq_mss_ingress_prectlf_record rx_prectlf_rec;
+ struct aq_mss_egress_ctlf_record tx_ctlf_rec;
+
+ if (ctl_ether_types[index] == 0)
+ continue;
+
+ memset(&tx_ctlf_rec, 0, sizeof(tx_ctlf_rec));
+ tx_ctlf_rec.eth_type = ctl_ether_types[index];
+ tx_ctlf_rec.match_type = 4; /* Match eth_type only */
+ tx_ctlf_rec.match_mask = 0xf; /* match for eth_type */
+ tx_ctlf_rec.action = 0; /* Bypass MACSEC modules */
+ tbl_idx = NUMROWS_EGRESSCTLFRECORD - num_ctl_ether_types - 1;
+ aq_mss_set_egress_ctlf_record(hw, &tx_ctlf_rec, tbl_idx);
+
+ memset(&rx_prectlf_rec, 0, sizeof(rx_prectlf_rec));
+ rx_prectlf_rec.eth_type = ctl_ether_types[index];
+ rx_prectlf_rec.match_type = 4; /* Match eth_type only */
+ rx_prectlf_rec.match_mask = 0xf; /* match for eth_type */
+ rx_prectlf_rec.action = 0; /* Bypass MACSEC modules */
+ tbl_idx =
+ NUMROWS_INGRESSPRECTLFRECORD - num_ctl_ether_types - 1;
+ aq_mss_set_ingress_prectlf_record(hw, &rx_prectlf_rec, tbl_idx);
+
+ num_ctl_ether_types++;
+ }
+
+ ret = aq_apply_macsec_cfg(nic);
+
+unlock:
+ rtnl_unlock();
+ return ret;
+}
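+
+/* Editorial note: the bypass filters programmed above let 802.1X
+ * EAPOL frames (ETH_P_PAE) skip the MACsec engine in both directions;
+ * without this the key-agreement exchange that establishes the SAs
+ * could never pass. Rows are allocated from the last row of the
+ * control-frame tables downward (NUMROWS_* - n - 1).
+ */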
+
+void aq_macsec_work(struct aq_nic_s *nic)
+{
+ if (!nic->macsec_cfg)
+ return;
+
+ if (!netif_carrier_ok(nic->ndev))
+ return;
+
+ rtnl_lock();
+ aq_check_txsa_expiration(nic);
+ rtnl_unlock();
+}
+
+int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
+{
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ int i, cnt = 0;
+
+ if (!cfg)
+ return 0;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (!test_bit(i, &cfg->rxsc_idx_busy))
+ continue;
+ cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy);
+ }
+
+ return cnt;
+}
+
+int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic)
+{
+ if (!nic->macsec_cfg)
+ return 0;
+
+ return hweight_long(nic->macsec_cfg->txsc_idx_busy);
+}
+
+int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
+{
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ int i, cnt = 0;
+
+ if (!cfg)
+ return 0;
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (!test_bit(i, &cfg->txsc_idx_busy))
+ continue;
+ cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy);
+ }
+
+ return cnt;
+}
+
+static int aq_macsec_update_stats(struct aq_nic_s *nic)
+{
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_hw_s *hw = nic->aq_hw;
+ struct aq_macsec_txsc *aq_txsc;
+ struct aq_macsec_rxsc *aq_rxsc;
+ int i, sa_idx, assoc_num;
+ int ret = 0;
+
+ aq_get_macsec_common_stats(hw, &cfg->stats);
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (!(cfg->txsc_idx_busy & BIT(i)))
+ continue;
+ aq_txsc = &cfg->aq_txsc[i];
+
+ ret = aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx,
+ &aq_txsc->stats);
+ if (ret)
+ return ret;
+
+ for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
+ if (!test_bit(assoc_num, &aq_txsc->tx_sa_idx_busy))
+ continue;
+ sa_idx = aq_txsc->hw_sc_idx | assoc_num;
+ ret = aq_get_txsa_stats(hw, sa_idx,
+ &aq_txsc->tx_sa_stats[assoc_num]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
+ if (!(test_bit(i, &cfg->rxsc_idx_busy)))
+ continue;
+ aq_rxsc = &cfg->aq_rxsc[i];
+
+ for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
+ if (!test_bit(assoc_num, &aq_rxsc->rx_sa_idx_busy))
+ continue;
+ sa_idx = aq_rxsc->hw_sc_idx | assoc_num;
+
+ ret = aq_get_rxsa_stats(hw, sa_idx,
+ &aq_rxsc->rx_sa_stats[assoc_num]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
+{
+ struct aq_macsec_cfg *cfg = nic->macsec_cfg;
+ struct aq_macsec_common_stats *common_stats;
+ struct aq_macsec_tx_sc_stats *txsc_stats;
+ struct aq_macsec_tx_sa_stats *txsa_stats;
+ struct aq_macsec_rx_sa_stats *rxsa_stats;
+ struct aq_macsec_txsc *aq_txsc;
+ struct aq_macsec_rxsc *aq_rxsc;
+ unsigned int assoc_num;
+ unsigned int sc_num;
+ unsigned int i = 0U;
+
+ if (!cfg)
+ return data;
+
+ aq_macsec_update_stats(nic);
+
+ common_stats = &cfg->stats;
+ data[i] = common_stats->in.ctl_pkts;
+ data[++i] = common_stats->in.tagged_miss_pkts;
+ data[++i] = common_stats->in.untagged_miss_pkts;
+ data[++i] = common_stats->in.notag_pkts;
+ data[++i] = common_stats->in.untagged_pkts;
+ data[++i] = common_stats->in.bad_tag_pkts;
+ data[++i] = common_stats->in.no_sci_pkts;
+ data[++i] = common_stats->in.unknown_sci_pkts;
+ data[++i] = common_stats->in.ctrl_prt_pass_pkts;
+ data[++i] = common_stats->in.unctrl_prt_pass_pkts;
+ data[++i] = common_stats->in.ctrl_prt_fail_pkts;
+ data[++i] = common_stats->in.unctrl_prt_fail_pkts;
+ data[++i] = common_stats->in.too_long_pkts;
+ data[++i] = common_stats->in.igpoc_ctl_pkts;
+ data[++i] = common_stats->in.ecc_error_pkts;
+ data[++i] = common_stats->in.unctrl_hit_drop_redir;
+ data[++i] = common_stats->out.ctl_pkts;
+ data[++i] = common_stats->out.unknown_sa_pkts;
+ data[++i] = common_stats->out.untagged_pkts;
+ data[++i] = common_stats->out.too_long;
+ data[++i] = common_stats->out.ecc_error_pkts;
+ data[++i] = common_stats->out.unctrl_hit_drop_redir;
+
+ for (sc_num = 0; sc_num < AQ_MACSEC_MAX_SC; sc_num++) {
+ if (!(test_bit(sc_num, &cfg->txsc_idx_busy)))
+ continue;
+
+ aq_txsc = &cfg->aq_txsc[sc_num];
+ txsc_stats = &aq_txsc->stats;
+
+ data[++i] = txsc_stats->sc_protected_pkts;
+ data[++i] = txsc_stats->sc_encrypted_pkts;
+ data[++i] = txsc_stats->sc_protected_octets;
+ data[++i] = txsc_stats->sc_encrypted_octets;
+
+ for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
+ if (!test_bit(assoc_num, &aq_txsc->tx_sa_idx_busy))
+ continue;
+
+ txsa_stats = &aq_txsc->tx_sa_stats[assoc_num];
+
+ data[++i] = txsa_stats->sa_hit_drop_redirect;
+ data[++i] = txsa_stats->sa_protected2_pkts;
+ data[++i] = txsa_stats->sa_protected_pkts;
+ data[++i] = txsa_stats->sa_encrypted_pkts;
+ }
+ }
+
+ for (sc_num = 0; sc_num < AQ_MACSEC_MAX_SC; sc_num++) {
+ if (!(test_bit(sc_num, &cfg->rxsc_idx_busy)))
+ continue;
+
+ aq_rxsc = &cfg->aq_rxsc[sc_num];
+
+ for (assoc_num = 0; assoc_num < MACSEC_NUM_AN; assoc_num++) {
+ if (!test_bit(assoc_num, &aq_rxsc->rx_sa_idx_busy))
+ continue;
+
+ rxsa_stats = &aq_rxsc->rx_sa_stats[assoc_num];
+
+ data[++i] = rxsa_stats->untagged_hit_pkts;
+ data[++i] = rxsa_stats->ctrl_hit_drop_redir_pkts;
+ data[++i] = rxsa_stats->not_using_sa;
+ data[++i] = rxsa_stats->unused_sa;
+ data[++i] = rxsa_stats->not_valid_pkts;
+ data[++i] = rxsa_stats->invalid_pkts;
+ data[++i] = rxsa_stats->ok_pkts;
+ data[++i] = rxsa_stats->late_pkts;
+ data[++i] = rxsa_stats->delayed_pkts;
+ data[++i] = rxsa_stats->unchecked_pkts;
+ data[++i] = rxsa_stats->validated_octets;
+ data[++i] = rxsa_stats->decrypted_octets;
+ }
+ }
+
+ i++;
+
+ data += i;
+
+ return data;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
new file mode 100644
index 000000000000..f5fba8b8cdea
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef AQ_MACSEC_H
+#define AQ_MACSEC_H
+
+#include <linux/netdevice.h>
+#if IS_ENABLED(CONFIG_MACSEC)
+
+#include "net/macsec.h"
+
+struct aq_nic_s;
+
+#define AQ_MACSEC_MAX_SC 32
+#define AQ_MACSEC_MAX_SA 32
+
+enum aq_macsec_sc_sa {
+ aq_macsec_sa_sc_4sa_8sc,
+ aq_macsec_sa_sc_not_used,
+ aq_macsec_sa_sc_2sa_16sc,
+ aq_macsec_sa_sc_1sa_32sc,
+};
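+
+/* Editorial note: the numeric values of this enum appear to mirror the
+ * hardware SC/SA mode encoding (they are written verbatim into the
+ * sc_sa and an_mask record fields), which would explain why the unused
+ * mode occupies value 1 instead of being dropped.
+ */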
+
+struct aq_macsec_common_stats {
+ /* Ingress Common Counters */
+ struct {
+ u64 ctl_pkts;
+ u64 tagged_miss_pkts;
+ u64 untagged_miss_pkts;
+ u64 notag_pkts;
+ u64 untagged_pkts;
+ u64 bad_tag_pkts;
+ u64 no_sci_pkts;
+ u64 unknown_sci_pkts;
+ u64 ctrl_prt_pass_pkts;
+ u64 unctrl_prt_pass_pkts;
+ u64 ctrl_prt_fail_pkts;
+ u64 unctrl_prt_fail_pkts;
+ u64 too_long_pkts;
+ u64 igpoc_ctl_pkts;
+ u64 ecc_error_pkts;
+ u64 unctrl_hit_drop_redir;
+ } in;
+
+ /* Egress Common Counters */
+ struct {
+ u64 ctl_pkts;
+ u64 unknown_sa_pkts;
+ u64 untagged_pkts;
+ u64 too_long;
+ u64 ecc_error_pkts;
+ u64 unctrl_hit_drop_redir;
+ } out;
+};
+
+/* Ingress SA Counters */
+struct aq_macsec_rx_sa_stats {
+ u64 untagged_hit_pkts;
+ u64 ctrl_hit_drop_redir_pkts;
+ u64 not_using_sa;
+ u64 unused_sa;
+ u64 not_valid_pkts;
+ u64 invalid_pkts;
+ u64 ok_pkts;
+ u64 late_pkts;
+ u64 delayed_pkts;
+ u64 unchecked_pkts;
+ u64 validated_octets;
+ u64 decrypted_octets;
+};
+
+/* Egress SA Counters */
+struct aq_macsec_tx_sa_stats {
+ u64 sa_hit_drop_redirect;
+ u64 sa_protected2_pkts;
+ u64 sa_protected_pkts;
+ u64 sa_encrypted_pkts;
+};
+
+/* Egress SC Counters */
+struct aq_macsec_tx_sc_stats {
+ u64 sc_protected_pkts;
+ u64 sc_encrypted_pkts;
+ u64 sc_protected_octets;
+ u64 sc_encrypted_octets;
+};
+
+struct aq_macsec_txsc {
+ u32 hw_sc_idx;
+ unsigned long tx_sa_idx_busy;
+ const struct macsec_secy *sw_secy;
+ u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
+ struct aq_macsec_tx_sc_stats stats;
+ struct aq_macsec_tx_sa_stats tx_sa_stats[MACSEC_NUM_AN];
+};
+
+struct aq_macsec_rxsc {
+ u32 hw_sc_idx;
+ unsigned long rx_sa_idx_busy;
+ const struct macsec_secy *sw_secy;
+ const struct macsec_rx_sc *sw_rxsc;
+ u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
+ struct aq_macsec_rx_sa_stats rx_sa_stats[MACSEC_NUM_AN];
+};
+
+struct aq_macsec_cfg {
+ enum aq_macsec_sc_sa sc_sa;
+ /* Egress channel configuration */
+ unsigned long txsc_idx_busy;
+ struct aq_macsec_txsc aq_txsc[AQ_MACSEC_MAX_SC];
+ /* Ingress channel configuration */
+ unsigned long rxsc_idx_busy;
+ struct aq_macsec_rxsc aq_rxsc[AQ_MACSEC_MAX_SC];
+ /* Statistics / counters */
+ struct aq_macsec_common_stats stats;
+};
+
+extern const struct macsec_ops aq_macsec_ops;
+
+int aq_macsec_init(struct aq_nic_s *nic);
+void aq_macsec_free(struct aq_nic_s *nic);
+int aq_macsec_enable(struct aq_nic_s *nic);
+void aq_macsec_work(struct aq_nic_s *nic);
+u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data);
+int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic);
+int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic);
+int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic);
+
+#endif /* IS_ENABLED(CONFIG_MACSEC) */
+
+#endif /* AQ_MACSEC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 538f460a3da7..9fcab646cbd5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -19,7 +19,6 @@
#include <linux/udp.h>
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(AQ_CFG_DRV_VERSION);
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index e95f6a6bef73..a369705a786a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -11,6 +11,7 @@
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
+#include "aq_macsec.h"
#include "aq_main.h"
#include "aq_phy.h"
#include "aq_ptp.h"
@@ -176,6 +177,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
aq_utils_obj_clear(&self->flags,
AQ_NIC_LINK_DOWN);
netif_carrier_on(self->ndev);
+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_enable(self);
+#endif
netif_tx_wake_all_queues(self->ndev);
}
if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
@@ -217,6 +221,10 @@ static void aq_nic_service_task(struct work_struct *work)
if (err)
return;
+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_work(self);
+#endif
+
mutex_lock(&self->fwreq_mutex);
if (self->aq_fw_ops->update_stats)
self->aq_fw_ops->update_stats(self->aq_hw);
@@ -262,6 +270,10 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
if (err)
goto err_exit;
+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_init(self);
+#endif
+
mutex_lock(&self->fwreq_mutex);
err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
self->ndev->dev_addr);
@@ -296,6 +308,10 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
goto err_exit;
err_exit:
+#if IS_ENABLED(CONFIG_MACSEC)
+ if (err)
+ aq_macsec_free(self);
+#endif
return err;
}
@@ -765,7 +781,7 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}
-void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
+u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
@@ -815,7 +831,10 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
aq_vec_get_sw_stats(aq_vec, data, &count);
}
+ data += count;
+
err_exit:;
+ return data;
}
static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index a752f8bb4b08..0663b8d0220d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -17,6 +17,7 @@ struct aq_ring_s;
struct aq_hw_ops;
struct aq_fw_s;
struct aq_vec_s;
+struct aq_macsec_cfg;
struct aq_ptp_s;
enum aq_rx_filter_type;
@@ -129,6 +130,9 @@ struct aq_nic_s {
u32 irqvecs;
/* mutex to serialize FW interface access operations */
struct mutex fwreq_mutex;
+#if IS_ENABLED(CONFIG_MACSEC)
+ struct aq_macsec_cfg *macsec_cfg;
+#endif
/* PTP support */
struct aq_ptp_s *aq_ptp;
struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
@@ -154,7 +158,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
-void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
+u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
int aq_nic_stop(struct aq_nic_s *self);
void aq_nic_deinit(struct aq_nic_s *self, bool link_down);
void aq_nic_set_power(struct aq_nic_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 78b6f3248756..2edf137a7030 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -18,6 +18,7 @@
#include "hw_atl/hw_atl_b0.h"
#include "aq_filters.h"
#include "aq_drvinfo.h"
+#include "aq_macsec.h"
static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
@@ -324,6 +325,10 @@ static void aq_pci_remove(struct pci_dev *pdev)
aq_clear_rxnfc_all_rules(self);
if (self->ndev->reg_state == NETREG_REGISTERED)
unregister_netdev(self->ndev);
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ aq_macsec_free(self);
+#endif
aq_nic_free_vectors(self);
aq_pci_free_irq_vectors(self);
iounmap(self->aq_hw->mmio);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 42f0c5c6ec2d..b15513914636 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -225,7 +225,7 @@ struct __packed offload_info {
struct offload_port_info ports;
struct offload_ka_info kas;
struct offload_rr_info rrs;
- u8 buf[0];
+ u8 buf[];
};
struct __packed hw_atl_utils_fw_rpc {
@@ -319,6 +319,32 @@ struct __packed hw_atl_utils_settings {
u32 media_detect;
};
+enum macsec_msg_type {
+ macsec_cfg_msg = 0,
+ macsec_add_rx_sc_msg,
+ macsec_add_tx_sc_msg,
+ macsec_add_rx_sa_msg,
+ macsec_add_tx_sa_msg,
+ macsec_get_stats_msg,
+};
+
+struct __packed macsec_cfg_request {
+ u32 enabled;
+ u32 egress_threshold;
+ u32 ingress_threshold;
+ u32 interrupts_enabled;
+};
+
+struct __packed macsec_msg_fw_request {
+ u32 msg_id; /* not used */
+ u32 msg_type;
+ struct macsec_cfg_request cfg;
+};
+
+struct __packed macsec_msg_fw_response {
+ u32 result;
+};
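+
+/* One FW MACsec transaction (see aq_fw2x_send_macsec_req()): the request
+ * is written to FW config memory as dwords, the CAPS_LO_MACSEC control
+ * bit is toggled to signal the FW, and the response is read back from
+ * rpc_addr + sizeof(u32).
+ */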
+
enum hw_atl_rx_action_with_traffic {
HW_ATL_RX_DISCARD,
HW_ATL_RX_HOST,
@@ -437,34 +463,43 @@ enum hw_atl_fw2x_caps_lo {
CAPS_LO_2P5GBASET_FD,
CAPS_LO_5GBASET_FD = 10,
CAPS_LO_10GBASET_FD,
+ CAPS_LO_AUTONEG,
+ CAPS_LO_SMBUS_READ,
+ CAPS_LO_SMBUS_WRITE,
+ CAPS_LO_MACSEC = 15,
+ CAPS_LO_RESERVED1,
+ CAPS_LO_WAKE_ON_LINK_FORCED,
+ CAPS_LO_HIGH_TEMP_WARNING = 29,
+ CAPS_LO_DRIVER_SCRATCHPAD = 30,
+ CAPS_LO_GLOBAL_FAULT = 31
};
/* 0x374
* Status register
*/
enum hw_atl_fw2x_caps_hi {
- CAPS_HI_RESERVED1 = 0,
+ CAPS_HI_TPO2EN = 0,
CAPS_HI_10BASET_EEE,
CAPS_HI_RESERVED2,
CAPS_HI_PAUSE,
CAPS_HI_ASYMMETRIC_PAUSE,
CAPS_HI_100BASETX_EEE = 5,
- CAPS_HI_RESERVED3,
- CAPS_HI_RESERVED4,
+ CAPS_HI_PHY_BUF_SEND,
+ CAPS_HI_PHY_BUF_RECV,
CAPS_HI_1000BASET_FD_EEE,
CAPS_HI_2P5GBASET_FD_EEE,
CAPS_HI_5GBASET_FD_EEE = 10,
CAPS_HI_10GBASET_FD_EEE,
CAPS_HI_FW_REQUEST,
- CAPS_HI_RESERVED6,
- CAPS_HI_RESERVED7,
- CAPS_HI_RESERVED8 = 15,
- CAPS_HI_RESERVED9,
+ CAPS_HI_PHY_LOG,
+ CAPS_HI_EEE_AUTO_DISABLE_SETTINGS,
+ CAPS_HI_PFC = 15,
+ CAPS_HI_WAKE_ON_LINK,
CAPS_HI_CABLE_DIAG,
CAPS_HI_TEMPERATURE,
CAPS_HI_DOWNSHIFT,
CAPS_HI_PTP_AVB_EN_FW2X = 20,
- CAPS_HI_MEDIA_DETECT,
+ CAPS_HI_THERMAL_SHUTDOWN,
CAPS_HI_LINK_DROP,
CAPS_HI_SLEEP_PROXY,
CAPS_HI_WOL,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 77a4ed64830f..1ad10cc14918 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -55,6 +55,8 @@
#define HW_ATL_FW2X_CAP_EEE_5G_MASK BIT(CAPS_HI_5GBASET_FD_EEE)
#define HW_ATL_FW2X_CAP_EEE_10G_MASK BIT(CAPS_HI_10GBASET_FD_EEE)
+#define HW_ATL_FW2X_CAP_MACSEC BIT(CAPS_LO_MACSEC)
+
#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8
#define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E
@@ -86,6 +88,7 @@ static int aq_fw2x_set_state(struct aq_hw_s *self,
static u32 aq_fw2x_mbox_get(struct aq_hw_s *self);
static u32 aq_fw2x_rpc_get(struct aq_hw_s *self);
static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr);
+static u32 aq_fw2x_state_get(struct aq_hw_s *self);
static u32 aq_fw2x_state2_get(struct aq_hw_s *self);
static int aq_fw2x_init(struct aq_hw_s *self)
@@ -619,11 +622,75 @@ static int aq_fw2x_settings_get(struct aq_hw_s *self, u32 *addr)
return err;
}
+static u32 aq_fw2x_state_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR);
+}
+
static u32 aq_fw2x_state2_get(struct aq_hw_s *self)
{
return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
}
+static u32 aq_fw2x_get_link_capabilities(struct aq_hw_s *self)
+{
+ int err = 0;
+ u32 offset;
+ u32 val;
+
+ offset = self->mbox_addr +
+ offsetof(struct hw_atl_utils_mbox, info.caps_lo);
+
+ err = hw_atl_utils_fw_downld_dwords(self, offset, &val, 1);
+ if (err)
+ return 0;
+
+ return val;
+}
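+
+/* Used both internally (aq_fw2x_send_macsec_req() checks CAPS_LO_MACSEC
+ * before issuing a request) and exposed through
+ * aq_fw_ops->get_link_capabilities.
+ */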
+
+static int aq_fw2x_send_macsec_req(struct aq_hw_s *hw,
+ struct macsec_msg_fw_request *req,
+ struct macsec_msg_fw_response *response)
+{
+ u32 low_status, low_req = 0;
+ u32 dword_cnt;
+ u32 caps_lo;
+ u32 offset;
+ int err;
+
+ if (!req || !response)
+ return -EINVAL;
+
+ caps_lo = aq_fw2x_get_link_capabilities(hw);
+ if (!(caps_lo & BIT(CAPS_LO_MACSEC)))
+ return -EOPNOTSUPP;
+
+ /* Write macsec request to cfg memory */
+ dword_cnt = (sizeof(*req) + sizeof(u32) - 1) / sizeof(u32);
+ err = hw_atl_write_fwcfg_dwords(hw, (void *)req, dword_cnt);
+ if (err < 0)
+ return err;
+
+ /* Toggle 0x368.CAPS_LO_MACSEC bit */
+ low_req = aq_hw_read_reg(hw, HW_ATL_FW2X_MPI_CONTROL_ADDR);
+ low_req ^= HW_ATL_FW2X_CAP_MACSEC;
+ aq_hw_write_reg(hw, HW_ATL_FW2X_MPI_CONTROL_ADDR, low_req);
+
+ /* Wait for the FW to report back */
+ err = readx_poll_timeout_atomic(aq_fw2x_state_get, hw, low_status,
+ low_req != (low_status & BIT(CAPS_LO_MACSEC)), 1U, 10000U);
+ if (err)
+ return -EIO;
+
+ /* Read status of write operation */
+ offset = hw->rpc_addr + sizeof(u32);
+ err = hw_atl_utils_fw_downld_dwords(hw, offset, (u32 *)(void *)response,
+ sizeof(*response) / sizeof(u32));
+
+ return err;
+}
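+
+/* Minimal usage sketch (hypothetical caller, for illustration only):
+ *
+ *	struct macsec_msg_fw_request req = {
+ *		.msg_type = macsec_cfg_msg,
+ *		.cfg = { .enabled = 1U, .interrupts_enabled = 1U },
+ *	};
+ *	struct macsec_msg_fw_response rsp = {};
+ *
+ *	err = aq_fw2x_send_macsec_req(hw, &req, &rsp);
+ *	if (!err && rsp.result)
+ *		err = -EIO;
+ */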
+
const struct aq_fw_ops aq_fw_2x_ops = {
.init = aq_fw2x_init,
.deinit = aq_fw2x_deinit,
@@ -645,4 +712,6 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.led_control = aq_fw2x_led_control,
.set_phyloopback = aq_fw2x_set_phyloopback,
.adjust_ptp = aq_fw3x_adjust_ptp,
+ .get_link_capabilities = aq_fw2x_get_link_capabilities,
+ .send_macsec_req = aq_fw2x_send_macsec_req,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Egress_registers.h b/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Egress_registers.h
new file mode 100644
index 000000000000..71d08ea80b54
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Egress_registers.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef MSS_EGRESS_REGS_HEADER
+#define MSS_EGRESS_REGS_HEADER
+
+#define MSS_EGRESS_CTL_REGISTER_ADDR 0x00005002
+#define MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR 0x00005060
+#define MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR 0x00005062
+#define MSS_EGRESS_LUT_ADDR_CTL_REGISTER_ADDR 0x00005080
+#define MSS_EGRESS_LUT_CTL_REGISTER_ADDR 0x00005081
+#define MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR 0x000050A0
+
+struct mss_egress_ctl_register {
+ union {
+ struct {
+ unsigned int soft_reset : 1;
+ unsigned int drop_kay_packet : 1;
+ unsigned int drop_egprc_lut_miss : 1;
+ unsigned int gcm_start : 1;
+ unsigned int gcm_test_mode : 1;
+ unsigned int unmatched_use_sc_0 : 1;
+ unsigned int drop_invalid_sa_sc_packets : 1;
+ unsigned int reserved0 : 1;
+ /* Should always be set to 0. */
+ unsigned int external_classification_enable : 1;
+ unsigned int icv_lsb_8bytes_enable : 1;
+ unsigned int high_prio : 1;
+ unsigned int clear_counter : 1;
+ unsigned int clear_global_time : 1;
+ unsigned int ethertype_explicit_sectag_lsb : 3;
+ } bits_0;
+ unsigned short word_0;
+ };
+ union {
+ struct {
+ unsigned int ethertype_explicit_sectag_msb : 13;
+ unsigned int reserved0 : 3;
+ } bits_1;
+ unsigned short word_1;
+ };
+};
+
+struct mss_egress_lut_addr_ctl_register {
+ union {
+ struct {
+ unsigned int lut_addr : 9;
+ unsigned int reserved0 : 3;
+ /* 0x0 : Egress MAC Control Filter (CTLF) LUT
+ * 0x1 : Egress Classification LUT
+ * 0x2 : Egress SC/SA LUT
+ * 0x3 : Egress SMIB
+ */
+ unsigned int lut_select : 4;
+ } bits_0;
+ unsigned short word_0;
+ };
+};
+
+struct mss_egress_lut_ctl_register {
+ union {
+ struct {
+ unsigned int reserved0 : 14;
+ unsigned int lut_read : 1;
+ unsigned int lut_write : 1;
+ } bits_0;
+ unsigned short word_0;
+ };
+};
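+
+/* These layouts are accessed as 16-bit MDIO words: a caller fills the
+ * named bitfields through bits_0/bits_1 and transfers the overlaid
+ * word_0/word_1 over MDIO (see macsec_api.c).
+ */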
+
+#endif /* MSS_EGRESS_REGS_HEADER */
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Ingress_registers.h b/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Ingress_registers.h
new file mode 100644
index 000000000000..d4c00d9a0fc6
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/MSS_Ingress_registers.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef MSS_INGRESS_REGS_HEADER
+#define MSS_INGRESS_REGS_HEADER
+
+#define MSS_INGRESS_CTL_REGISTER_ADDR 0x0000800E
+#define MSS_INGRESS_LUT_ADDR_CTL_REGISTER_ADDR 0x00008080
+#define MSS_INGRESS_LUT_CTL_REGISTER_ADDR 0x00008081
+#define MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR 0x000080A0
+
+struct mss_ingress_ctl_register {
+ union {
+ struct {
+ unsigned int soft_reset : 1;
+ unsigned int operation_point_to_point : 1;
+ unsigned int create_sci : 1;
+ /* Unused */
+ unsigned int mask_short_length_error : 1;
+ unsigned int drop_kay_packet : 1;
+ unsigned int drop_igprc_miss : 1;
+ /* Unused */
+ unsigned int check_icv : 1;
+ unsigned int clear_global_time : 1;
+ unsigned int clear_count : 1;
+ unsigned int high_prio : 1;
+ unsigned int remove_sectag : 1;
+ unsigned int global_validate_frames : 2;
+ unsigned int icv_lsb_8bytes_enabled : 1;
+ unsigned int reserved0 : 2;
+ } bits_0;
+ unsigned short word_0;
+ };
+ union {
+ struct {
+ unsigned int reserved0 : 16;
+ } bits_1;
+ unsigned short word_1;
+ };
+};
+
+struct mss_ingress_lut_addr_ctl_register {
+ union {
+ struct {
+ unsigned int lut_addr : 9;
+ unsigned int reserved0 : 3;
+ /* 0x0 : Ingress Pre-Security MAC Control Filter
+ * (IGPRCTLF) LUT
+ * 0x1 : Ingress Pre-Security Classification LUT (IGPRC)
+ * 0x2 : Ingress Packet Format (IGPFMT) SAKey LUT
+ * 0x3 : Ingress Packet Format (IGPFMT) SC/SA LUT
+ * 0x4 : Ingress Post-Security Classification LUT
+ * (IGPOC)
+ * 0x5 : Ingress Post-Security MAC Control Filter
+ * (IGPOCTLF) LUT
+ * 0x6 : Ingress MIB (IGMIB)
+ */
+ unsigned int lut_select : 4;
+ } bits_0;
+ unsigned short word_0;
+ };
+};
+
+struct mss_ingress_lut_ctl_register {
+ union {
+ struct {
+ unsigned int reserved0 : 14;
+ unsigned int lut_read : 1;
+ unsigned int lut_write : 1;
+ } bits_0;
+ unsigned short word_0;
+ };
+};
+
+#endif /* MSS_INGRESS_REGS_HEADER */
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
new file mode 100644
index 000000000000..97901c114bfa
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -0,0 +1,2473 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "macsec_api.h"
+#include <linux/mdio.h>
+#include "MSS_Ingress_registers.h"
+#include "MSS_Egress_registers.h"
+#include "aq_phy.h"
+
+#define AQ_API_CALL_SAFE(func, ...) \
+({ \
+ int ret; \
+ do { \
+ ret = aq_mss_mdio_sem_get(hw); \
+ if (unlikely(ret)) \
+ break; \
+ \
+ ret = func(__VA_ARGS__); \
+ \
+ aq_mss_mdio_sem_put(hw); \
+ } while (0); \
+ ret; \
+})
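+
+/* Note: the macro body takes the MDIO semaphore via the caller's local
+ * 'hw', so it may only be used where a struct aq_hw_s *hw is in scope;
+ * all record accesses are thereby serialized behind the HW semaphore.
+ */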
+
+/*******************************************************************************
+ * MDIO wrappers
+ ******************************************************************************/
+static int aq_mss_mdio_sem_get(struct aq_hw_s *hw)
+{
+ u32 val;
+
+ return readx_poll_timeout_atomic(hw_atl_sem_mdio_get, hw, val,
+ val == 1U, 10U, 100000U);
+}
+
+static void aq_mss_mdio_sem_put(struct aq_hw_s *hw)
+{
+ hw_atl_reg_glb_cpu_sem_set(hw, 1U, HW_ATL_FW_SM_MDIO);
+}
+
+static int aq_mss_mdio_read(struct aq_hw_s *hw, u16 mmd, u16 addr, u16 *data)
+{
+ *data = aq_mdio_read_word(hw, mmd, addr);
+ return (*data != 0xffff) ? 0 : -ETIME;
+}
+
+static int aq_mss_mdio_write(struct aq_hw_s *hw, u16 mmd, u16 addr, u16 data)
+{
+ aq_mdio_write_word(hw, mmd, addr, data);
+ return 0;
+}
+
+/*******************************************************************************
+ * MACSEC config and status
+ ******************************************************************************/
+
+static int set_raw_ingress_record(struct aq_hw_s *hw, u16 *packed_record,
+ u8 num_words, u8 table_id,
+ u16 table_index)
+{
+ struct mss_ingress_lut_addr_ctl_register lut_sel_reg;
+ struct mss_ingress_lut_ctl_register lut_op_reg;
+
+ unsigned int i;
+
+ /* NOTE: MSS registers must always be read/written as adjacent pairs.
+ * For instance, to write either or both 1E.80A0 and 80A1, we have to:
+ * 1. Write 1E.80A0 first
+ * 2. Then write 1E.80A1
+ *
+ * For HHD devices: These writes need to be performed consecutively, and
+ * to ensure this we use the PIF mailbox to delegate the reads/writes to
+ * the FW.
+ *
+ * For EUR devices: No need to use the PIF mailbox; it is safe to
+ * write to the registers directly.
+ */
+
+ /* Write the packed record words to the data buffer registers. */
+ for (i = 0; i < num_words; i += 2) {
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i,
+ packed_record[i]);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i +
+ 1,
+ packed_record[i + 1]);
+ }
+
+ /* Clear out the unused data buffer registers. */
+ for (i = num_words; i < 24; i += 2) {
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i,
+ 0);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR + i + 1, 0);
+ }
+
+ /* Select the table and row index to write to */
+ lut_sel_reg.bits_0.lut_select = table_id;
+ lut_sel_reg.bits_0.lut_addr = table_index;
+
+ lut_op_reg.bits_0.lut_read = 0;
+ lut_op_reg.bits_0.lut_write = 1;
+
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
+ lut_sel_reg.word_0);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1, MSS_INGRESS_LUT_CTL_REGISTER_ADDR,
+ lut_op_reg.word_0);
+
+ return 0;
+}
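+
+/* Writing MSS_INGRESS_LUT_CTL with lut_write set commits the staged data
+ * buffer words to the row selected in MSS_INGRESS_LUT_ADDR_CTL.
+ */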
+
+/*! Read the specified Ingress LUT table row.
+ * packed_record - [OUT] The table row data (raw).
+ */
+static int get_raw_ingress_record(struct aq_hw_s *hw, u16 *packed_record,
+ u8 num_words, u8 table_id,
+ u16 table_index)
+{
+ struct mss_ingress_lut_addr_ctl_register lut_sel_reg;
+ struct mss_ingress_lut_ctl_register lut_op_reg;
+ int ret;
+
+ unsigned int i;
+
+ /* Select the table and row index to read */
+ lut_sel_reg.bits_0.lut_select = table_id;
+ lut_sel_reg.bits_0.lut_addr = table_index;
+
+ lut_op_reg.bits_0.lut_read = 1;
+ lut_op_reg.bits_0.lut_write = 0;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
+ lut_sel_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_CTL_REGISTER_ADDR,
+ lut_op_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+
+ memset(packed_record, 0, sizeof(u16) * num_words);
+
+ for (i = 0; i < num_words; i += 2) {
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR +
+ i,
+ &packed_record[i]);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_LUT_DATA_CTL_REGISTER_ADDR +
+ i + 1,
+ &packed_record[i + 1]);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+/*! Write packed_record to the specified Egress LUT table row. */
+static int set_raw_egress_record(struct aq_hw_s *hw, u16 *packed_record,
+ u8 num_words, u8 table_id,
+ u16 table_index)
+{
+ struct mss_egress_lut_addr_ctl_register lut_sel_reg;
+ struct mss_egress_lut_ctl_register lut_op_reg;
+
+ unsigned int i;
+
+ /* Write the packed record words to the data buffer registers. */
+ for (i = 0; i < num_words; i += 2) {
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i,
+ packed_record[i]);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i + 1,
+ packed_record[i + 1]);
+ }
+
+ /* Clear out the unused data buffer registers. */
+ for (i = num_words; i < 28; i += 2) {
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i, 0);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR + i + 1,
+ 0);
+ }
+
+ /* Select the table and row index to write to */
+ lut_sel_reg.bits_0.lut_select = table_id;
+ lut_sel_reg.bits_0.lut_addr = table_index;
+
+ lut_op_reg.bits_0.lut_read = 0;
+ lut_op_reg.bits_0.lut_write = 1;
+
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
+ lut_sel_reg.word_0);
+ aq_mss_mdio_write(hw, MDIO_MMD_VEND1, MSS_EGRESS_LUT_CTL_REGISTER_ADDR,
+ lut_op_reg.word_0);
+
+ return 0;
+}
+
+static int get_raw_egress_record(struct aq_hw_s *hw, u16 *packed_record,
+ u8 num_words, u8 table_id,
+ u16 table_index)
+{
+ struct mss_egress_lut_addr_ctl_register lut_sel_reg;
+ struct mss_egress_lut_ctl_register lut_op_reg;
+ int ret;
+
+ unsigned int i;
+
+ /* Select the table and row index to read */
+ lut_sel_reg.bits_0.lut_select = table_id;
+ lut_sel_reg.bits_0.lut_addr = table_index;
+
+ lut_op_reg.bits_0.lut_read = 1;
+ lut_op_reg.bits_0.lut_write = 0;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_ADDR_CTL_REGISTER_ADDR,
+ lut_sel_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_CTL_REGISTER_ADDR,
+ lut_op_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+
+ memset(packed_record, 0, sizeof(u16) * num_words);
+
+ for (i = 0; i < num_words; i += 2) {
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR +
+ i,
+ &packed_record[i]);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_LUT_DATA_CTL_REGISTER_ADDR +
+ i + 1,
+ &packed_record[i + 1]);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+set_ingress_prectlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+
+ if (table_index >= NUMROWS_INGRESSPRECTLFRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 6);
+
+ packed_record[0] = rec->sa_da[0] & 0xFFFF;
+ packed_record[1] = (rec->sa_da[0] >> 16) & 0xFFFF;
+ packed_record[2] = rec->sa_da[1] & 0xFFFF;
+ packed_record[3] = rec->eth_type & 0xFFFF;
+ packed_record[4] = rec->match_mask & 0xFFFF;
+ packed_record[5] = rec->match_type & 0xF;
+ packed_record[5] |= (rec->action & 0x1) << 4;
+
+ return set_raw_ingress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_INGRESSPRECTLFRECORD +
+ table_index);
+}
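+
+/* Row layout produced above (6 x 16-bit words): words 0-2 hold the 48-bit
+ * SA/DA match value, word 3 the Ethertype, word 4 the match mask, and
+ * word 5 packs match_type (bits 3:0) with action (bit 4).
+ */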
+
+int aq_mss_set_ingress_prectlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_ingress_prectlf_record, hw, rec,
+ table_index);
+}
+
+static int get_ingress_prectlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSPRECTLFRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ * This is a workaround for EUR devices that allows us to read
+ * odd-numbered rows. For HHD devices: this workaround will not work,
+ * so don't bother; odd-numbered rows are not readable.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_ingress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_INGRESSPRECTLFRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_ingress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_INGRESSPRECTLFRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->sa_da[0] = packed_record[0];
+ rec->sa_da[0] |= packed_record[1] << 16;
+
+ rec->sa_da[1] = packed_record[2];
+
+ rec->eth_type = packed_record[3];
+
+ rec->match_mask = packed_record[4];
+
+ rec->match_type = packed_record[5] & 0xF;
+
+ rec->action = (packed_record[5] >> 4) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_prectlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_prectlf_record, hw, rec,
+ table_index);
+}
+
+static int
+set_ingress_preclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[20];
+
+ if (table_index >= NUMROWS_INGRESSPRECLASSRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 20);
+
+ packed_record[0] = rec->sci[0] & 0xFFFF;
+ packed_record[1] = (rec->sci[0] >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->sci[1] & 0xFFFF;
+ packed_record[3] = (rec->sci[1] >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->tci & 0xFF;
+
+ packed_record[4] |= (rec->encr_offset & 0xFF) << 8;
+
+ packed_record[5] = rec->eth_type & 0xFFFF;
+
+ packed_record[6] = rec->snap[0] & 0xFFFF;
+ packed_record[7] = (rec->snap[0] >> 16) & 0xFFFF;
+
+ packed_record[8] = rec->snap[1] & 0xFF;
+
+ packed_record[8] |= (rec->llc & 0xFF) << 8;
+ packed_record[9] = (rec->llc >> 8) & 0xFFFF;
+
+ packed_record[10] = rec->mac_sa[0] & 0xFFFF;
+ packed_record[11] = (rec->mac_sa[0] >> 16) & 0xFFFF;
+
+ packed_record[12] = rec->mac_sa[1] & 0xFFFF;
+
+ packed_record[13] = rec->mac_da[0] & 0xFFFF;
+ packed_record[14] = (rec->mac_da[0] >> 16) & 0xFFFF;
+
+ packed_record[15] = rec->mac_da[1] & 0xFFFF;
+
+ packed_record[16] = rec->lpbk_packet & 0x1;
+
+ packed_record[16] |= (rec->an_mask & 0x3) << 1;
+
+ packed_record[16] |= (rec->tci_mask & 0x3F) << 3;
+
+ packed_record[16] |= (rec->sci_mask & 0x7F) << 9;
+ packed_record[17] = (rec->sci_mask >> 7) & 0x1;
+
+ packed_record[17] |= (rec->eth_type_mask & 0x3) << 1;
+
+ packed_record[17] |= (rec->snap_mask & 0x1F) << 3;
+
+ packed_record[17] |= (rec->llc_mask & 0x7) << 8;
+
+ packed_record[17] |= (rec->_802_2_encapsulate & 0x1) << 11;
+
+ packed_record[17] |= (rec->sa_mask & 0xF) << 12;
+ packed_record[18] = (rec->sa_mask >> 4) & 0x3;
+
+ packed_record[18] |= (rec->da_mask & 0x3F) << 2;
+
+ packed_record[18] |= (rec->lpbk_mask & 0x1) << 8;
+
+ packed_record[18] |= (rec->sc_idx & 0x1F) << 9;
+
+ packed_record[18] |= (rec->proc_dest & 0x1) << 14;
+
+ packed_record[18] |= (rec->action & 0x1) << 15;
+ packed_record[19] = (rec->action >> 1) & 0x1;
+
+ packed_record[19] |= (rec->ctrl_unctrl & 0x1) << 1;
+
+ packed_record[19] |= (rec->sci_from_table & 0x1) << 2;
+
+ packed_record[19] |= (rec->reserved & 0xF) << 3;
+
+ packed_record[19] |= (rec->valid & 0x1) << 7;
+
+ return set_raw_ingress_record(hw, packed_record, 20, 1,
+ ROWOFFSET_INGRESSPRECLASSRECORD +
+ table_index);
+}
+
+int aq_mss_set_ingress_preclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_ingress_preclass_record, hw, rec,
+ table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int
+get_ingress_preclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[20];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSPRECLASSRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_ingress_record(hw, packed_record, 20, 1,
+ ROWOFFSET_INGRESSPRECLASSRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_ingress_record(hw, packed_record, 20, 1,
+ ROWOFFSET_INGRESSPRECLASSRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->sci[0] = packed_record[0];
+ rec->sci[0] |= packed_record[1] << 16;
+
+ rec->sci[1] = packed_record[2];
+ rec->sci[1] |= packed_record[3] << 16;
+
+ rec->tci = packed_record[4] & 0xFF;
+
+ rec->encr_offset = (packed_record[4] >> 8) & 0xFF;
+
+ rec->eth_type = packed_record[5];
+
+ rec->snap[0] = packed_record[6];
+ rec->snap[0] |= packed_record[7] << 16;
+
+ rec->snap[1] = packed_record[8] & 0xFF;
+
+ rec->llc = (packed_record[8] >> 8) & 0xFF;
+ rec->llc |= packed_record[9] << 8;
+
+ rec->mac_sa[0] = packed_record[10];
+ rec->mac_sa[0] |= packed_record[11] << 16;
+
+ rec->mac_sa[1] = packed_record[12];
+
+ rec->mac_da[0] = packed_record[13];
+ rec->mac_da[0] |= packed_record[14] << 16;
+
+ rec->mac_da[1] = packed_record[15];
+
+ rec->lpbk_packet = packed_record[16] & 0x1;
+
+ rec->an_mask = (packed_record[16] >> 1) & 0x3;
+
+ rec->tci_mask = (packed_record[16] >> 3) & 0x3F;
+
+ rec->sci_mask = (packed_record[16] >> 9) & 0x7F;
+ rec->sci_mask |= (packed_record[17] & 0x1) << 7;
+
+ rec->eth_type_mask = (packed_record[17] >> 1) & 0x3;
+
+ rec->snap_mask = (packed_record[17] >> 3) & 0x1F;
+
+ rec->llc_mask = (packed_record[17] >> 8) & 0x7;
+
+ rec->_802_2_encapsulate = (packed_record[17] >> 11) & 0x1;
+
+ rec->sa_mask = (packed_record[17] >> 12) & 0xF;
+ rec->sa_mask |= (packed_record[18] & 0x3) << 4;
+
+ rec->da_mask = (packed_record[18] >> 2) & 0x3F;
+
+ rec->lpbk_mask = (packed_record[18] >> 8) & 0x1;
+
+ rec->sc_idx = (packed_record[18] >> 9) & 0x1F;
+
+ rec->proc_dest = (packed_record[18] >> 14) & 0x1;
+
+ rec->action = (packed_record[18] >> 15) & 0x1;
+ rec->action |= (packed_record[19] & 0x1) << 1;
+
+ rec->ctrl_unctrl = (packed_record[19] >> 1) & 0x1;
+
+ rec->sci_from_table = (packed_record[19] >> 2) & 0x1;
+
+ rec->reserved = (packed_record[19] >> 3) & 0xF;
+
+ rec->valid = (packed_record[19] >> 7) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_preclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_preclass_record, hw, rec,
+ table_index);
+}
+
+static int set_ingress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sc_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_INGRESSSCRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->stop_time & 0xFFFF;
+ packed_record[1] = (rec->stop_time >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->start_time & 0xFFFF;
+ packed_record[3] = (rec->start_time >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->validate_frames & 0x3;
+
+ packed_record[4] |= (rec->replay_protect & 0x1) << 2;
+
+ packed_record[4] |= (rec->anti_replay_window & 0x1FFF) << 3;
+ packed_record[5] = (rec->anti_replay_window >> 13) & 0xFFFF;
+ packed_record[6] = (rec->anti_replay_window >> 29) & 0x7;
+
+ packed_record[6] |= (rec->receiving & 0x1) << 3;
+
+ packed_record[6] |= (rec->fresh & 0x1) << 4;
+
+ packed_record[6] |= (rec->an_rol & 0x1) << 5;
+
+ packed_record[6] |= (rec->reserved & 0x3FF) << 6;
+ packed_record[7] = (rec->reserved >> 10) & 0x7FFF;
+
+ packed_record[7] |= (rec->valid & 0x1) << 15;
+
+ return set_raw_ingress_record(hw, packed_record, 8, 3,
+ ROWOFFSET_INGRESSSCRECORD + table_index);
+}
+
+int aq_mss_set_ingress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sc_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_ingress_sc_record, hw, rec, table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_ingress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sc_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSSCRECORD)
+ return -EINVAL;
+
+ ret = get_raw_ingress_record(hw, packed_record, 8, 3,
+ ROWOFFSET_INGRESSSCRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->stop_time = packed_record[0];
+ rec->stop_time |= packed_record[1] << 16;
+
+ rec->start_time = packed_record[2];
+ rec->start_time |= packed_record[3] << 16;
+
+ rec->validate_frames = packed_record[4] & 0x3;
+
+ rec->replay_protect = (packed_record[4] >> 2) & 0x1;
+
+ rec->anti_replay_window = (packed_record[4] >> 3) & 0x1FFF;
+ rec->anti_replay_window |= packed_record[5] << 13;
+ rec->anti_replay_window |= (packed_record[6] & 0x7) << 29;
+
+ rec->receiving = (packed_record[6] >> 3) & 0x1;
+
+ rec->fresh = (packed_record[6] >> 4) & 0x1;
+
+ rec->an_rol = (packed_record[6] >> 5) & 0x1;
+
+ rec->reserved = (packed_record[6] >> 6) & 0x3FF;
+ rec->reserved |= (packed_record[7] & 0x7FFF) << 10;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sc_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_sc_record, hw, rec, table_index);
+}
+
+static int set_ingress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sa_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_INGRESSSARECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->stop_time & 0xFFFF;
+ packed_record[1] = (rec->stop_time >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->start_time & 0xFFFF;
+ packed_record[3] = (rec->start_time >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->next_pn & 0xFFFF;
+ packed_record[5] = (rec->next_pn >> 16) & 0xFFFF;
+
+ packed_record[6] = rec->sat_nextpn & 0x1;
+
+ packed_record[6] |= (rec->in_use & 0x1) << 1;
+
+ packed_record[6] |= (rec->fresh & 0x1) << 2;
+
+ packed_record[6] |= (rec->reserved & 0x1FFF) << 3;
+ packed_record[7] = (rec->reserved >> 13) & 0x7FFF;
+
+ packed_record[7] |= (rec->valid & 0x1) << 15;
+
+ return set_raw_ingress_record(hw, packed_record, 8, 3,
+ ROWOFFSET_INGRESSSARECORD + table_index);
+}
+
+int aq_mss_set_ingress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sa_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_ingress_sa_record, hw, rec, table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_ingress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSSARECORD)
+ return -EINVAL;
+
+ ret = get_raw_ingress_record(hw, packed_record, 8, 3,
+ ROWOFFSET_INGRESSSARECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->stop_time = packed_record[0];
+ rec->stop_time |= packed_record[1] << 16;
+
+ rec->start_time = packed_record[2];
+ rec->start_time |= packed_record[3] << 16;
+
+ rec->next_pn = packed_record[4];
+ rec->next_pn |= packed_record[5] << 16;
+
+ rec->sat_nextpn = packed_record[6] & 0x1;
+
+ rec->in_use = (packed_record[6] >> 1) & 0x1;
+
+ rec->fresh = (packed_record[6] >> 2) & 0x1;
+
+ rec->reserved = (packed_record[6] >> 3) & 0x1FFF;
+ rec->reserved |= (packed_record[7] & 0x7FFF) << 13;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_sa_record, hw, rec, table_index);
+}
+
+static int
+set_ingress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[18];
+
+ if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 18);
+
+ packed_record[0] = rec->key[0] & 0xFFFF;
+ packed_record[1] = (rec->key[0] >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->key[1] & 0xFFFF;
+ packed_record[3] = (rec->key[1] >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->key[2] & 0xFFFF;
+ packed_record[5] = (rec->key[2] >> 16) & 0xFFFF;
+
+ packed_record[6] = rec->key[3] & 0xFFFF;
+ packed_record[7] = (rec->key[3] >> 16) & 0xFFFF;
+
+ packed_record[8] = rec->key[4] & 0xFFFF;
+ packed_record[9] = (rec->key[4] >> 16) & 0xFFFF;
+
+ packed_record[10] = rec->key[5] & 0xFFFF;
+ packed_record[11] = (rec->key[5] >> 16) & 0xFFFF;
+
+ packed_record[12] = rec->key[6] & 0xFFFF;
+ packed_record[13] = (rec->key[6] >> 16) & 0xFFFF;
+
+ packed_record[14] = rec->key[7] & 0xFFFF;
+ packed_record[15] = (rec->key[7] >> 16) & 0xFFFF;
+
+ packed_record[16] = rec->key_len & 0x3;
+
+ return set_raw_ingress_record(hw, packed_record, 18, 2,
+ ROWOFFSET_INGRESSSAKEYRECORD +
+ table_index);
+}
+
+int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_ingress_sakey_record, hw, rec,
+ table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_ingress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[18];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
+ return -EINVAL;
+
+ ret = get_raw_ingress_record(hw, packed_record, 18, 2,
+ ROWOFFSET_INGRESSSAKEYRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->key[0] = packed_record[0];
+ rec->key[0] |= packed_record[1] << 16;
+
+ rec->key[1] = packed_record[2];
+ rec->key[1] |= packed_record[3] << 16;
+
+ rec->key[2] = packed_record[4];
+ rec->key[2] |= packed_record[5] << 16;
+
+ rec->key[3] = packed_record[6];
+ rec->key[3] |= packed_record[7] << 16;
+
+ rec->key[4] = packed_record[8];
+ rec->key[4] |= packed_record[9] << 16;
+
+ rec->key[5] = packed_record[10];
+ rec->key[5] |= packed_record[11] << 16;
+
+ rec->key[6] = packed_record[12];
+ rec->key[6] |= packed_record[13] << 16;
+
+ rec->key[7] = packed_record[14];
+ rec->key[7] |= packed_record[15] << 16;
+
+ rec->key_len = (rec->key_len & 0xFFFFFFFC) |
+ (packed_record[16] & 0x3);
+
+ return 0;
+}
+
+int aq_mss_get_ingress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_sakey_record, hw, rec, table_index);
+}
+
+static int
+set_ingress_postclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_INGRESSPOSTCLASSRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->byte0 & 0xFF;
+
+ packed_record[0] |= (rec->byte1 & 0xFF) << 8;
+
+ packed_record[1] = rec->byte2 & 0xFF;
+
+ packed_record[1] |= (rec->byte3 & 0xFF) << 8;
+
+ packed_record[2] = rec->eth_type & 0xFFFF;
+
+ packed_record[3] = rec->eth_type_valid & 0x1;
+
+ packed_record[3] |= (rec->vlan_id & 0xFFF) << 1;
+
+ packed_record[3] |= (rec->vlan_up & 0x7) << 13;
+
+ packed_record[4] = rec->vlan_valid & 0x1;
+
+ packed_record[4] |= (rec->sai & 0x1F) << 1;
+
+ packed_record[4] |= (rec->sai_hit & 0x1) << 6;
+
+ packed_record[4] |= (rec->eth_type_mask & 0xF) << 7;
+
+ packed_record[4] |= (rec->byte3_location & 0x1F) << 11;
+ packed_record[5] = (rec->byte3_location >> 5) & 0x1;
+
+ packed_record[5] |= (rec->byte3_mask & 0x3) << 1;
+
+ packed_record[5] |= (rec->byte2_location & 0x3F) << 3;
+
+ packed_record[5] |= (rec->byte2_mask & 0x3) << 9;
+
+ packed_record[5] |= (rec->byte1_location & 0x1F) << 11;
+ packed_record[6] = (rec->byte1_location >> 5) & 0x1;
+
+ packed_record[6] |= (rec->byte1_mask & 0x3) << 1;
+
+ packed_record[6] |= (rec->byte0_location & 0x3F) << 3;
+
+ packed_record[6] |= (rec->byte0_mask & 0x3) << 9;
+
+ packed_record[6] |= (rec->eth_type_valid_mask & 0x3) << 11;
+
+ packed_record[6] |= (rec->vlan_id_mask & 0x7) << 13;
+ packed_record[7] = (rec->vlan_id_mask >> 3) & 0x1;
+
+ packed_record[7] |= (rec->vlan_up_mask & 0x3) << 1;
+
+ packed_record[7] |= (rec->vlan_valid_mask & 0x3) << 3;
+
+ packed_record[7] |= (rec->sai_mask & 0x3) << 5;
+
+ packed_record[7] |= (rec->sai_hit_mask & 0x3) << 7;
+
+ packed_record[7] |= (rec->firstlevel_actions & 0x1) << 9;
+
+ packed_record[7] |= (rec->secondlevel_actions & 0x1) << 10;
+
+ packed_record[7] |= (rec->reserved & 0xF) << 11;
+
+ packed_record[7] |= (rec->valid & 0x1) << 15;
+
+ return set_raw_ingress_record(hw, packed_record, 8, 4,
+ ROWOFFSET_INGRESSPOSTCLASSRECORD +
+ table_index);
+}
+
+int aq_mss_set_ingress_postclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_ingress_postclass_record, hw, rec,
+ table_index);
+}
+
+static int
+get_ingress_postclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSPOSTCLASSRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_ingress_record(hw, packed_record, 8, 4,
+ ROWOFFSET_INGRESSPOSTCLASSRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_ingress_record(hw, packed_record, 8, 4,
+ ROWOFFSET_INGRESSPOSTCLASSRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->byte0 = packed_record[0] & 0xFF;
+
+ rec->byte1 = (packed_record[0] >> 8) & 0xFF;
+
+ rec->byte2 = packed_record[1] & 0xFF;
+
+ rec->byte3 = (packed_record[1] >> 8) & 0xFF;
+
+ rec->eth_type = packed_record[2];
+
+ rec->eth_type_valid = packed_record[3] & 0x1;
+
+ rec->vlan_id = (packed_record[3] >> 1) & 0xFFF;
+
+ rec->vlan_up = (packed_record[3] >> 13) & 0x7;
+
+ rec->vlan_valid = packed_record[4] & 0x1;
+
+ rec->sai = (packed_record[4] >> 1) & 0x1F;
+
+ rec->sai_hit = (packed_record[4] >> 6) & 0x1;
+
+ rec->eth_type_mask = (packed_record[4] >> 7) & 0xF;
+
+ rec->byte3_location = (packed_record[4] >> 11) & 0x1F;
+ rec->byte3_location |= (packed_record[5] & 0x1) << 5;
+
+ rec->byte3_mask = (packed_record[5] >> 1) & 0x3;
+
+ rec->byte2_location = (packed_record[5] >> 3) & 0x3F;
+
+ rec->byte2_mask = (packed_record[5] >> 9) & 0x3;
+
+ rec->byte1_location = (packed_record[5] >> 11) & 0x1F;
+ rec->byte1_location |= (packed_record[6] & 0x1) << 5;
+
+ rec->byte1_mask = (packed_record[6] >> 1) & 0x3;
+
+ rec->byte0_location = (packed_record[6] >> 3) & 0x3F;
+
+ rec->byte0_mask = (packed_record[6] >> 9) & 0x3;
+
+ rec->eth_type_valid_mask = (packed_record[6] >> 11) & 0x3;
+
+ rec->vlan_id_mask = (packed_record[6] >> 13) & 0x7;
+ rec->vlan_id_mask |= (packed_record[7] & 0x1) << 3;
+
+ rec->vlan_up_mask = (packed_record[7] >> 1) & 0x3;
+
+ rec->vlan_valid_mask = (packed_record[7] >> 3) & 0x3;
+
+ rec->sai_mask = (packed_record[7] >> 5) & 0x3;
+
+ rec->sai_hit_mask = (packed_record[7] >> 7) & 0x3;
+
+ rec->firstlevel_actions = (packed_record[7] >> 9) & 0x1;
+
+ rec->secondlevel_actions = (packed_record[7] >> 10) & 0x1;
+
+ rec->reserved = (packed_record[7] >> 11) & 0xF;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_postclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_postclass_record, hw, rec,
+ table_index);
+}
+
+static int
+set_ingress_postctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+
+ if (table_index >= NUMROWS_INGRESSPOSTCTLFRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 6);
+
+ packed_record[0] = rec->sa_da[0] & 0xFFFF;
+ packed_record[1] = (rec->sa_da[0] >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->sa_da[1] & 0xFFFF;
+
+ packed_record[3] = rec->eth_type & 0xFFFF;
+
+ packed_record[4] = rec->match_mask & 0xFFFF;
+
+ packed_record[5] = rec->match_type & 0xF;
+
+ packed_record[5] |= (rec->action & 0x1) << 4;
+
+ return set_raw_ingress_record(hw, packed_record, 6, 5,
+ ROWOFFSET_INGRESSPOSTCTLFRECORD +
+ table_index);
+}
+
+int aq_mss_set_ingress_postctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_ingress_postctlf_record, hw, rec,
+ table_index);
+}
+
+static int
+get_ingress_postctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+ int ret;
+
+ if (table_index >= NUMROWS_INGRESSPOSTCTLFRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_ingress_record(hw, packed_record, 6, 5,
+ ROWOFFSET_INGRESSPOSTCTLFRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_ingress_record(hw, packed_record, 6, 5,
+ ROWOFFSET_INGRESSPOSTCTLFRECORD +
+ table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->sa_da[0] = packed_record[0];
+ rec->sa_da[0] |= packed_record[1] << 16;
+
+ rec->sa_da[1] = packed_record[2];
+
+ rec->eth_type = packed_record[3];
+
+ rec->match_mask = packed_record[4];
+
+ rec->match_type = packed_record[5] & 0xF;
+
+ rec->action = (packed_record[5] >> 4) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_ingress_postctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_ingress_postctlf_record, hw, rec,
+ table_index);
+}
+
+static int set_egress_ctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+
+ if (table_index >= NUMROWS_EGRESSCTLFRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 6);
+
+ packed_record[0] = rec->sa_da[0] & 0xFFFF;
+ packed_record[1] = (rec->sa_da[0] >> 16) & 0xFFFF;
+ packed_record[2] = rec->sa_da[1] & 0xFFFF;
+
+ packed_record[3] = rec->eth_type & 0xFFFF;
+
+ packed_record[4] = rec->match_mask & 0xFFFF;
+
+ packed_record[5] = rec->match_type & 0xF;
+
+ packed_record[5] |= (rec->action & 0x1) << 4;
+
+ return set_raw_egress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_EGRESSCTLFRECORD + table_index);
+}
+
+int aq_mss_set_egress_ctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_egress_ctlf_record, hw, rec, table_index);
+}
+
+static int get_egress_ctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[6];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSCTLFRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_egress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_EGRESSCTLFRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_egress_record(hw, packed_record, 6, 0,
+ ROWOFFSET_EGRESSCTLFRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->sa_da[0] = packed_record[0];
+ rec->sa_da[0] |= packed_record[1] << 16;
+
+ rec->sa_da[1] = packed_record[2];
+
+ rec->eth_type = packed_record[3];
+
+ rec->match_mask = packed_record[4];
+
+ rec->match_type = packed_record[5] & 0xF;
+
+ rec->action = (packed_record[5] >> 4) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_egress_ctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_ctlf_record, hw, rec, table_index);
+}
+
+static int set_egress_class_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_class_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[28];
+
+ if (table_index >= NUMROWS_EGRESSCLASSRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 28);
+
+ packed_record[0] = rec->vlan_id & 0xFFF;
+
+ packed_record[0] |= (rec->vlan_up & 0x7) << 12;
+
+ packed_record[0] |= (rec->vlan_valid & 0x1) << 15;
+
+ packed_record[1] = rec->byte3 & 0xFF;
+
+ packed_record[1] |= (rec->byte2 & 0xFF) << 8;
+
+ packed_record[2] = rec->byte1 & 0xFF;
+
+ packed_record[2] |= (rec->byte0 & 0xFF) << 8;
+
+ packed_record[3] = rec->tci & 0xFF;
+
+ packed_record[3] |= (rec->sci[0] & 0xFF) << 8;
+ packed_record[4] = (rec->sci[0] >> 8) & 0xFFFF;
+ packed_record[5] = (rec->sci[0] >> 24) & 0xFF;
+
+ packed_record[5] |= (rec->sci[1] & 0xFF) << 8;
+ packed_record[6] = (rec->sci[1] >> 8) & 0xFFFF;
+ packed_record[7] = (rec->sci[1] >> 24) & 0xFF;
+
+ packed_record[7] |= (rec->eth_type & 0xFF) << 8;
+ packed_record[8] = (rec->eth_type >> 8) & 0xFF;
+
+ packed_record[8] |= (rec->snap[0] & 0xFF) << 8;
+ packed_record[9] = (rec->snap[0] >> 8) & 0xFFFF;
+ packed_record[10] = (rec->snap[0] >> 24) & 0xFF;
+
+ packed_record[10] |= (rec->snap[1] & 0xFF) << 8;
+
+ packed_record[11] = rec->llc & 0xFFFF;
+ packed_record[12] = (rec->llc >> 16) & 0xFF;
+
+ packed_record[12] |= (rec->mac_sa[0] & 0xFF) << 8;
+ packed_record[13] = (rec->mac_sa[0] >> 8) & 0xFFFF;
+ packed_record[14] = (rec->mac_sa[0] >> 24) & 0xFF;
+
+ packed_record[14] |= (rec->mac_sa[1] & 0xFF) << 8;
+ packed_record[15] = (rec->mac_sa[1] >> 8) & 0xFF;
+
+ packed_record[15] |= (rec->mac_da[0] & 0xFF) << 8;
+ packed_record[16] = (rec->mac_da[0] >> 8) & 0xFFFF;
+ packed_record[17] = (rec->mac_da[0] >> 24) & 0xFF;
+
+ packed_record[17] |= (rec->mac_da[1] & 0xFF) << 8;
+ packed_record[18] = (rec->mac_da[1] >> 8) & 0xFF;
+
+ packed_record[18] |= (rec->pn & 0xFF) << 8;
+ packed_record[19] = (rec->pn >> 8) & 0xFFFF;
+ packed_record[20] = (rec->pn >> 24) & 0xFF;
+
+ packed_record[20] |= (rec->byte3_location & 0x3F) << 8;
+
+ packed_record[20] |= (rec->byte3_mask & 0x1) << 14;
+
+ packed_record[20] |= (rec->byte2_location & 0x1) << 15;
+ packed_record[21] = (rec->byte2_location >> 1) & 0x1F;
+
+ packed_record[21] |= (rec->byte2_mask & 0x1) << 5;
+
+ packed_record[21] |= (rec->byte1_location & 0x3F) << 6;
+
+ packed_record[21] |= (rec->byte1_mask & 0x1) << 12;
+
+ packed_record[21] |= (rec->byte0_location & 0x7) << 13;
+ packed_record[22] = (rec->byte0_location >> 3) & 0x7;
+
+ packed_record[22] |= (rec->byte0_mask & 0x1) << 3;
+
+ packed_record[22] |= (rec->vlan_id_mask & 0x3) << 4;
+
+ packed_record[22] |= (rec->vlan_up_mask & 0x1) << 6;
+
+ packed_record[22] |= (rec->vlan_valid_mask & 0x1) << 7;
+
+ packed_record[22] |= (rec->tci_mask & 0xFF) << 8;
+
+ packed_record[23] = rec->sci_mask & 0xFF;
+
+ packed_record[23] |= (rec->eth_type_mask & 0x3) << 8;
+
+ packed_record[23] |= (rec->snap_mask & 0x1F) << 10;
+
+ packed_record[23] |= (rec->llc_mask & 0x1) << 15;
+ packed_record[24] = (rec->llc_mask >> 1) & 0x3;
+
+ packed_record[24] |= (rec->sa_mask & 0x3F) << 2;
+
+ packed_record[24] |= (rec->da_mask & 0x3F) << 8;
+
+ packed_record[24] |= (rec->pn_mask & 0x3) << 14;
+ packed_record[25] = (rec->pn_mask >> 2) & 0x3;
+
+ packed_record[25] |= (rec->eight02dot2 & 0x1) << 2;
+
+ packed_record[25] |= (rec->tci_sc & 0x1) << 3;
+
+ packed_record[25] |= (rec->tci_87543 & 0x1) << 4;
+
+ packed_record[25] |= (rec->exp_sectag_en & 0x1) << 5;
+
+ packed_record[25] |= (rec->sc_idx & 0x1F) << 6;
+
+ packed_record[25] |= (rec->sc_sa & 0x3) << 11;
+
+ packed_record[25] |= (rec->debug & 0x1) << 13;
+
+ packed_record[25] |= (rec->action & 0x3) << 14;
+
+ packed_record[26] = (rec->valid & 0x1) << 3;
+
+ return set_raw_egress_record(hw, packed_record, 28, 1,
+ ROWOFFSET_EGRESSCLASSRECORD + table_index);
+}
+
+int aq_mss_set_egress_class_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_class_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_egress_class_record, hw, rec, table_index);
+}
+
+static int get_egress_class_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_class_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[28];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSCLASSRECORD)
+ return -EINVAL;
+
+ /* If the row that we want to read is odd, first read the previous even
+ * row, throw that value away, and finally read the desired row.
+ */
+ if ((table_index % 2) > 0) {
+ ret = get_raw_egress_record(hw, packed_record, 28, 1,
+ ROWOFFSET_EGRESSCLASSRECORD +
+ table_index - 1);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = get_raw_egress_record(hw, packed_record, 28, 1,
+ ROWOFFSET_EGRESSCLASSRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->vlan_id = packed_record[0] & 0xFFF;
+
+ rec->vlan_up = (packed_record[0] >> 12) & 0x7;
+
+ rec->vlan_valid = (packed_record[0] >> 15) & 0x1;
+
+ rec->byte3 = packed_record[1] & 0xFF;
+
+ rec->byte2 = (packed_record[1] >> 8) & 0xFF;
+
+ rec->byte1 = packed_record[2] & 0xFF;
+
+ rec->byte0 = (packed_record[2] >> 8) & 0xFF;
+
+ rec->tci = packed_record[3] & 0xFF;
+
+ rec->sci[0] = (packed_record[3] >> 8) & 0xFF;
+ rec->sci[0] |= packed_record[4] << 8;
+ rec->sci[0] |= (packed_record[5] & 0xFF) << 24;
+
+ rec->sci[1] = (packed_record[5] >> 8) & 0xFF;
+ rec->sci[1] |= packed_record[6] << 8;
+ rec->sci[1] |= (packed_record[7] & 0xFF) << 24;
+
+ rec->eth_type = (packed_record[7] >> 8) & 0xFF;
+ rec->eth_type |= (packed_record[8] & 0xFF) << 8;
+
+ rec->snap[0] = (packed_record[8] >> 8) & 0xFF;
+ rec->snap[0] |= packed_record[9] << 8;
+ rec->snap[0] |= (packed_record[10] & 0xFF) << 24;
+
+ rec->snap[1] = (packed_record[10] >> 8) & 0xFF;
+
+ rec->llc = packed_record[11];
+ rec->llc |= (packed_record[12] & 0xFF) << 16;
+
+ rec->mac_sa[0] = (packed_record[12] >> 8) & 0xFF;
+ rec->mac_sa[0] |= packed_record[13] << 8;
+ rec->mac_sa[0] |= (packed_record[14] & 0xFF) << 24;
+
+ rec->mac_sa[1] = (packed_record[14] >> 8) & 0xFF;
+ rec->mac_sa[1] |= (packed_record[15] & 0xFF) << 8;
+
+ rec->mac_da[0] = (packed_record[15] >> 8) & 0xFF;
+ rec->mac_da[0] |= packed_record[16] << 8;
+ rec->mac_da[0] |= (packed_record[17] & 0xFF) << 24;
+
+ rec->mac_da[1] = (packed_record[17] >> 8) & 0xFF;
+ rec->mac_da[1] |= (packed_record[18] & 0xFF) << 8;
+
+ rec->pn = (packed_record[18] >> 8) & 0xFF;
+ rec->pn |= packed_record[19] << 8;
+ rec->pn |= (packed_record[20] & 0xFF) << 24;
+
+ rec->byte3_location = (packed_record[20] >> 8) & 0x3F;
+
+ rec->byte3_mask = (packed_record[20] >> 14) & 0x1;
+
+ rec->byte2_location = (packed_record[20] >> 15) & 0x1;
+ rec->byte2_location |= (packed_record[21] & 0x1F) << 1;
+
+ rec->byte2_mask = (packed_record[21] >> 5) & 0x1;
+
+ rec->byte1_location = (packed_record[21] >> 6) & 0x3F;
+
+ rec->byte1_mask = (packed_record[21] >> 12) & 0x1;
+
+ rec->byte0_location = (packed_record[21] >> 13) & 0x7;
+ rec->byte0_location |= (packed_record[22] & 0x7) << 3;
+
+ rec->byte0_mask = (packed_record[22] >> 3) & 0x1;
+
+ rec->vlan_id_mask = (packed_record[22] >> 4) & 0x3;
+
+ rec->vlan_up_mask = (packed_record[22] >> 6) & 0x1;
+
+ rec->vlan_valid_mask = (packed_record[22] >> 7) & 0x1;
+
+ rec->tci_mask = (packed_record[22] >> 8) & 0xFF;
+
+ rec->sci_mask = packed_record[23] & 0xFF;
+
+ rec->eth_type_mask = (packed_record[23] >> 8) & 0x3;
+
+ rec->snap_mask = (packed_record[23] >> 10) & 0x1F;
+
+ rec->llc_mask = (packed_record[23] >> 15) & 0x1;
+ rec->llc_mask |= (packed_record[24] & 0x3) << 1;
+
+ rec->sa_mask = (packed_record[24] >> 2) & 0x3F;
+
+ rec->da_mask = (packed_record[24] >> 8) & 0x3F;
+
+ rec->pn_mask = (packed_record[24] >> 14) & 0x3;
+ rec->pn_mask |= (packed_record[25] & 0x3) << 2;
+
+ rec->eight02dot2 = (packed_record[25] >> 2) & 0x1;
+
+ rec->tci_sc = (packed_record[25] >> 3) & 0x1;
+
+ rec->tci_87543 = (packed_record[25] >> 4) & 0x1;
+
+ rec->exp_sectag_en = (packed_record[25] >> 5) & 0x1;
+
+ rec->sc_idx = (packed_record[25] >> 6) & 0x1F;
+
+ rec->sc_sa = (packed_record[25] >> 11) & 0x3;
+
+ rec->debug = (packed_record[25] >> 13) & 0x1;
+
+ rec->action = (packed_record[25] >> 14) & 0x3;
+
+ rec->valid = (packed_record[26] >> 3) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_egress_class_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_class_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_class_record, hw, rec, table_index);
+}
+
+static int set_egress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sc_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_EGRESSSCRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->start_time & 0xFFFF;
+ packed_record[1] = (rec->start_time >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->stop_time & 0xFFFF;
+ packed_record[3] = (rec->stop_time >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->curr_an & 0x3;
+
+ packed_record[4] |= (rec->an_roll & 0x1) << 2;
+
+ packed_record[4] |= (rec->tci & 0x3F) << 3;
+
+ packed_record[4] |= (rec->enc_off & 0x7F) << 9;
+ packed_record[5] = (rec->enc_off >> 7) & 0x1;
+
+ packed_record[5] |= (rec->protect & 0x1) << 1;
+
+ packed_record[5] |= (rec->recv & 0x1) << 2;
+
+ packed_record[5] |= (rec->fresh & 0x1) << 3;
+
+ packed_record[5] |= (rec->sak_len & 0x3) << 4;
+
+ packed_record[7] |= (rec->valid & 0x1) << 15;
+
+ return set_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSCRECORD + table_index);
+}
+
+int aq_mss_set_egress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sc_record *rec,
+ u16 table_index)
+{
+ return AQ_API_CALL_SAFE(set_egress_sc_record, hw, rec, table_index);
+}
+
+static int get_egress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSSCRECORD)
+ return -EINVAL;
+
+ ret = get_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSCRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->start_time = packed_record[0];
+ rec->start_time |= packed_record[1] << 16;
+
+ rec->stop_time = packed_record[2];
+ rec->stop_time |= packed_record[3] << 16;
+
+ rec->curr_an = packed_record[4] & 0x3;
+
+ rec->an_roll = (packed_record[4] >> 2) & 0x1;
+
+ rec->tci = (packed_record[4] >> 3) & 0x3F;
+
+ rec->enc_off = (packed_record[4] >> 9) & 0x7F;
+ rec->enc_off |= (packed_record[5] & 0x1) << 7;
+
+ rec->protect = (packed_record[5] >> 1) & 0x1;
+
+ rec->recv = (packed_record[5] >> 2) & 0x1;
+
+ rec->fresh = (packed_record[5] >> 3) & 0x1;
+
+ rec->sak_len = (packed_record[5] >> 4) & 0x3;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_sc_record, hw, rec, table_index);
+}
+
+static int set_egress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sa_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+
+ if (table_index >= NUMROWS_EGRESSSARECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 8);
+
+ packed_record[0] = rec->start_time & 0xFFFF;
+ packed_record[1] = (rec->start_time >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->stop_time & 0xFFFF;
+ packed_record[3] = (rec->stop_time >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->next_pn & 0xFFFF;
+ packed_record[5] = (rec->next_pn >> 16) & 0xFFFF;
+
+ packed_record[6] = rec->sat_pn & 0x1;
+
+ packed_record[6] |= (rec->fresh & 0x1) << 1;
+
+ packed_record[7] = (rec->valid & 0x1) << 15;
+
+ return set_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSARECORD + table_index);
+}
+
+int aq_mss_set_egress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sa_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_egress_sa_record, hw, rec, table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_egress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[8];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSSARECORD)
+ return -EINVAL;
+
+ ret = get_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSARECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+
+ rec->start_time = packed_record[0];
+ rec->start_time |= packed_record[1] << 16;
+
+ rec->stop_time = packed_record[2];
+ rec->stop_time |= packed_record[3] << 16;
+
+ rec->next_pn = packed_record[4];
+ rec->next_pn |= packed_record[5] << 16;
+
+ rec->sat_pn = packed_record[6] & 0x1;
+
+ rec->fresh = (packed_record[6] >> 1) & 0x1;
+
+ rec->valid = (packed_record[7] >> 15) & 0x1;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_sa_record, hw, rec, table_index);
+}
+
+static int set_egress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sakey_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[16];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSSAKEYRECORD)
+ return -EINVAL;
+
+ memset(packed_record, 0, sizeof(u16) * 16);
+
+ packed_record[0] = rec->key[0] & 0xFFFF;
+ packed_record[1] = (rec->key[0] >> 16) & 0xFFFF;
+
+ packed_record[2] = rec->key[1] & 0xFFFF;
+ packed_record[3] = (rec->key[1] >> 16) & 0xFFFF;
+
+ packed_record[4] = rec->key[2] & 0xFFFF;
+ packed_record[5] = (rec->key[2] >> 16) & 0xFFFF;
+
+ packed_record[6] = rec->key[3] & 0xFFFF;
+ packed_record[7] = (rec->key[3] >> 16) & 0xFFFF;
+
+ packed_record[8] = rec->key[4] & 0xFFFF;
+ packed_record[9] = (rec->key[4] >> 16) & 0xFFFF;
+
+ packed_record[10] = rec->key[5] & 0xFFFF;
+ packed_record[11] = (rec->key[5] >> 16) & 0xFFFF;
+
+ packed_record[12] = rec->key[6] & 0xFFFF;
+ packed_record[13] = (rec->key[6] >> 16) & 0xFFFF;
+
+ packed_record[14] = rec->key[7] & 0xFFFF;
+ packed_record[15] = (rec->key[7] >> 16) & 0xFFFF;
+
+ ret = set_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSAKEYRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+ ret = set_raw_egress_record(hw, packed_record + 8, 8, 2,
+ ROWOFFSET_EGRESSSAKEYRECORD + table_index -
+ 32);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sakey_record *rec,
+ u16 table_index)
+{
+ int err = AQ_API_CALL_SAFE(set_egress_sakey_record, hw, rec,
+ table_index);
+
+ WARN_ONCE(err, "%s failed with %d\n", __func__, err);
+
+ return err;
+}
+
+static int get_egress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sakey_record *rec,
+ u16 table_index)
+{
+ u16 packed_record[16];
+ int ret;
+
+ if (table_index >= NUMROWS_EGRESSSAKEYRECORD)
+ return -EINVAL;
+
+ ret = get_raw_egress_record(hw, packed_record, 8, 2,
+ ROWOFFSET_EGRESSSAKEYRECORD + table_index);
+ if (unlikely(ret))
+ return ret;
+ ret = get_raw_egress_record(hw, packed_record + 8, 8, 2,
+ ROWOFFSET_EGRESSSAKEYRECORD + table_index -
+ 32);
+ if (unlikely(ret))
+ return ret;
+
+ rec->key[0] = packed_record[0];
+ rec->key[0] |= packed_record[1] << 16;
+
+ rec->key[1] = packed_record[2];
+ rec->key[1] |= packed_record[3] << 16;
+
+ rec->key[2] = packed_record[4];
+ rec->key[2] |= packed_record[5] << 16;
+
+ rec->key[3] = packed_record[6];
+ rec->key[3] |= packed_record[7] << 16;
+
+ rec->key[4] = packed_record[8];
+ rec->key[4] |= packed_record[9] << 16;
+
+ rec->key[5] = packed_record[10];
+ rec->key[5] |= packed_record[11] << 16;
+
+ rec->key[6] = packed_record[12];
+ rec->key[6] |= packed_record[13] << 16;
+
+ rec->key[7] = packed_record[14];
+ rec->key[7] |= packed_record[15] << 16;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sakey_record *rec,
+ u16 table_index)
+{
+ memset(rec, 0, sizeof(*rec));
+
+ return AQ_API_CALL_SAFE(get_egress_sakey_record, hw, rec, table_index);
+}
+
+static int get_egress_sc_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_counters *counters,
+ u16 sc_index)
+{
+ u16 packed_record[4];
+ int ret;
+
+ if (sc_index >= NUMROWS_EGRESSSCRECORD)
+ return -EINVAL;
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 4);
+ if (unlikely(ret))
+ return ret;
+ counters->sc_protected_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sc_protected_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 5);
+ if (unlikely(ret))
+ return ret;
+ counters->sc_encrypted_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sc_encrypted_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 6);
+ if (unlikely(ret))
+ return ret;
+ counters->sc_protected_octets[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sc_protected_octets[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sc_index * 8 + 7);
+ if (unlikely(ret))
+ return ret;
+ counters->sc_encrypted_octets[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sc_encrypted_octets[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_egress_sc_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_counters *counters,
+ u16 sc_index)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_egress_sc_counters, hw, counters, sc_index);
+}
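
Each MIB counter is a 64-bit value fetched as four 16-bit words; the low 32 bits land in element [0] of the destination array and the high 32 bits in element [1]. A sketch of the fold that the reads above repeat inline (illustration only):

/* Fold four 16-bit counter words into the split-u32 form used by the
 * aq_mss_*_counters structs: low 32 bits in counter[0], high in [1].
 */
static void unpack_counter(u32 counter[2], const u16 w[4])
{
	counter[0] = w[0] | ((u32)w[1] << 16);
	counter[1] = w[2] | ((u32)w[3] << 16);
}
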
+
+static int get_egress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_counters *counters,
+ u16 sa_index)
+{
+ u16 packed_record[4];
+ int ret;
+
+ if (sa_index >= NUMROWS_EGRESSSARECORD)
+ return -EINVAL;
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 0);
+ if (unlikely(ret))
+ return ret;
+ counters->sa_hit_drop_redirect[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sa_hit_drop_redirect[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 1);
+ if (unlikely(ret))
+ return ret;
+ counters->sa_protected2_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sa_protected2_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 2);
+ if (unlikely(ret))
+ return ret;
+ counters->sa_protected_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sa_protected_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, sa_index * 8 + 3);
+ if (unlikely(ret))
+ return ret;
+ counters->sa_encrypted_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->sa_encrypted_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_egress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_counters *counters,
+ u16 sa_index)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_egress_sa_counters, hw, counters, sa_index);
+}
+
+static int
+get_egress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_common_counters *counters)
+{
+ u16 packed_record[4];
+ int ret;
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 0);
+ if (unlikely(ret))
+ return ret;
+ counters->ctl_pkt[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->ctl_pkt[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 1);
+ if (unlikely(ret))
+ return ret;
+ counters->unknown_sa_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unknown_sa_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 2);
+ if (unlikely(ret))
+ return ret;
+ counters->untagged_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->untagged_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 3);
+ if (unlikely(ret))
+ return ret;
+ counters->too_long[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->too_long[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 4);
+ if (unlikely(ret))
+ return ret;
+ counters->ecc_error_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ecc_error_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_egress_record(hw, packed_record, 4, 3, 256 + 5);
+ if (unlikely(ret))
+ return ret;
+ counters->unctrl_hit_drop_redir[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unctrl_hit_drop_redir[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_egress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_common_counters *counters)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_egress_common_counters, hw, counters);
+}
+
+static int clear_egress_counters(struct aq_hw_s *hw)
+{
+ struct mss_egress_ctl_register ctl_reg;
+ int ret;
+
+ memset(&ctl_reg, 0, sizeof(ctl_reg));
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1, MSS_EGRESS_CTL_REGISTER_ADDR,
+ &ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR + 4,
+ &ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ /* Toggle the Egress MIB clear bit 0->1->0 */
+ ctl_reg.bits_0.clear_counter = 0;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ ctl_reg.bits_0.clear_counter = 1;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ ctl_reg.bits_0.clear_counter = 0;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int aq_mss_clear_egress_counters(struct aq_hw_s *hw)
+{
+ return AQ_API_CALL_SAFE(clear_egress_counters, hw);
+}
+
+static int get_ingress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_counters *counters,
+ u16 sa_index)
+{
+ u16 packed_record[4];
+ int ret;
+
+ if (sa_index >= NUMROWS_INGRESSSARECORD)
+ return -EINVAL;
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 0);
+ if (unlikely(ret))
+ return ret;
+ counters->untagged_hit_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->untagged_hit_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 1);
+ if (unlikely(ret))
+ return ret;
+ counters->ctrl_hit_drop_redir_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ctrl_hit_drop_redir_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 2);
+ if (unlikely(ret))
+ return ret;
+ counters->not_using_sa[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->not_using_sa[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 3);
+ if (unlikely(ret))
+ return ret;
+ counters->unused_sa[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->unused_sa[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 4);
+ if (unlikely(ret))
+ return ret;
+ counters->not_valid_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->not_valid_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 5);
+ if (unlikely(ret))
+ return ret;
+ counters->invalid_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->invalid_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 6);
+ if (unlikely(ret))
+ return ret;
+ counters->ok_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->ok_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 7);
+ if (unlikely(ret))
+ return ret;
+ counters->late_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->late_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 8);
+ if (unlikely(ret))
+ return ret;
+ counters->delayed_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->delayed_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 9);
+ if (unlikely(ret))
+ return ret;
+ counters->unchecked_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unchecked_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 10);
+ if (unlikely(ret))
+ return ret;
+ counters->validated_octets[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->validated_octets[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6,
+ sa_index * 12 + 11);
+ if (unlikely(ret))
+ return ret;
+ counters->decrypted_octets[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->decrypted_octets[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_ingress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_counters *counters,
+ u16 sa_index)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_ingress_sa_counters, hw, counters,
+ sa_index);
+}
+
+static int
+get_ingress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_common_counters *counters)
+{
+ u16 packed_record[4];
+ int ret;
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 0);
+ if (unlikely(ret))
+ return ret;
+ counters->ctl_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->ctl_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 1);
+ if (unlikely(ret))
+ return ret;
+ counters->tagged_miss_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->tagged_miss_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 2);
+ if (unlikely(ret))
+ return ret;
+ counters->untagged_miss_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->untagged_miss_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 3);
+ if (unlikely(ret))
+ return ret;
+ counters->notag_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->notag_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 4);
+ if (unlikely(ret))
+ return ret;
+ counters->untagged_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->untagged_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 5);
+ if (unlikely(ret))
+ return ret;
+ counters->bad_tag_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->bad_tag_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 6);
+ if (unlikely(ret))
+ return ret;
+ counters->no_sci_pkts[0] = packed_record[0] | (packed_record[1] << 16);
+ counters->no_sci_pkts[1] = packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 7);
+ if (unlikely(ret))
+ return ret;
+ counters->unknown_sci_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unknown_sci_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 8);
+ if (unlikely(ret))
+ return ret;
+ counters->ctrl_prt_pass_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ctrl_prt_pass_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 9);
+ if (unlikely(ret))
+ return ret;
+ counters->unctrl_prt_pass_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unctrl_prt_pass_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 10);
+ if (unlikely(ret))
+ return ret;
+ counters->ctrl_prt_fail_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ctrl_prt_fail_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 11);
+ if (unlikely(ret))
+ return ret;
+ counters->unctrl_prt_fail_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unctrl_prt_fail_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 12);
+ if (unlikely(ret))
+ return ret;
+ counters->too_long_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->too_long_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 13);
+ if (unlikely(ret))
+ return ret;
+ counters->igpoc_ctl_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->igpoc_ctl_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 14);
+ if (unlikely(ret))
+ return ret;
+ counters->ecc_error_pkts[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->ecc_error_pkts[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ ret = get_raw_ingress_record(hw, packed_record, 4, 6, 385 + 15);
+ if (unlikely(ret))
+ return ret;
+ counters->unctrl_hit_drop_redir[0] =
+ packed_record[0] | (packed_record[1] << 16);
+ counters->unctrl_hit_drop_redir[1] =
+ packed_record[2] | (packed_record[3] << 16);
+
+ return 0;
+}
+
+int aq_mss_get_ingress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_common_counters *counters)
+{
+ memset(counters, 0, sizeof(*counters));
+
+ return AQ_API_CALL_SAFE(get_ingress_common_counters, hw, counters);
+}
+
+static int clear_ingress_counters(struct aq_hw_s *hw)
+{
+ struct mss_ingress_ctl_register ctl_reg;
+ int ret;
+
+ memset(&ctl_reg, 0, sizeof(ctl_reg));
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR, &ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
+ &ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ /* Toggle the Ingress MIB clear bit 0->1->0 */
+ ctl_reg.bits_0.clear_count = 0;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ ctl_reg.bits_0.clear_count = 1;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ ctl_reg.bits_0.clear_count = 0;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR, ctl_reg.word_0);
+ if (unlikely(ret))
+ return ret;
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_INGRESS_CTL_REGISTER_ADDR + 4,
+ ctl_reg.word_1);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int aq_mss_clear_ingress_counters(struct aq_hw_s *hw)
+{
+ return AQ_API_CALL_SAFE(clear_ingress_counters, hw);
+}
+
+static int get_egress_sa_expired(struct aq_hw_s *hw, u32 *expired)
+{
+ u16 val;
+ int ret;
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR,
+ &val);
+ if (unlikely(ret))
+ return ret;
+
+ *expired = val;
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR + 1,
+ &val);
+ if (unlikely(ret))
+ return ret;
+
+ *expired |= val << 16;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sa_expired(struct aq_hw_s *hw, u32 *expired)
+{
+ *expired = 0;
+
+ return AQ_API_CALL_SAFE(get_egress_sa_expired, hw, expired);
+}
+
+static int get_egress_sa_threshold_expired(struct aq_hw_s *hw,
+ u32 *expired)
+{
+ u16 val;
+ int ret;
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR, &val);
+ if (unlikely(ret))
+ return ret;
+
+ *expired = val;
+
+ ret = aq_mss_mdio_read(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR + 1, &val);
+ if (unlikely(ret))
+ return ret;
+
+ *expired |= val << 16;
+
+ return 0;
+}
+
+int aq_mss_get_egress_sa_threshold_expired(struct aq_hw_s *hw,
+ u32 *expired)
+{
+ *expired = 0;
+
+ return AQ_API_CALL_SAFE(get_egress_sa_threshold_expired, hw, expired);
+}
+
+static int set_egress_sa_expired(struct aq_hw_s *hw, u32 expired)
+{
+ int ret;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR,
+ expired & 0xFFFF);
+ if (unlikely(ret))
+ return ret;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_EXPIRED_STATUS_REGISTER_ADDR + 1,
+ expired >> 16);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int aq_mss_set_egress_sa_expired(struct aq_hw_s *hw, u32 expired)
+{
+ return AQ_API_CALL_SAFE(set_egress_sa_expired, hw, expired);
+}
+
+static int set_egress_sa_threshold_expired(struct aq_hw_s *hw, u32 expired)
+{
+ int ret;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR,
+ expired & 0xFFFF);
+ if (unlikely(ret))
+ return ret;
+
+ ret = aq_mss_mdio_write(hw, MDIO_MMD_VEND1,
+ MSS_EGRESS_SA_THRESHOLD_EXPIRED_STATUS_REGISTER_ADDR + 1,
+ expired >> 16);
+ if (unlikely(ret))
+ return ret;
+
+ return 0;
+}
+
+int aq_mss_set_egress_sa_threshold_expired(struct aq_hw_s *hw, u32 expired)
+{
+ return AQ_API_CALL_SAFE(set_egress_sa_threshold_expired, hw, expired);
+}
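
The expired and threshold-expired registers are 32-bit status bitmasks read and written as two 16-bit halves. A plausible polling loop built on the getters and setters above; the write-back-to-clear semantics is an assumption here, suggested by the paired set_* helpers rather than stated in the patch:

/* Sketch: poll the expired-SA bitmask and acknowledge whatever is set.
 * Assumes writing a set bit back clears the corresponding status bit.
 */
static void poll_expired_egress_sas(struct aq_hw_s *hw)
{
	u32 expired = 0;

	if (aq_mss_get_egress_sa_expired(hw, &expired))
		return;

	if (expired)	/* each set bit flags one expired egress SA */
		aq_mss_set_egress_sa_expired(hw, expired);
}
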
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.h b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.h
new file mode 100644
index 000000000000..ff03cc462a37
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __MACSEC_API_H__
+#define __MACSEC_API_H__
+
+#include "aq_hw.h"
+#include "macsec_struct.h"
+
+#define NUMROWS_INGRESSPRECTLFRECORD 24
+#define ROWOFFSET_INGRESSPRECTLFRECORD 0
+
+#define NUMROWS_INGRESSPRECLASSRECORD 48
+#define ROWOFFSET_INGRESSPRECLASSRECORD 0
+
+#define NUMROWS_INGRESSPOSTCLASSRECORD 48
+#define ROWOFFSET_INGRESSPOSTCLASSRECORD 0
+
+#define NUMROWS_INGRESSSCRECORD 32
+#define ROWOFFSET_INGRESSSCRECORD 0
+
+#define NUMROWS_INGRESSSARECORD 32
+#define ROWOFFSET_INGRESSSARECORD 32
+
+#define NUMROWS_INGRESSSAKEYRECORD 32
+#define ROWOFFSET_INGRESSSAKEYRECORD 0
+
+#define NUMROWS_INGRESSPOSTCTLFRECORD 24
+#define ROWOFFSET_INGRESSPOSTCTLFRECORD 0
+
+#define NUMROWS_EGRESSCTLFRECORD 24
+#define ROWOFFSET_EGRESSCTLFRECORD 0
+
+#define NUMROWS_EGRESSCLASSRECORD 48
+#define ROWOFFSET_EGRESSCLASSRECORD 0
+
+#define NUMROWS_EGRESSSCRECORD 32
+#define ROWOFFSET_EGRESSSCRECORD 0
+
+#define NUMROWS_EGRESSSARECORD 32
+#define ROWOFFSET_EGRESSSARECORD 32
+
+#define NUMROWS_EGRESSSAKEYRECORD 32
+#define ROWOFFSET_EGRESSSAKEYRECORD 96
+
+/*! Read the raw table data from the specified row of the Egress CTL
+ * Filter table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 23).
+ */
+int aq_mss_get_egress_ctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress CTL Filter table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 23).
+ */
+int aq_mss_set_egress_ctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_ctlf_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Egress
+ * Packet Classifier table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 47).
+ */
+int aq_mss_get_egress_class_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_class_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress Packet Classifier table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 47).
+ */
+int aq_mss_set_egress_class_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_class_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Egress SC
+ * Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress SC Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_egress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sc_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Egress SA
+ * Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress SA Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_egress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sa_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Egress SA
+ * Key Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_egress_sakey_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Egress SA Key Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_egress_sakey_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress
+ * Pre-MACSec CTL Filter table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 23).
+ */
+int aq_mss_get_ingress_prectlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress Pre-MACSec CTL Filter table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 23).
+ */
+int aq_mss_set_ingress_prectlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_prectlf_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress
+ * Pre-MACSec Packet Classifier table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 47).
+ */
+int aq_mss_get_ingress_preclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress Pre-MACSec Packet Classifier table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 47).
+ */
+int aq_mss_set_ingress_preclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_preclass_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress SC
+ * Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_ingress_sc_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sc_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress SC Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_ingress_sc_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sc_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress SA
+ * Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_ingress_sa_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress SA Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_ingress_sa_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sa_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress SA
+ * Key Lookup table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 31).
+ */
+int aq_mss_get_ingress_sakey_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress SA Key Lookup table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 31).
+ */
+int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_sakey_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress
+ * Post-MACSec Packet Classifier table, and unpack it into the
+ * fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 47).
+ */
+int aq_mss_get_ingress_postclass_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress Post-MACSec Packet Classifier table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 47).
+ */
+int aq_mss_set_ingress_postclass_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postclass_record *rec,
+ u16 table_index);
+
+/*! Read the raw table data from the specified row of the Ingress
+ * Post-MACSec CTL Filter table, and unpack it into the fields of rec.
+ * rec - [OUT] The raw table row data will be unpacked into the fields of rec.
+ * table_index - The table row to read (max 23).
+ */
+int aq_mss_get_ingress_postctlf_record(struct aq_hw_s *hw,
+ struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index);
+
+/*! Pack the fields of rec, and write the packed data into the
+ * specified row of the Ingress Post-MACSec CTL Filter table.
+ * rec - [IN] The bitfield values to write to the table row.
+ * table_index - The table row to write (max 23).
+ */
+int aq_mss_set_ingress_postctlf_record(struct aq_hw_s *hw,
+ const struct aq_mss_ingress_postctlf_record *rec,
+ u16 table_index);
+
+/*! Read the counters for the specified SC, and unpack them into the
+ * fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ * sc_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sc_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sc_counters *counters,
+ u16 sc_index);
+
+/*! Read the counters for the specified SA, and unpack them into the
+ * fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ * sa_index - The table row to read (max 31).
+ */
+int aq_mss_get_egress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_sa_counters *counters,
+ u16 sa_index);
+
+/*! Read the counters for the common egress counters, and unpack them
+ * into the fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ */
+int aq_mss_get_egress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_egress_common_counters *counters);
+
+/*! Clear all Egress counters to 0. */
+int aq_mss_clear_egress_counters(struct aq_hw_s *hw);
+
+/*! Read the counters for the specified SA, and unpack them into the
+ * fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ * sa_index - The table row to read (max 31).
+ */
+int aq_mss_get_ingress_sa_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_sa_counters *counters,
+ u16 sa_index);
+
+/*! Read the counters for the common ingress counters, and unpack them
+ * into the fields of counters.
+ * counters - [OUT] The raw table row data will be unpacked here.
+ */
+int aq_mss_get_ingress_common_counters(struct aq_hw_s *hw,
+ struct aq_mss_ingress_common_counters *counters);
+
+/*! Clear all Ingress counters to 0. */
+int aq_mss_clear_ingress_counters(struct aq_hw_s *hw);
+
+/*! Get Egress SA expired. */
+int aq_mss_get_egress_sa_expired(struct aq_hw_s *hw, u32 *expired);
+/*! Get Egress SA threshold expired. */
+int aq_mss_get_egress_sa_threshold_expired(struct aq_hw_s *hw,
+ u32 *expired);
+/*! Set Egress SA expired. */
+int aq_mss_set_egress_sa_expired(struct aq_hw_s *hw, u32 expired);
+/*! Set Egress SA threshold expired. */
+int aq_mss_set_egress_sa_threshold_expired(struct aq_hw_s *hw,
+ u32 expired);
+
+#endif /* __MACSEC_API_H__ */
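
Taken together, bringing up one egress secure association touches three of the tables above. A hedged end-to-end sketch; index 0 and the key-before-SA-before-SC ordering are assumptions for illustration, not requirements stated by this header:

/* Illustration only: program key, SA and SC rows for one egress SA.
 * Real callers populate many more record fields than shown here.
 */
static int setup_egress_sa(struct aq_hw_s *hw,
			   const struct aq_mss_egress_sakey_record *key,
			   const struct aq_mss_egress_sa_record *sa,
			   const struct aq_mss_egress_sc_record *sc)
{
	int ret;

	ret = aq_mss_set_egress_sakey_record(hw, key, 0);
	if (ret)
		return ret;

	ret = aq_mss_set_egress_sa_record(hw, sa, 0);
	if (ret)
		return ret;

	return aq_mss_set_egress_sc_record(hw, sc, 0);
}
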
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
new file mode 100644
index 000000000000..b6119dcc3bb9
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
@@ -0,0 +1,914 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Atlantic Network Driver
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _MACSEC_STRUCT_H_
+#define _MACSEC_STRUCT_H_
+
+/*! Represents the bitfields of a single row in the Egress CTL Filter
+ * table.
+ */
+struct aq_mss_egress_ctlf_record {
+ /*! This is used to store the 48 bit value used to compare SA, DA or
+ * half-DA + half-SA value.
+ */
+ u32 sa_da[2];
+ /*! This is used to store the 16 bit ethertype value used for
+ * comparison.
+ */
+ u32 eth_type;
+ /*! The match mask is per-nibble. 0 means don't care, i.e. every value
+ * will match successfully. The total data is 64 bit, i.e. 16 nibbles
+ * masks.
+ */
+ u32 match_mask;
+ /*! 0: No compare, i.e. this entry is not used
+ * 1: compare DA only
+ * 2: compare SA only
+ * 3: compare half DA + half SA
+ * 4: compare ether type only
+ * 5: compare DA + ethertype
+ * 6: compare SA + ethertype
+ * 7: compare DA + range.
+ */
+ u32 match_type;
+ /*! 0: Bypass the remaining modules if matched.
+ * 1: Forward to next module for more classifications.
+ */
+ u32 action;
+};
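
For a concrete reading of these fields: a row that matches on ethertype alone and lets the frame bypass MACsec processing could be filled as below. This is an illustration; the choice of 0x888E (EAPOL) and the assumption that match_mask 0xF selects the ethertype nibbles are the editor's, not the patch's:

/* Illustration: bypass MACsec for EAPOL (ethertype 0x888E) frames. */
struct aq_mss_egress_ctlf_record ctlf_eapol = {
	.eth_type   = 0x888E,	/* EAPOL */
	.match_mask = 0xF,	/* assumed: ethertype nibbles only */
	.match_type = 4,	/* compare ether type only */
	.action     = 0,	/* bypass remaining modules on match */
};
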
+
+/*! Represents the bitfields of a single row in the Egress Packet
+ * Classifier table.
+ */
+struct aq_mss_egress_class_record {
+ /*! VLAN ID field. */
+ u32 vlan_id;
+ /*! VLAN UP field. */
+ u32 vlan_up;
+ /*! VLAN Present in the Packet. */
+ u32 vlan_valid;
+ /*! The 8 bit value used to compare with extracted value for byte 3. */
+ u32 byte3;
+ /*! The 8 bit value used to compare with extracted value for byte 2. */
+ u32 byte2;
+ /*! The 8 bit value used to compare with extracted value for byte 1. */
+ u32 byte1;
+ /*! The 8 bit value used to compare with extracted value for byte 0. */
+ u32 byte0;
+ /*! The 8 bit TCI field used to compare with extracted value. */
+ u32 tci;
+ /*! The 64 bit SCI field in the SecTAG. */
+ u32 sci[2];
+ /*! The 16 bit Ethertype (in the clear) field used to compare with
+ * extracted value.
+ */
+ u32 eth_type;
+ /*! This is to specify the 40bit SNAP header if the SNAP header's mask
+ * is enabled.
+ */
+ u32 snap[2];
+ /*! This is to specify the 24bit LLC header if the LLC header's mask is
+ * enabled.
+ */
+ u32 llc;
+ /*! The 48 bit MAC_SA field used to compare with extracted value. */
+ u32 mac_sa[2];
+ /*! The 48 bit MAC_DA field used to compare with extracted value. */
+ u32 mac_da[2];
+ /*! The 32 bit Packet number used to compare with extracted value. */
+ u32 pn;
+ /*! 0~63: byte location extracted by the packet comparator; can be
+ * anywhere within the first 64 bytes of the MAC packet.
+ * The location is counted from the MAC DA address, i.e. 0 points
+ * to byte 0 of the DA.
+ */
+ u32 byte3_location;
+ /*! 0: don't care
+ * 1: enable comparison of the extracted byte pointed to by byte 3 location.
+ */
+ u32 byte3_mask;
+ /*! 0~63: byte location extracted by the packet comparator; can be
+ * anywhere within the first 64 bytes of the MAC packet.
+ * The location is counted from the MAC DA address, i.e. 0 points
+ * to byte 0 of the DA.
+ */
+ u32 byte2_location;
+ /*! 0: don't care
+ * 1: enable comparison of the extracted byte pointed to by byte 2 location.
+ */
+ u32 byte2_mask;
+ /*! 0~63: byte location extracted by the packet comparator; can be
+ * anywhere within the first 64 bytes of the MAC packet.
+ * The location is counted from the MAC DA address, i.e. 0 points
+ * to byte 0 of the DA.
+ */
+ u32 byte1_location;
+ /*! 0: don't care
+ * 1: enable comparison of the extracted byte pointed to by byte 1 location.
+ */
+ u32 byte1_mask;
+ /*! 0~63: byte location extracted by the packet comparator; can be
+ * anywhere within the first 64 bytes of the MAC packet.
+ * The location is counted from the MAC DA address, i.e. 0 points
+ * to byte 0 of the DA.
+ */
+ u32 byte0_location;
+ /*! 0: don't care
+ * 1: enable comparison of the extracted byte pointed to by byte 0 location.
+ */
+ u32 byte0_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of extracted VLAN ID field.
+ */
+ u32 vlan_id_mask;
+ /*! 0: don't care
+ * 1: enable comparison of extracted VLAN UP field.
+ */
+ u32 vlan_up_mask;
+ /*! 0: don't care
+ * 1: enable comparison of extracted VLAN Valid field.
+ */
+ u32 vlan_valid_mask;
+ /*! This is a bit mask to enable comparison of the 8 bit TCI field,
+ * including the AN field.
+ * For explicit SECTAG, AN is hardware controlled. When sending a
+ * packet with an explicit SECTAG, the rest of the TCI fields come
+ * directly from the SECTAG.
+ */
+ u32 tci_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of SCI
+ * Note: If this field is not 0, this means the input packet's
+ * SECTAG is explicitly tagged and MACSEC module will only update
+ * the MSDU.
+ * PN number is hardware controlled.
+ */
+ u32 sci_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of Ethertype.
+ */
+ u32 eth_type_mask;
+ /*! Mask is per-byte.
+ * 0: don't care, and no SNAP header exists.
+ * 1: compare the SNAP header.
+ * If this bit is set to 1, the extraction assumes a SNAP header is
+ * present, encapsulated as in 802.3 (RFC 1042), i.e. the 5 bytes
+ * following the LLC header are the SNAP header.
+ */
+ u32 snap_mask;
+ /*! 0: don't care, and no LLC header exists.
+ * 1: compare the LLC header.
+ * If this bit is set to 1, the extraction assumes an LLC header is
+ * present, encapsulated as in 802.3 (RFC 1042), i.e. the three
+ * bytes following the 802.3 MAC header are the LLC header.
+ */
+ u32 llc_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of MAC_SA.
+ */
+ u32 sa_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of MAC_DA.
+ */
+ u32 da_mask;
+ /*! Mask is per-byte. */
+ u32 pn_mask;
+ /*! Reserved. This bit should be always 0. */
+ u32 eight02dot2;
+ /*! 1: For the explicit sectag case, use TCI_SC from the table
+ * 0: use TCI_SC from explicit sectag.
+ */
+ u32 tci_sc;
+ /*! 1: For the explicit sectag case, use TCI_V,ES,SCB,E,C from the table
+ * 0: use TCI_V,ES,SCB,E,C from explicit sectag.
+ */
+ u32 tci_87543;
+ /*! 1: indicates that incoming packet has explicit sectag. */
+ u32 exp_sectag_en;
+ /*! If the packet matches and is tagged as a controlled packet, this
+ * SC/SA index is used for later SC and SA table lookup.
+ */
+ u32 sc_idx;
+ /*! This field is used to specify how many SA entries are
+ * associated with 1 SC entry.
+ * 2'b00: 1 SC has 4 SA.
+ * SC index is equivalent to {SC_Index[4:2], 1'b0}.
+ * SA index is equivalent to {SC_Index[4:2], SC entry's current AN[1:0]}.
+ * 2'b10: 1 SC has 2 SA.
+ * SC index is equivalent to SC_Index[4:1]
+ * SA index is equivalent to {SC_Index[4:1], SC entry's current AN[0]}
+ * 2'b11: 1 SC has 1 SA. No SC entry exists for the specific SA.
+ * SA index is equivalent to SC_Index[4:0]
+ * Note: if specified as 2'b11, hardware AN roll over is not
+ * supported.
+ */
+ u32 sc_sa;
+ /*! 0: the packets will be sent to MAC FIFO
+ * 1: The packets will be sent to Debug/Loopback FIFO.
+ * If the action above is drop, this bit has no meaning.
+ */
+ u32 debug;
+ /*! 0: forward to remaining modules
+ * 1: bypass the next encryption modules. This packet is considered
+ * an uncontrolled packet.
+ * 2: drop
+ * 3: Reserved.
+ */
+ u32 action;
+ /*! 0: Not valid entry. This entry is not used
+ * 1: valid entry.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Egress SC Lookup table. */
+struct aq_mss_egress_sc_record {
+ /*! This is to specify when the SC was first used. Set by HW. */
+ u32 start_time;
+ /*! This is to specify when the SC was last used. Set by HW. */
+ u32 stop_time;
+ /*! This is to specify which of the SA entries are used by current HW.
+ * Note: This value needs to be set by SW after reset. It will be
+ * automatically updated by HW if AN roll over is enabled.
+ */
+ u32 curr_an;
+ /*! 0: Clear the SA Valid Bit after PN expiry.
+ * 1: Do not Clear the SA Valid bit after PN expiry of the current SA.
+ * When the Enable AN roll over is set, S/W does not need to
+ * program the new SAs and the H/W will automatically roll over
+ * between the SAs without session expiry.
+ * For normal operation, Enable AN Roll over will be set to '0',
+ * in which case the SW needs to program the new SA values
+ * after the current PN expires.
+ */
+ u32 an_roll;
+ /*! This is the TCI field used if the packet is not explicitly tagged. */
+ u32 tci;
+ /*! This value indicates the offset where the encryption will start.
+ * Valid values are 0, 4, and 8-50.
+ */
+ u32 enc_off;
+ /*! 0: Do not protect frames, all the packets will be forwarded
+ * unchanged. MIB counter (OutPktsUntagged) will be updated.
+ * 1: Protect.
+ */
+ u32 protect;
+ /*! 0: when none of the SAs related to the SC has inUse set.
+ * 1: when any of the SAs related to the SC has inUse set.
+ * This bit is set by HW.
+ */
+ u32 recv;
+ /*! 0: H/W Clears this bit on the first use.
+ * 1: SW updates this entry, when programming the SC Table.
+ */
+ u32 fresh;
+ /*! AES Key size
+ * 00 - 128bits
+ * 01 - 192bits
+ * 10 - 256bits
+ * 11 - Reserved.
+ */
+ u32 sak_len;
+ /*! 0: Invalid SC
+ * 1: Valid SC.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Egress SA Lookup table. */
+struct aq_mss_egress_sa_record {
+ /*! This is to specify when the SA was first used. Set by HW. */
+ u32 start_time;
+ /*! This is to specify when the SA was last used. Set by HW. */
+ u32 stop_time;
+ /*! This is set by SW and updated by HW to store the Next PN number
+ * used for encryption.
+ */
+ u32 next_pn;
+ /*! The Next_PN number is about to wrap around from 0xFFFF_FFFF
+ * to 0. Set by HW.
+ */
+ u32 sat_pn;
+ /*! 0: This SA is in use.
+ * 1: This SA is Fresh and set by SW.
+ */
+ u32 fresh;
+ /*! 0: Invalid SA
+ * 1: Valid SA.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Egress SA Key
+ * Lookup table.
+ */
+struct aq_mss_egress_sakey_record {
+ /*! Key for AES-GCM processing. */
+ u32 key[8];
+};
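
The key array always spans 256 bits; shorter SAKs simply occupy the low words, with the actual length signalled by the SC record's sak_len. A sketch of loading a 128-bit key (the low-words placement is an assumption consistent with the packing code earlier in the patch):

/* Sketch: place a 16-byte (128-bit) SAK in the low half of key[8]. */
static void load_sak_128(struct aq_mss_egress_sakey_record *key_rec,
			 const u8 sak[16])
{
	memset(key_rec, 0, sizeof(*key_rec));
	memcpy(key_rec->key, sak, 16);
}
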
+
+/*! Represents the bitfields of a single row in the Ingress Pre-MACSec
+ * CTL Filter table.
+ */
+struct aq_mss_ingress_prectlf_record {
+ /*! This is used to store the 48 bit value used to compare SA, DA
+ * or half-DA + half-SA value.
+ */
+ u32 sa_da[2];
+ /*! This is used to store the 16 bit ethertype value used for
+ * comparison.
+ */
+ u32 eth_type;
+ /*! The match mask is per-nibble. 0 means don't care, i.e. every
+ * value will match successfully. The total data is 64 bit, i.e.
+ * 16 nibbles masks.
+ */
+ u32 match_mask;
+ /*! 0: No compare, i.e. this entry is not used
+ * 1: compare DA only
+ * 2: compare SA only
+ * 3: compare half DA + half SA
+ * 4: compare ether type only
+ * 5: compare DA + ethertype
+ * 6: compare SA + ethertype
+ * 7: compare DA + range.
+ */
+ u32 match_type;
+ /*! 0: Bypass the remaining modules if matched.
+ * 1: Forward to next module for more classifications.
+ */
+ u32 action;
+};
+
+/*! Represents the bitfields of a single row in the Ingress Pre-MACSec
+ * Packet Classifier table.
+ */
+struct aq_mss_ingress_preclass_record {
+ /*! The 64 bit SCI field used to compare with extracted value.
+ * Should have SCI value in case TCI[SCI_SEND] == 0. This will be
+ * used for ICV calculation.
+ */
+ u32 sci[2];
+ /*! The 8 bit TCI field used to compare with extracted value. */
+ u32 tci;
+ /*! 8 bit encryption offset. */
+ u32 encr_offset;
+ /*! The 16 bit Ethertype (in the clear) field used to compare with
+ * extracted value.
+ */
+ u32 eth_type;
+ /*! This is to specify the 40bit SNAP header if the SNAP header's
+ * mask is enabled.
+ */
+ u32 snap[2];
+ /*! This is to specify the 24bit LLC header if the LLC header's
+ * mask is enabled.
+ */
+ u32 llc;
+ /*! The 48 bit MAC_SA field used to compare with extracted value. */
+ u32 mac_sa[2];
+ /*! The 48 bit MAC_DA field used to compare with extracted value. */
+ u32 mac_da[2];
+ /*! 0: this is to compare with non-LPBK packet
+ * 1: this is to compare with LPBK packet.
+ * This value is used to compare with a controlled-tag which goes
+ * with the packet when looped back from Egress port.
+ */
+ u32 lpbk_packet;
+ /*! The value of this bit mask affects how the SC index and SA
+ * index are created.
+ * 2'b00: 1 SC has 4 SA.
+ * SC index is equivalent to {SC_Index[4:2], 1'b0}.
+ * SA index is equivalent to {SC_Index[4:2], SECTAG's AN[1:0]}
+ * Here AN bits are not compared.
+ * 2'b10: 1 SC has 2 SA.
+ * SC index is equivalent to SC_Index[4:1]
+ * SA index is equivalent to {SC_Index[4:1], SECTAG's AN[0]}
+ * Compare AN[1] field only
+ * 2'b11: 1 SC has 1 SA. No SC entry exists for the specific SA.
+ * SA index is equivalent to SC_Index[4:0]
+ * AN[1:0] bits are compared.
+ * NOTE: This design supports different usages of AN. The user
+ * can either ping-pong buffer 2 SAs by using only the AN[0] bit,
+ * use 4 SAs per SC by using the AN[1:0] bits, or even treat each
+ * SA as independent, i.e. AN[1:0] is just another matching pointer
+ * to select the SA.
+ */
+ u32 an_mask;
+ /*! This is a bit mask to enable comparison of the upper 6 bits of
+ * the TCI field, which does not include the AN field.
+ * 0: don't compare
+ * 1: enable comparison of the bits.
+ */
+ u32 tci_mask;
+ /*! 0: don't care
+ * 1: enable comparison of SCI.
+ */
+ u32 sci_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of Ethertype.
+ */
+ u32 eth_type_mask;
+ /*! Mask is per-byte.
+ * 0: don't care, and no SNAP header exists.
+ * 1: compare the SNAP header.
+ * If this bit is set to 1, the extraction assumes a SNAP header is
+ * present, encapsulated as in 802.3 (RFC 1042), i.e. the 5 bytes
+ * following the LLC header are the SNAP header.
+ */
+ u32 snap_mask;
+ /*! Mask is per-byte.
+ * 0: don't care, and no LLC header exists.
+ * 1: compare the LLC header.
+ * If this bit is set to 1, the extraction assumes an LLC header is
+ * present, encapsulated as in 802.3 (RFC 1042), i.e. the three
+ * bytes following the 802.3 MAC header are the LLC header.
+ */
+ u32 llc_mask;
+ /*! Reserved. This bit should be always 0. */
+ u32 _802_2_encapsulate;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of MAC_SA.
+ */
+ u32 sa_mask;
+ /*! Mask is per-byte.
+ * 0: don't care
+ * 1: enable comparison of MAC_DA.
+ */
+ u32 da_mask;
+ /*! 0: don't care
+ * 1: enable checking if this is loopback packet or not.
+ */
+ u32 lpbk_mask;
+ /*! If the packet matches and is tagged as a controlled packet, this
+ * SC/SA index is used for later SC and SA table lookup.
+ */
+ u32 sc_idx;
+ /*! 0: the packets will be sent to MAC FIFO
+ * 1: The packets will be sent to Debug/Loopback FIFO.
+ * If the action above is drop, this bit has no meaning.
+ */
+ u32 proc_dest;
+ /*! 0: Process: Forward to next two modules for 802.1AE decryption.
+ * 1: Process but keep SECTAG: Forward to next two modules for
+ * 802.1AE decryption but keep the MACSEC header with added error
+ * code information. ICV will be stripped for all control packets.
+ * 2: Bypass: Bypass the next two decryption modules but processed
+ * by post-classification.
+ * 3: Drop: drop this packet and update counts accordingly.
+ */
+ u32 action;
+ /*! 0: This is a controlled-port packet if matched.
+ * 1: This is an uncontrolled-port packet if matched.
+ */
+ u32 ctrl_unctrl;
+ /*! Use the SCI value from the Table if 'SC' bit of the input
+ * packet is not present.
+ */
+ u32 sci_from_table;
+ /*! Reserved. */
+ u32 reserved;
+ /*! 0: Not valid entry. This entry is not used
+ * 1: valid entry.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Ingress SC Lookup table. */
+struct aq_mss_ingress_sc_record {
+ /*! This is to specify when the SC was last used. Set by HW. */
+ u32 stop_time;
+ /*! This is to specify when the SC was first used. Set by HW. */
+ u32 start_time;
+ /*! 0: Strict
+ * 1: Check
+ * 2: Disabled.
+ */
+ u32 validate_frames;
+ /*! 1: Replay control enabled.
+ * 0: replay control disabled.
+ */
+ u32 replay_protect;
+ /*! This is to specify the window range for anti-replay. Default is 0.
+ * 0: is strict order enforcement.
+ */
+ u32 anti_replay_window;
+ /*! 0: when none of the SAs related to the SC has inUse set.
+ * 1: when any of the SAs related to the SC has inUse set.
+ * This bit is set by HW.
+ */
+ u32 receiving;
+ /*! 0: when hardware processes the SC for the first time, it clears
+ * this bit
+ * 1: This bit is set by SW when it sets up the SC.
+ */
+ u32 fresh;
+ /*! 0: The AN number will not automatically roll over if Next_PN is
+ * saturated.
+ * 1: The AN number will automatically roll over if Next_PN is
+ * saturated.
+ * Rollover is valid only after expiry. Normal rollover between
+ * SAs follows the normal process.
+ */
+ u32 an_rol;
+ /*! Reserved. */
+ u32 reserved;
+ /*! 0: Invalid SC
+ * 1: Valid SC.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Ingress SA Lookup table. */
+struct aq_mss_ingress_sa_record {
+ /*! This is to specify when the SA was last used. Set by HW. */
+ u32 stop_time;
+ /*! This is to specify when the SA was first used. Set by HW. */
+ u32 start_time;
+ /*! This is updated by HW to store the expected NextPN number for
+ * anti-replay.
+ */
+ u32 next_pn;
+ /*! The Next_PN number is about to wrap around from 0xFFFF_FFFF
+ * to 0. Set by HW.
+ */
+ u32 sat_nextpn;
+ /*! 0: This SA is not yet used.
+ * 1: This SA is inUse.
+ */
+ u32 in_use;
+ /*! 0: when hardware processes the SA for the first time, it clears
+ * this bit
+ * 1: This bit is set by SW when it sets up the SA.
+ */
+ u32 fresh;
+ /*! Reserved. */
+ u32 reserved;
+ /*! 0: Invalid SA.
+ * 1: Valid SA.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Ingress SA Key
+ * Lookup table.
+ */
+struct aq_mss_ingress_sakey_record {
+ /*! Key for AES-GCM processing. */
+ u32 key[8];
+ /*! AES key size
+ * 00 - 128bits
+ * 01 - 192bits
+ * 10 - 256bits
+ * 11 - reserved.
+ */
+ u32 key_len;
+};
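
key_len uses the same two-bit encoding as sak_len in the egress SC record. A small (illustrative) mapping from SAK byte count to that encoding:

/* Illustration: 00 = 128-bit, 01 = 192-bit, 10 = 256-bit, 11 = reserved. */
static u32 sak_bytes_to_key_len(unsigned int bytes)
{
	switch (bytes) {
	case 16:
		return 0;
	case 24:
		return 1;
	case 32:
		return 2;
	default:
		return 3;	/* reserved/invalid */
	}
}
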
+
+/*! Represents the bitfields of a single row in the Ingress Post-
+ * MACSec Packet Classifier table.
+ */
+struct aq_mss_ingress_postclass_record {
+ /*! The 8 bit value used to compare with extracted value for byte 0. */
+ u32 byte0;
+ /*! The 8 bit value used to compare with extracted value for byte 1. */
+ u32 byte1;
+ /*! The 8 bit value used to compare with extracted value for byte 2. */
+ u32 byte2;
+ /*! The 8 bit value used to compare with extracted value for byte 3. */
+ u32 byte3;
+ /*! Ethertype in the packet. */
+ u32 eth_type;
+ /*! Ether Type value > 1500 (0x5dc). */
+ u32 eth_type_valid;
+ /*! VLAN ID after parsing. */
+ u32 vlan_id;
+ /*! VLAN priority after parsing. */
+ u32 vlan_up;
+ /*! Valid VLAN coding. */
+ u32 vlan_valid;
+ /*! SA index. */
+ u32 sai;
+ /*! SAI hit, i.e. controlled packet. */
+ u32 sai_hit;
+ /*! Mask for payload ethertype field. */
+ u32 eth_type_mask;
+ /*! 0~63: byte location extracted by the packet comparator; can be
+ * anywhere within the first 64 bytes of the MAC packet.
+ * The location is counted from the MAC DA address, i.e. 0 points
+ * to byte 0 of the DA.
+ */
+ u32 byte3_location;
+ /*! Mask for Byte Offset 3. */
+ u32 byte3_mask;
+ /*! 0~63: byte location extracted by the packet comparator; can be
+ * anywhere within the first 64 bytes of the MAC packet.
+ * The location is counted from the MAC DA address, i.e. 0 points
+ * to byte 0 of the DA.
+ */
+ u32 byte2_location;
+ /*! Mask for Byte Offset 2. */
+ u32 byte2_mask;
+ /*! 0~63: byte location extracted by the packet comparator; can be
+ * anywhere within the first 64 bytes of the MAC packet.
+ * The location is counted from the MAC DA address, i.e. 0 points
+ * to byte 0 of the DA.
+ */
+ u32 byte1_location;
+ /*! Mask for Byte Offset 1. */
+ u32 byte1_mask;
+ /*! 0~63: byte location extracted by the packet comparator; can be
+ * anywhere within the first 64 bytes of the MAC packet.
+ * The location is counted from the MAC DA address, i.e. 0 points
+ * to byte 0 of the DA.
+ */
+ u32 byte0_location;
+ /*! Mask for Byte Offset 0. */
+ u32 byte0_mask;
+ /*! Mask for Ethertype valid field. Indicates 802.3 vs. Other. */
+ u32 eth_type_valid_mask;
+ /*! Mask for VLAN ID field. */
+ u32 vlan_id_mask;
+ /*! Mask for VLAN UP field. */
+ u32 vlan_up_mask;
+ /*! Mask for VLAN valid field. */
+ u32 vlan_valid_mask;
+ /*! Mask for SAI. */
+ u32 sai_mask;
+ /*! Mask for SAI_HIT. */
+ u32 sai_hit_mask;
+ /*! Action if only first level matches and second level does not.
+ * 0: pass
+ * 1: drop (fail).
+ */
+ u32 firstlevel_actions;
+ /*! Action if both first and second level matched.
+ * 0: pass
+ * 1: drop (fail).
+ */
+ u32 secondlevel_actions;
+ /*! Reserved. */
+ u32 reserved;
+ /*! 0: Not valid entry. This entry is not used
+ * 1: valid entry.
+ */
+ u32 valid;
+};
+
+/*! Represents the bitfields of a single row in the Ingress Post-
+ * MACSec CTL Filter table.
+ */
+struct aq_mss_ingress_postctlf_record {
+ /*! This is used to store the 48 bit value used to compare SA, DA
+ * or half-DA + half-SA value.
+ */
+ u32 sa_da[2];
+ /*! This is used to store the 16 bit ethertype value used for
+ * comparison.
+ */
+ u32 eth_type;
+ /*! The match mask is per-nibble. 0 means don't care, i.e. every
+ * value will match successfully. The total data is 64 bit, i.e.
+ * 16 nibbles masks.
+ */
+ u32 match_mask;
+ /*! 0: No compare, i.e. this entry is not used
+ * 1: compare DA only
+ * 2: compare SA only
+ * 3: compare half DA + half SA
+ * 4: compare ether type only
+ * 5: compare DA + ethertype
+ * 6: compare SA + ethertype
+ * 7: compare DA + range.
+ */
+ u32 match_type;
+ /*! 0: Bypass the remaining modules if matched.
+ * 1: Forward to next module for more classifications.
+ */
+ u32 action;
+};
+
+/*! Represents the Egress MIB counters for a single SC. Counters are
+ * 64 bits, lower 32 bits in field[0].
+ */
+struct aq_mss_egress_sc_counters {
+ /*! The number of integrity protected but not encrypted packets
+ * for this transmitting SC.
+ */
+ u32 sc_protected_pkts[2];
+ /*! The number of integrity protected and encrypted packets for
+ * this transmitting SC.
+ */
+ u32 sc_encrypted_pkts[2];
+ /*! The number of plain text octets that are integrity protected
+ * but not encrypted on the transmitting SC.
+ */
+ u32 sc_protected_octets[2];
+ /*! The number of plain text octets that are integrity protected
+ * and encrypted on the transmitting SC.
+ */
+ u32 sc_encrypted_octets[2];
+};
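Because every counter is a 64-bit value stored as two u32 words with the low half in field[0], a caller reassembles them as below (a minimal sketch; the helper name is hypothetical):

	static u64 aq_mss_counter_to_u64(const u32 field[2])
	{
		/* Low 32 bits in field[0], high 32 bits in field[1]. */
		return ((u64)field[1] << 32) | field[0];
	}

For example, the total of protected packets on a SC would be read as aq_mss_counter_to_u64(counters.sc_protected_pkts). The same layout applies to the SA and common egress/ingress counter structures below.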
+
+/*! Represents the Egress MIB counters for a single SA. Counters are
+ * 64 bits, lower 32 bits in field[0].
+ */
+struct aq_mss_egress_sa_counters {
+ /*! The number of dropped packets for this transmitting SA. */
+ u32 sa_hit_drop_redirect[2];
+ /*! TODO */
+ u32 sa_protected2_pkts[2];
+ /*! The number of integrity protected but not encrypted packets
+ * for this transmitting SA.
+ */
+ u32 sa_protected_pkts[2];
+ /*! The number of integrity protected and encrypted packets for
+ * this transmitting SA.
+ */
+ u32 sa_encrypted_pkts[2];
+};
+
+/*! Represents the common Egress MIB counters, i.e. the counters not
+ * associated with a particular SC/SA. Counters are 64 bits, lower 32
+ * bits in field[0].
+ */
+struct aq_mss_egress_common_counters {
+ /*! The number of transmitted packets classified as MAC_CTL packets. */
+ u32 ctl_pkt[2];
+ /*! The number of transmitted packets that did not match any rows
+ * in the Egress Packet Classifier table.
+ */
+ u32 unknown_sa_pkts[2];
+ /*! The number of transmitted packets where the SC table entry has
+ * protect=0 (so packets are forwarded unchanged).
+ */
+ u32 untagged_pkts[2];
+ /*! The number of transmitted packets discarded because the packet
+ * length is greater than the ifMtu of the Common Port interface.
+ */
+ u32 too_long[2];
+ /*! The number of transmitted packets for which table memory was
+ * affected by an ECC error during processing.
+ */
+ u32 ecc_error_pkts[2];
+ /*! The number of transmitted packets where the matched row in
+ * the Egress Packet Classifier table has action=drop.
+ */
+ u32 unctrl_hit_drop_redir[2];
+};
+
+/*! Represents the Ingress MIB counters for a single SA. Counters are
+ * 64 bits, lower 32 bits in field[0].
+ */
+struct aq_mss_ingress_sa_counters {
+ /*! For this SA, the number of received packets without a SecTAG. */
+ u32 untagged_hit_pkts[2];
+ /*! For this SA, the number of received packets that were dropped. */
+ u32 ctrl_hit_drop_redir_pkts[2];
+ /*! For this SA, which is not currently in use, the number of
+ * received packets that have been discarded because either the
+ * packet was encrypted or the matched row in the Ingress SC
+ * Lookup table has validate_frames=Strict.
+ */
+ u32 not_using_sa[2];
+ /*! For this SA, which is not currently in use, the number of
+ * received, unencrypted packets for which the matched row in the
+ * Ingress SC Lookup table has validate_frames!=Strict.
+ */
+ u32 unused_sa[2];
+ /*! For this SA, the number of discarded packets that are not
+ * valid and for which one of the following conditions is true:
+ * either the matched row in the Ingress SC Lookup table has
+ * validate_frames=Strict or the packet is encrypted.
+ */
+ u32 not_valid_pkts[2];
+ /*! For this SA, the number of packets that are not valid and for
+ * which the matched row in the Ingress SC Lookup table has
+ * validate_frames=Check.
+ */
+ u32 invalid_pkts[2];
+ /*! For this SA, the number of validated packets. */
+ u32 ok_pkts[2];
+ /*! For this SC, the number of received packets that have been
+ * discarded because the matched row in the Ingress SC Lookup
+ * table has replay_protect=1 and the PN of the packet is lower
+ * than the lower bound replay check PN.
+ */
+ u32 late_pkts[2];
+ /*! For this SA, the number of packets whose PN is lower than the
+ * lower bound replay protection PN.
+ */
+ u32 delayed_pkts[2];
+ /*! For this SC, the number of packets with the following condition:
+ * - the matched row in the Ingress SC Lookup table has
+ * replay_protect=0 or
+ * - the matched row in the Ingress SC Lookup table has
+ * replay_protect=1 and the packet is not encrypted and the
+ * integrity check has failed or
+ * - the matched row in the Ingress SC Lookup table has
+ * replay_protect=1 and the packet is encrypted and the
+ * integrity check has failed.
+ */
+ u32 unchecked_pkts[2];
+ /*! The number of octets of plaintext recovered from received
+ * packets that were integrity protected but not encrypted.
+ */
+ u32 validated_octets[2];
+ /*! The number of octets of plaintext recovered from received
+ * packets that were integrity protected and encrypted.
+ */
+ u32 decrypted_octets[2];
+};
+
+/*! Represents the common Ingress MIB counters, i.e. the counters not
+ * associated with a particular SA. Counters are 64 bits, lower 32
+ * bits in field[0].
+ */
+struct aq_mss_ingress_common_counters {
+ /*! The number of received packets classified as MAC_CTL packets. */
+ u32 ctl_pkts[2];
+ /*! The number of received packets with the MAC security tag
+ * (SecTAG), not matching any rows in the Ingress Pre-MACSec
+ * Packet Classifier table.
+ */
+ u32 tagged_miss_pkts[2];
+ /*! The number of received packets without the MAC security tag
+ * (SecTAG), not matching any rows in the Ingress Pre-MACSec
+ * Packet Classifier table.
+ */
+ u32 untagged_miss_pkts[2];
+ /*! The number of received packets without the MAC security tag
+ * (SecTAG) that were discarded because the matched row in the
+ * Ingress SC Lookup table has validate_frames=Strict.
+ */
+ u32 notag_pkts[2];
+ /*! The number of received packets without the MAC security tag
+ * (SecTAG) and with the matched row in the Ingress SC Lookup
+ * table having validate_frames!=Strict.
+ */
+ u32 untagged_pkts[2];
+ /*! The number of received packets discarded with an invalid
+ * SecTAG, a zero-value PN, or an invalid ICV.
+ */
+ u32 bad_tag_pkts[2];
+ /*! The number of received packets discarded because the SCI is
+ * unknown and either the matched row in the Ingress SC Lookup
+ * table has validate_frames=Strict or the C bit in the SecTAG is
+ * set.
+ */
+ u32 no_sci_pkts[2];
+ /*! The number of received packets with an unknown SCI for which
+ * the matched row in the Ingress SC Lookup table has
+ * validate_frames!=Strict and the C bit in the SecTAG is not set.
+ */
+ u32 unknown_sci_pkts[2];
+ /*! The number of packets received by the controlled port service
+ * that passed the Ingress Post-MACSec Packet Classifier table
+ * check.
+ */
+ u32 ctrl_prt_pass_pkts[2];
+ /*! The number of packets received by the uncontrolled port
+ * service that passed the Ingress Post-MACSec Packet Classifier
+ * table check.
+ */
+ u32 unctrl_prt_pass_pkts[2];
+ /*! The number of packets received by the controlled port service
+ * that failed the Ingress Post-MACSec Packet Classifier table
+ * check.
+ */
+ u32 ctrl_prt_fail_pkts[2];
+ /*! The number of packets received by the uncontrolled port
+ * service that failed the Ingress Post-MACSec Packet Classifier
+ * table check.
+ */
+ u32 unctrl_prt_fail_pkts[2];
+ /*! The number of received packets discarded because the packet
+ * length is greater than the ifMtu of the Common Port interface.
+ */
+ u32 too_long_pkts[2];
+ /*! The number of received packets classified as MAC_CTL by the
+ * Ingress Post-MACSec CTL Filter table.
+ */
+ u32 igpoc_ctl_pkts[2];
+ /*! The number of received packets for which table memory was
+ * affected by an ECC error during processing.
+ */
+ u32 ecc_error_pkts[2];
+ /*! The number of packets received by the uncontrolled port
+ * service that were dropped.
+ */
+ u32 unctrl_hit_drop_redir[2];
+};
+
+#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
deleted file mode 100644
index 597654b51e01..000000000000
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
- */
-
-#ifndef VER_H
-#define VER_H
-
-#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
-
-#endif /* VER_H */
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index d9efbc8d783b..d820ae03a966 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -130,7 +130,6 @@ struct arc_emac_mdio_bus_data {
*/
struct arc_emac_priv {
const char *drv_name;
- const char *drv_version;
void (*set_mac_speed)(void *priv, unsigned int speed);
/* Devices */
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index 539166112993..1c7736b7eaf7 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -15,7 +15,6 @@
#include "emac.h"
#define DRV_NAME "emac_arc"
-#define DRV_VERSION "1.0"
static int emac_arc_probe(struct platform_device *pdev)
{
@@ -36,7 +35,6 @@ static int emac_arc_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->drv_name = DRV_NAME;
- priv->drv_version = DRV_VERSION;
err = of_get_phy_mode(dev->of_node, &interface);
if (err) {
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 17bda4e8cc45..38cd968b6a3b 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -92,7 +92,6 @@ static void arc_emac_get_drvinfo(struct net_device *ndev,
struct arc_emac_priv *priv = netdev_priv(ndev);
strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
- strlcpy(info->version, priv->drv_version, sizeof(info->version));
}
static const struct ethtool_ops arc_emac_ethtool_ops = {
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index aae231c5224f..48ecdf15eddc 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -16,7 +16,6 @@
#include "emac.h"
#define DRV_NAME "rockchip_emac"
-#define DRV_VERSION "1.1"
struct emac_rockchip_soc_data {
unsigned int grf_offset;
@@ -112,7 +111,6 @@ static int emac_rockchip_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->emac.drv_name = DRV_NAME;
- priv->emac.drv_version = DRV_VERSION;
priv->emac.set_mac_speed = emac_rockchip_set_mac_speed;
err = of_get_phy_mode(dev->of_node, &interface);
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 0058051ba925..2720bde5034e 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -20,7 +20,7 @@ if NET_VENDOR_ATHEROS
config AG71XX
tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support"
depends on ATH79
- select PHYLIB
+ select PHYLINK
help
If you wish to compile a kernel for AR7XXX/91XXX and enable
ethernet support, then you should always answer Y to this.
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index e95687a780fb..02b7705393ca 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -32,6 +32,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
+#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
@@ -314,6 +315,8 @@ struct ag71xx {
dma_addr_t stop_desc_dma;
phy_interface_t phy_if_mode;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
struct delayed_work restart_work;
struct timer_list oom_timer;
@@ -845,24 +848,91 @@ static void ag71xx_hw_start(struct ag71xx *ag)
netif_wake_queue(ag->ndev);
}
-static void ag71xx_link_adjust(struct ag71xx *ag, bool update)
+static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- struct phy_device *phydev = ag->ndev->phydev;
- u32 cfg2;
- u32 ifctl;
- u32 fifo5;
+ struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
- if (!phydev->link && update) {
- ag71xx_hw_stop(ag);
+ if (phylink_autoneg_inband(mode))
return;
- }
if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
ag71xx_fast_reset(ag);
+ if (ag->tx_ring.desc_split) {
+ ag->fifodata[2] &= 0xffff;
+ ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
+ }
+
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
+}
+
+static void ag71xx_mac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ state->interface != PHY_INTERFACE_MODE_MII) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ phylink_set(mask, MII);
+
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ if (state->interface == PHY_INTERFACE_MODE_NA ||
+ state->interface == PHY_INTERFACE_MODE_GMII) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ }
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ state->link = 0;
+}
+
+static void ag71xx_mac_an_restart(struct phylink_config *config)
+{
+ /* Not Supported */
+}
+
+static void ag71xx_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
+
+ ag71xx_hw_stop(ag);
+}
+
+static void ag71xx_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
+ u32 cfg2;
+ u32 ifctl;
+ u32 fifo5;
+
cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
- cfg2 |= (phydev->duplex) ? MAC_CFG2_FDX : 0;
+ cfg2 |= duplex ? MAC_CFG2_FDX : 0;
ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
ifctl &= ~(MAC_IFCTL_SPEED);
@@ -870,7 +940,7 @@ static void ag71xx_link_adjust(struct ag71xx *ag, bool update)
fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
fifo5 &= ~FIFO_CFG5_BM;
- switch (phydev->speed) {
+ switch (speed) {
case SPEED_1000:
cfg2 |= MAC_CFG2_IF_1000;
fifo5 |= FIFO_CFG5_BM;
@@ -883,72 +953,38 @@ static void ag71xx_link_adjust(struct ag71xx *ag, bool update)
cfg2 |= MAC_CFG2_IF_10_100;
break;
default:
- WARN(1, "not supported speed %i\n", phydev->speed);
return;
}
- if (ag->tx_ring.desc_split) {
- ag->fifodata[2] &= 0xffff;
- ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
- }
-
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
-
ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
ag71xx_hw_start(ag);
-
- if (update)
- phy_print_status(phydev);
}
-static void ag71xx_phy_link_adjust(struct net_device *ndev)
-{
- struct ag71xx *ag = netdev_priv(ndev);
-
- ag71xx_link_adjust(ag, true);
-}
+static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
+ .validate = ag71xx_mac_validate,
+ .mac_pcs_get_state = ag71xx_mac_pcs_get_state,
+ .mac_an_restart = ag71xx_mac_an_restart,
+ .mac_config = ag71xx_mac_config,
+ .mac_link_down = ag71xx_mac_link_down,
+ .mac_link_up = ag71xx_mac_link_up,
+};
-static int ag71xx_phy_connect(struct ag71xx *ag)
+static int ag71xx_phylink_setup(struct ag71xx *ag)
{
- struct device_node *np = ag->pdev->dev.of_node;
- struct net_device *ndev = ag->ndev;
- struct device_node *phy_node;
- struct phy_device *phydev;
- int ret;
-
- if (of_phy_is_fixed_link(np)) {
- ret = of_phy_register_fixed_link(np);
- if (ret < 0) {
- netif_err(ag, probe, ndev, "Failed to register fixed PHY link: %d\n",
- ret);
- return ret;
- }
+ struct phylink *phylink;
- phy_node = of_node_get(np);
- } else {
- phy_node = of_parse_phandle(np, "phy-handle", 0);
- }
+ ag->phylink_config.dev = &ag->ndev->dev;
+ ag->phylink_config.type = PHYLINK_NETDEV;
- if (!phy_node) {
- netif_err(ag, probe, ndev, "Could not find valid phy node\n");
- return -ENODEV;
- }
-
- phydev = of_phy_connect(ag->ndev, phy_node, ag71xx_phy_link_adjust,
- 0, ag->phy_if_mode);
-
- of_node_put(phy_node);
-
- if (!phydev) {
- netif_err(ag, probe, ndev, "Could not connect to PHY device\n");
- return -ENODEV;
- }
-
- phy_attached_info(phydev);
+ phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
+ ag->phy_if_mode, &ag71xx_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+ ag->phylink = phylink;
return 0;
}
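Taken together, the ag71xx hunks follow the usual phylink lifecycle; a condensed map of the call pairing this patch introduces (summarizing the hunks above and below):

	/* probe:        ag71xx_phylink_setup() -> phylink_create()
	 * open:         phylink_of_phy_connect() + phylink_start()
	 * stop:         phylink_stop() + phylink_disconnect_phy()
	 * restart_work: phylink_stop() + phylink_start() under rtnl_lock()
	 */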
@@ -1239,6 +1275,13 @@ static int ag71xx_open(struct net_device *ndev)
unsigned int max_frame_len;
int ret;
+ ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
+ if (ret) {
+ netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
+ ret);
+ goto err;
+ }
+
max_frame_len = ag71xx_max_frame_len(ndev->mtu);
ag->rx_buf_size =
SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);
@@ -1251,11 +1294,7 @@ static int ag71xx_open(struct net_device *ndev)
if (ret)
goto err;
- ret = ag71xx_phy_connect(ag);
- if (ret)
- goto err;
-
- phy_start(ndev->phydev);
+ phylink_start(ag->phylink);
return 0;
@@ -1268,8 +1307,8 @@ static int ag71xx_stop(struct net_device *ndev)
{
struct ag71xx *ag = netdev_priv(ndev);
- phy_stop(ndev->phydev);
- phy_disconnect(ndev->phydev);
+ phylink_stop(ag->phylink);
+ phylink_disconnect_phy(ag->phylink);
ag71xx_hw_disable(ag);
return 0;
@@ -1414,13 +1453,14 @@ static void ag71xx_restart_work_func(struct work_struct *work)
{
struct ag71xx *ag = container_of(work, struct ag71xx,
restart_work.work);
- struct net_device *ndev = ag->ndev;
rtnl_lock();
ag71xx_hw_disable(ag);
ag71xx_hw_enable(ag);
- if (ndev->phydev->link)
- ag71xx_link_adjust(ag, false);
+
+ phylink_stop(ag->phylink);
+ phylink_start(ag->phylink);
+
rtnl_unlock();
}
@@ -1759,6 +1799,12 @@ static int ag71xx_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
+ err = ag71xx_phylink_setup(ag);
+ if (err) {
+ netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
+ goto err_mdio_remove;
+ }
+
err = register_netdev(ndev);
if (err) {
netif_err(ag, probe, ndev, "unable to register net device\n");
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 1dcbc486eca9..b9b4edb913c1 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1416,10 +1416,7 @@ static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
0, IPPROTO_TCP, 0);
first->word1 |= 1 << TPD_IPV4_SHIFT;
} else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
/* LSOv2: the first TPD only provides the packet length */
first->adrl.l.pkt_len = skb->len;
first->word1 |= 1 << TPD_LSO_V2_SHIFT;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 60b2febd7315..a0562a90fb6d 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -583,7 +583,6 @@ struct atl1c_adapter {
readl(((a)->hw_addr + reg) + ((offset) << 2)))
extern char atl1c_driver_name[];
-extern char atl1c_driver_version[];
void atl1c_reinit_locked(struct atl1c_adapter *adapter);
s32 atl1c_reset_hw(struct atl1c_hw *hw);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index b5a70a36fa04..e2eb7b8c63a0 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -221,8 +221,6 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
struct atl1c_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, atl1c_driver_version,
- sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 4c0b1f8551dd..00bd7bd55794 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -8,9 +8,7 @@
#include "atl1c.h"
-#define ATL1C_DRV_VERSION "1.0.1.1-NAPI"
char atl1c_driver_name[] = "atl1c";
-char atl1c_driver_version[] = ATL1C_DRV_VERSION;
/*
* atl1c_pci_tbl - PCI Device ID Table
@@ -37,7 +35,6 @@ MODULE_AUTHOR("Jie Yang");
MODULE_AUTHOR("Qualcomm Atheros Inc., <nic-devel@qualcomm.com>");
MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(ATL1C_DRV_VERSION);
static int atl1c_stop_mac(struct atl1c_hw *hw);
static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
@@ -2025,10 +2022,8 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
"IPV6 tso with zero data??\n");
goto check_sum;
} else
- tcp_hdr(skb)->check = ~csum_ipv6_magic(
- &ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
+
etpd->word1 |= 1 << TPD_LSO_EN_SHIFT;
etpd->word1 |= 1 << TPD_LSO_VER_SHIFT;
etpd->pkt_len = cpu_to_le32(skb->len);
@@ -2644,8 +2639,6 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_register;
}
- if (netif_msg_probe(adapter))
- dev_info(&pdev->dev, "version %s\n", ATL1C_DRV_VERSION);
cards_found++;
return 0;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index e9893da50995..9fcad783c939 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -482,7 +482,6 @@ struct atl1e_adapter {
readl(((a)->hw_addr + reg) + ((offset) << 2)))
extern char atl1e_driver_name[];
-extern char atl1e_driver_version[];
void atl1e_check_options(struct atl1e_adapter *adapter);
int atl1e_up(struct atl1e_adapter *adapter);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index c6b9e7ea8e38..0cbde352d1ba 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -307,8 +307,6 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
struct atl1e_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, atl1e_driver_version,
- sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index e0d89942d537..223ef846123e 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -8,10 +8,7 @@
#include "atl1e.h"
-#define DRV_VERSION "1.0.0.7-NAPI"
-
char atl1e_driver_name[] = "ATL1E";
-char atl1e_driver_version[] = DRV_VERSION;
#define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026
/*
* atl1e_pci_tbl - PCI Device ID Table
@@ -33,7 +30,6 @@ MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl);
MODULE_AUTHOR("Atheros Corporation, <xiong.huang@atheros.com>, Jie Yang <jie.yang@atheros.com>");
MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b498fd6a47d0..271e7034fa70 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -65,12 +65,10 @@
#include "atl1.h"
-#define ATLX_DRIVER_VERSION "2.1.3"
MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, "
"Chris Snook <csnook@redhat.com>, "
"Jay Cliburn <jcliburn@gmail.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION(ATLX_DRIVER_VERSION);
/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"
@@ -2965,8 +2963,6 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get device revision number */
adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
(REG_MASTER_CTRL + 2));
- if (netif_msg_probe(adapter))
- dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);
/* set default ring resource counts */
adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
@@ -3344,8 +3340,6 @@ static void atl1_get_drvinfo(struct net_device *netdev,
struct atl1_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
- sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index b81a4e0c5b57..c915852b8892 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -36,18 +36,12 @@
#include "atl2.h"
-#define ATL2_DRV_VERSION "2.2.3"
-
static const char atl2_driver_name[] = "atl2";
-static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
-static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
-static const char atl2_driver_version[] = ATL2_DRV_VERSION;
static const struct ethtool_ops atl2_ethtool_ops;
MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(ATL2_DRV_VERSION);
/*
* atl2_pci_tbl - PCI Device ID Table
@@ -1688,9 +1682,6 @@ static struct pci_driver atl2_driver = {
*/
static int __init atl2_init_module(void)
{
- printk(KERN_INFO "%s - version %s\n", atl2_driver_string,
- atl2_driver_version);
- printk(KERN_INFO "%s\n", atl2_copyright);
return pci_register_driver(&atl2_driver);
}
module_init(atl2_init_module);
@@ -2011,8 +2002,6 @@ static void atl2_get_drvinfo(struct net_device *netdev,
struct atl2_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, atl2_driver_version,
- sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index a780b7215021..6fb620e25208 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -40,7 +40,6 @@
#include "b44.h"
#define DRV_MODULE_NAME "b44"
-#define DRV_MODULE_VERSION "2.0"
#define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver"
#define B44_DEF_MSG_ENABLE \
@@ -97,7 +96,6 @@
MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
@@ -1791,7 +1789,6 @@ static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *inf
struct ssb_bus *bus = bp->sdev->bus;
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
switch (bus->bustype) {
case SSB_BUSTYPE_PCI:
strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
@@ -2347,8 +2344,6 @@ static int b44_init_one(struct ssb_device *sdev,
instance++;
- pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
-
dev = alloc_etherdev(sizeof(*bp));
if (!dev) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 620cd3fc1fbc..916824cca3fd 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -22,7 +22,6 @@
#include "bcm63xx_enet.h"
static char bcm_enet_driver_name[] = "bcm63xx_enet";
-static char bcm_enet_driver_version[] = "1.0";
static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
@@ -1304,9 +1303,6 @@ static void bcm_enet_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, bcm_enet_driver_version,
- sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
@@ -1706,7 +1702,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (!res_irq || !res_irq_rx || !res_irq_tx)
return -ENODEV;
- ret = 0;
dev = alloc_etherdev(sizeof(*priv));
if (!dev)
return -ENOMEM;
@@ -2529,10 +2524,8 @@ static int bcm_enetsw_get_sset_count(struct net_device *netdev,
static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
- strncpy(drvinfo->version, bcm_enet_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, "bcm63xx", 32);
+ strncpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+ strncpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 15b31cddc054..af7ce5c5488c 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -287,7 +287,6 @@ static void bcm_sysport_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, "0.1", sizeof(info->version));
strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}
@@ -624,8 +623,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
return -EINVAL;
if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
- (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
- ec->use_adaptive_tx_coalesce)
+ (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
return -EINVAL;
for (i = 0; i < dev->num_tx_queues; i++)
@@ -2210,6 +2208,9 @@ static int bcm_sysport_set_rxnfc(struct net_device *dev,
}
static const struct ethtool_ops bcm_sysport_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = bcm_sysport_get_drvinfo,
.get_msglevel = bcm_sysport_get_msglvl,
.set_msglevel = bcm_sysport_set_msglvl,
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 1bb07a5d82c9..98ec1b8a7d8e 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1248,6 +1248,14 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
return 0;
}
+static int bgmac_change_mtu(struct net_device *net_dev, int mtu)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + mtu);
+ return 0;
+}
+
static const struct net_device_ops bgmac_netdev_ops = {
.ndo_open = bgmac_open,
.ndo_stop = bgmac_stop,
@@ -1256,6 +1264,7 @@ static const struct net_device_ops bgmac_netdev_ops = {
.ndo_set_mac_address = bgmac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_change_mtu = bgmac_change_mtu,
};
/**************************************************
@@ -1530,6 +1539,9 @@ int bgmac_enet_probe(struct bgmac *bgmac)
net_dev->hw_features = net_dev->features;
net_dev->vlan_features = net_dev->features;
+ /* Omit FCS from max MTU size */
+ net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN;
+
err = register_netdev(bgmac->net_dev);
if (err) {
dev_err(bgmac->dev, "Cannot register net device\n");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 40d02fec2747..351c598a3ec6 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -351,7 +351,7 @@
#define BGMAC_DESC_CTL0_IOC 0x20000000 /* IRQ on complete */
#define BGMAC_DESC_CTL0_EOF 0x40000000 /* End of frame */
#define BGMAC_DESC_CTL0_SOF 0x80000000 /* Start of frame */
-#define BGMAC_DESC_CTL1_LEN 0x00001FFF
+#define BGMAC_DESC_CTL1_LEN 0x00003FFF
#define BGMAC_PHY_NOREGS BRCM_PSEUDO_PHY_ADDR
#define BGMAC_PHY_MASK 0x1F
@@ -366,7 +366,8 @@
#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
#define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \
BGMAC_RX_FRAME_OFFSET)
-#define BGMAC_RX_MAX_FRAME_SIZE 1536 /* Copied from b44/tg3 */
+/* Jumbo frame size with FCS */
+#define BGMAC_RX_MAX_FRAME_SIZE 9724
#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
#define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
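A quick check of the arithmetic behind the new bound (values taken from this patch; ETH_FCS_LEN is 4):

	/* max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN
	 *         = 9724 - 4 = 9720 bytes
	 *
	 * bgmac_change_mtu() then programs BGMAC_RXMAX_LENGTH to
	 * 32 + mtu; that the fixed 32-byte allowance covers the L2
	 * header, FCS and padding is an assumption, not stated in
	 * the patch.
	 */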
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index dbb7874607ca..e1c236cab2a7 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -59,8 +59,6 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.6"
-#define DRV_MODULE_RELDATE "January 29, 2014"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -72,13 +70,9 @@
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5*HZ)
-static char version[] =
- "QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
@@ -7048,7 +7042,6 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct bnx2 *bp = netdev_priv(dev);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
@@ -7819,6 +7812,11 @@ static int bnx2_set_channels(struct net_device *dev,
}
static const struct ethtool_ops bnx2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS,
.get_drvinfo = bnx2_get_drvinfo,
.get_regs_len = bnx2_get_regs_len,
.get_regs = bnx2_get_regs,
@@ -8562,15 +8560,11 @@ static const struct net_device_ops bnx2_netdev_ops = {
static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int version_printed = 0;
struct net_device *dev;
struct bnx2 *bp;
int rc;
char str[40];
- if (version_printed++ == 0)
- pr_info("%s", version);
-
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
if (!dev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 6026b53137aa..4f5b2b81be3d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -32,8 +32,14 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
+/* FIXME: Delete the DRV_MODULE_VERSION below, but be warned that it
+ * is not an easy task: such a change is very likely to break this
+ * driver because of the amount of abuse of in-kernel interfaces
+ * between modules and FW.
+ *
+ *
+ * DO NOT UPDATE DRV_MODULE_VERSION below.
+ */
#define DRV_MODULE_VERSION "1.713.36-0"
-#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 4a0ba6801c9e..7cea33803f7f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1112,13 +1112,6 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
u32 mbi;
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
-
- memset(version, 0, sizeof(version));
- snprintf(version, ETHTOOL_FWVERS_LEN, " storm %d.%d.%d.%d",
- BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION);
- strlcat(info->version, version, sizeof(info->version));
if (SHMEM2_HAS(bp, extended_dev_info_shared_addr)) {
ext_dev_info_offset = SHMEM2_RD(bp,
@@ -3663,6 +3656,7 @@ static int bnx2x_get_ts_info(struct net_device *dev,
}
static const struct ethtool_ops bnx2x_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = bnx2x_get_drvinfo,
.get_regs_len = bnx2x_get_regs_len,
.get_regs = bnx2x_get_regs,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 1c26fa962233..db5107e7937c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -81,17 +81,12 @@
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
-static char version[] =
- "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
- DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("QLogic "
"BCM57710/57711/57711E/"
"57712/57712_MF/57800/57800_MF/57810/57810_MF/"
"57840/57840_MF Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
@@ -14480,8 +14475,6 @@ static int __init bnx2x_init(void)
{
int ret;
- pr_info("%s", version);
-
bnx2x_wq = create_singlethread_workqueue("bnx2x");
if (bnx2x_wq == NULL) {
pr_err("Cannot create workqueue\n");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d28b406a26b1..fead64f1ad90 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -70,12 +70,8 @@
#define BNXT_TX_TIMEOUT (5 * HZ)
-static const char version[] =
- "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
-MODULE_VERSION(DRV_MODULE_VERSION);
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
@@ -2166,6 +2162,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct tx_cmp *txcmp;
cpr->has_more_work = 0;
+ cpr->had_work_done = 1;
while (1) {
int rc;
@@ -2179,7 +2176,6 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
* reading any further.
*/
dma_rmb();
- cpr->had_work_done = 1;
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
@@ -2396,7 +2392,7 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
}
static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
- u64 dbr_type, bool all)
+ u64 dbr_type)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int i;
@@ -2405,7 +2401,7 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
struct bnxt_db_info *db;
- if (cpr2 && (all || cpr2->had_work_done)) {
+ if (cpr2 && cpr2->had_work_done) {
db = &cpr2->cp_db;
writeq(db->db_key64 | dbr_type |
RING_CMP(cpr2->cp_raw_cons), db->doorbell);
@@ -2428,22 +2424,16 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
if (cpr->has_more_work) {
cpr->has_more_work = 0;
work_done = __bnxt_poll_cqs(bp, bnapi, budget);
- if (cpr->has_more_work) {
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
- return work_done;
- }
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
- if (napi_complete_done(napi, work_done))
- BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
- return work_done;
}
while (1) {
cons = RING_CMP(raw_cons);
nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
- false);
+ if (cpr->has_more_work)
+ break;
+
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
cpr->cp_raw_cons = raw_cons;
if (napi_complete_done(napi, work_done))
BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
@@ -2463,16 +2453,17 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
cpr2 = cpr->cp_ring_arr[idx];
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
- cpr->has_more_work = cpr2->has_more_work;
+ cpr->has_more_work |= cpr2->has_more_work;
} else {
bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
}
raw_cons = NEXT_RAW_CMP(raw_cons);
- if (cpr->has_more_work)
- break;
}
- __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
- cpr->cp_raw_cons = raw_cons;
+ __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
+ if (raw_cons != cpr->cp_raw_cons) {
+ cpr->cp_raw_cons = raw_cons;
+ BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
+ }
return work_done;
}
@@ -4170,6 +4161,7 @@ static int bnxt_hwrm_to_stderr(u32 hwrm_err)
case HWRM_ERR_CODE_NO_BUFFER:
return -ENOMEM;
case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
+ case HWRM_ERR_CODE_BUSY:
return -EAGAIN;
case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
return -EOPNOTSUPP;
@@ -5069,10 +5061,8 @@ vnic_mru:
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
{
- u32 rc = 0;
-
if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
struct hwrm_vnic_free_input req = {0};
@@ -5080,10 +5070,9 @@ static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
req.vnic_id =
cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
}
- return rc;
}
static void bnxt_hwrm_vnic_free(struct bnxt *bp)
@@ -5200,14 +5189,13 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
return rc;
}
-static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
+static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
{
u16 i;
- u32 rc = 0;
struct hwrm_ring_grp_free_input req = {0};
if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
- return 0;
+ return;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
@@ -5218,12 +5206,10 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
req.ring_group_id =
cpu_to_le32(bp->grp_info[i].fw_grp_id);
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
}
mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
}
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
@@ -5847,8 +5833,7 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (bp->hwrm_spec_code < 0x10601)
bp->hw_resc.resv_tx_rings = tx_rings;
- rc = bnxt_hwrm_get_rings(bp);
- return rc;
+ return bnxt_hwrm_get_rings(bp);
}
static int
@@ -5869,8 +5854,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (rc)
return rc;
- rc = bnxt_hwrm_get_rings(bp);
- return rc;
+ return bnxt_hwrm_get_rings(bp);
}
static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
@@ -6030,7 +6014,6 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
{
struct hwrm_func_vf_cfg_input req = {0};
u32 flags;
- int rc;
if (!BNXT_NEW_RM(bp))
return 0;
@@ -6047,8 +6030,8 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
req.flags = cpu_to_le32(flags);
- rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message_silent(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
@@ -6057,7 +6040,6 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
{
struct hwrm_func_cfg_input req = {0};
u32 flags;
- int rc;
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
cp_rings, stats, vnics);
@@ -6075,8 +6057,8 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
}
req.flags = cpu_to_le32(flags);
- rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message_silent(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
@@ -6315,16 +6297,16 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
return rc;
}
-static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
+static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
- int rc = 0, i;
struct hwrm_stat_ctx_free_input req = {0};
+ int i;
if (!bp->bnapi)
- return 0;
+ return;
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
- return 0;
+ return;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
@@ -6336,14 +6318,13 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
- rc = _hwrm_send_message(bp, &req, sizeof(req),
- HWRM_CMD_TIMEOUT);
+ _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
}
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
@@ -6548,8 +6529,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
__le64 *pg_dir;
u32 flags = 0;
u8 *pg_attr;
- int i, rc;
u32 ena;
+ int i;
if (!ctx)
return 0;
@@ -6636,8 +6617,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
req.flags = cpu_to_le32(flags);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
@@ -7243,7 +7223,7 @@ static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
- u32 dev_caps_cfg;
+ u32 dev_caps_cfg, hwrm_ver;
int rc;
bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
@@ -7263,6 +7243,19 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_intf_upd_8b);
netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
}
+
+ hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
+ HWRM_VERSION_UPDATE;
+
+ if (bp->hwrm_spec_code > hwrm_ver)
+ snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
+ HWRM_VERSION_UPDATE);
+ else
+ snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b);
+
snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
@@ -7341,7 +7334,6 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp)
static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
- int rc;
struct bnxt_pf_info *pf = &bp->pf;
struct hwrm_port_qstats_input req = {0};
@@ -7352,8 +7344,7 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
req.port_id = cpu_to_le16(pf->port_id);
req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
@@ -7515,7 +7506,6 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
{
struct hwrm_func_cfg_input req = {0};
- int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(0xffff);
@@ -7526,14 +7516,12 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
else
return -EINVAL;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
{
struct hwrm_func_cfg_input req = {0};
- int rc;
if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
return 0;
@@ -7545,8 +7533,7 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
if (size == 128)
req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
@@ -8804,6 +8791,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
+ bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -8899,14 +8887,12 @@ int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
{
struct hwrm_wol_filter_free_input req = {0};
- int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
req.port_id = cpu_to_le16(bp->pf.port_id);
req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
req.wol_filter_id = bp->wol_filter_id;
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
@@ -11464,6 +11450,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_sriov_disable(bp);
bnxt_dl_fw_reporters_destroy(bp, true);
+ if (BNXT_PF(bp))
+ devlink_port_type_clear(&bp->dl_port);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
bnxt_dl_unregister(bp);
@@ -11764,30 +11752,82 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
return rc;
}
+#define BNXT_VPD_LEN 512
+static void bnxt_vpd_read_info(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int i, len, pos, ro_size;
+ ssize_t vpd_size;
+ u8 *vpd_data;
+
+ vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
+ if (!vpd_data)
+ return;
+
+ vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
+ if (vpd_size <= 0) {
+ netdev_err(bp->dev, "Unable to read VPD\n");
+ goto exit;
+ }
+
+ i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ if (i < 0) {
+ netdev_err(bp->dev, "VPD READ-Only not found\n");
+ goto exit;
+ }
+
+ ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
+ i += PCI_VPD_LRDT_TAG_SIZE;
+ if (i + ro_size > vpd_size)
+ goto exit;
+
+ pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
+ PCI_VPD_RO_KEYWORD_PARTNO);
+ if (pos < 0)
+ goto read_sn;
+
+ len = pci_vpd_info_field_size(&vpd_data[pos]);
+ pos += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (len + pos > vpd_size)
+ goto read_sn;
+
+ strlcpy(bp->board_partno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
+
+read_sn:
+ pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO);
+ if (pos < 0)
+ goto exit;
+
+ len = pci_vpd_info_field_size(&vpd_data[pos]);
+ pos += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (len + pos > vpd_size)
+ goto exit;
+
+ strlcpy(bp->board_serialno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
+exit:
+ kfree(vpd_data);
+}
+
static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
struct pci_dev *pdev = bp->pdev;
- int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- u32 dw;
+ u64 qword;
- if (!pos) {
- netdev_info(bp->dev, "Unable do read adapter's DSN\n");
+ qword = pci_get_dsn(pdev);
+ if (!qword) {
+ netdev_info(bp->dev, "Unable to read adapter's DSN\n");
return -EOPNOTSUPP;
}
- /* DSN (two dw) is at an offset of 4 from the cap pos */
- pos += 4;
- pci_read_config_dword(pdev, pos, &dw);
- put_unaligned_le32(dw, &dsn[0]);
- pci_read_config_dword(pdev, pos + 4, &dw);
- put_unaligned_le32(dw, &dsn[4]);
+ put_unaligned_le64(qword, dsn);
+
bp->flags |= BNXT_FLAG_DSN_VALID;
return 0;
}
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int version_printed;
struct net_device *dev;
struct bnxt *bp;
int rc, max_irqs;
@@ -11795,9 +11835,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_is_bridge(pdev))
return -ENODEV;
- if (version_printed++ == 0)
- pr_info("%s", version);
-
/* Clear any pending DMA transactions from crash kernel
* while loading driver in capture kernel.
*/
@@ -11829,6 +11866,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);
+ bnxt_vpd_read_info(bp);
+
rc = bnxt_alloc_hwrm_resources(bp);
if (rc)
goto init_err_pci_clean;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 63b170658532..f2caa2756f5b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,8 +12,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.10.1"
+/* DO NOT CHANGE DRV_VER_* defines
+ * FIXME: Delete them
+ */
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
#define DRV_VER_UPD 1
@@ -1498,6 +1500,10 @@ struct bnxt {
(chip_num) == CHIP_NUM_58804 || \
(chip_num) == CHIP_NUM_58808)
+#define BNXT_VPD_FLD_LEN 32
+ char board_partno[BNXT_VPD_FLD_LEN];
+ char board_serialno[BNXT_VPD_FLD_LEN];
+
struct net_device *dev;
struct pci_dev *pdev;
@@ -1728,6 +1734,7 @@ struct bnxt {
#define BC_HWRM_STR_LEN 21
#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
char fw_ver_str[FW_VER_STR_LEN];
+ char hwrm_ver_supp[FW_VER_STR_LEN];
__be16 vxlan_port;
u8 vxlan_port_cnt;
__le16 vxlan_fw_dst_port_id;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index b1511bcffb1b..02b27551d34d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -39,8 +39,8 @@ static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
struct hwrm_queue_pri2cos_cfg_input req = {0};
- int rc = 0, i;
u8 *pri2cos;
+ int i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
@@ -56,8 +56,7 @@ static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
qidx = bp->tc_to_qidx[ets->prio_tc[i]];
pri2cos[i] = bp->q_info[qidx].queue_id;
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
@@ -93,8 +92,8 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
{
struct hwrm_queue_cos2bw_cfg_input req = {0};
struct bnxt_cos2bw_cfg cos2bw;
- int rc = 0, i;
void *data;
+ int i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
for (i = 0; i < max_tc; i++) {
@@ -128,8 +127,7 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
req.unused_0 = 0;
}
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
@@ -236,7 +234,6 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
unsigned int tc_mask = 0, pri_mask = 0;
u8 i, pri, lltc_count = 0;
bool need_q_remap = false;
- int rc;
if (!my_ets)
return -EINVAL;
@@ -267,15 +264,11 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
}
if (need_q_remap)
- rc = bnxt_queue_remap(bp, tc_mask);
+ bnxt_queue_remap(bp, tc_mask);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
req.flags = cpu_to_le32(pri_mask);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return rc;
-
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index d3c93ccee86a..a812beb46325 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -150,7 +150,7 @@ void bnxt_dl_fw_reporters_create(struct bnxt *bp)
health->fw_reset_reporter =
devlink_health_reporter_create(bp->dl,
&bnxt_dl_fw_reset_reporter_ops,
- 0, true, bp);
+ 0, bp);
if (IS_ERR(health->fw_reset_reporter)) {
netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
PTR_ERR(health->fw_reset_reporter));
@@ -166,7 +166,7 @@ err_recovery:
health->fw_reporter =
devlink_health_reporter_create(bp->dl,
&bnxt_dl_fw_reporter_ops,
- 0, false, bp);
+ 0, bp);
if (IS_ERR(health->fw_reporter)) {
netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
PTR_ERR(health->fw_reporter));
@@ -182,7 +182,7 @@ err_recovery:
health->fw_fatal_reporter =
devlink_health_reporter_create(bp->dl,
&bnxt_dl_fw_fatal_reporter_ops,
- 0, true, bp);
+ 0, bp);
if (IS_ERR(health->fw_fatal_reporter)) {
netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
PTR_ERR(health->fw_fatal_reporter));
@@ -403,6 +403,14 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (rc)
return rc;
+ if (strlen(bp->board_partno)) {
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ bp->board_partno);
+ if (rc)
+ return rc;
+ }
+
sprintf(buf, "%X", bp->chip_num);
rc = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
@@ -471,13 +479,19 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
}
rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_APP, fw_ver);
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, fw_ver);
+ if (rc)
+ return rc;
+
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ bp->hwrm_ver_supp);
if (rc)
return rc;
if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, mgmt_ver);
if (rc)
return rc;
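
The bnxt_dl_info_get() hunks above sort identifiers into devlink's "fixed" bucket (board identity that cannot change at runtime) and "running" bucket (the firmware currently executing). A condensed sketch of the pattern; the demo_ name and both version strings are hypothetical:

#include <net/devlink.h>

static int demo_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
                            struct netlink_ext_ack *extack)
{
        int rc;

        /* Immutable hardware identity: reported as "fixed". */
        rc = devlink_info_version_fixed_put(req,
                                            DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
                                            "BCM957508-P2100G");
        if (rc)
                return rc;

        /* Currently running firmware: reported as "running". */
        return devlink_info_version_running_put(req,
                                                DEVLINK_INFO_VERSION_GENERIC_FW_MGMT,
                                                "216.1.124.0");
}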
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 3f8a1ded662a..34046a6286e8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1236,7 +1236,6 @@ static void bnxt_get_drvinfo(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = bnxt_get_num_stats(bp);
@@ -2605,7 +2604,7 @@ static int bnxt_set_phys_id(struct net_device *dev,
struct bnxt_led_cfg *led_cfg;
u8 led_state;
__le16 duration;
- int i, rc;
+ int i;
if (!bp->num_leds || BNXT_VF(bp))
return -EOPNOTSUPP;
@@ -2631,8 +2630,7 @@ static int bnxt_set_phys_id(struct net_device *dev,
led_cfg->led_blink_off = duration;
led_cfg->led_group_id = bp->leds[i].led_group_id;
}
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
@@ -3471,6 +3469,12 @@ void bnxt_ethtool_free(struct bnxt *bp)
}
const struct ethtool_ops bnxt_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_pauseparam = bnxt_get_pauseparam,
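
The .supported_coalesce_params masks added throughout this series let the ethtool core reject "ethtool -C" requests that touch fields a driver does not implement, returning -EOPNOTSUPP before ->set_coalesce() ever runs; that is why the open-coded field check is deleted from bcmgenet further down. A minimal sketch for a hypothetical driver implementing only rx-usecs and rx-frames (all demo_ names are placeholders):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int demo_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec);
static int demo_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec);

static const struct ethtool_ops demo_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
                                     ETHTOOL_COALESCE_RX_MAX_FRAMES,
        .get_coalesce = demo_get_coalesce,
        /* Requests modifying any other coalesce field are bounced by
         * the core, so demo_set_coalesce() need not re-check them.
         */
        .set_coalesce = demo_set_coalesce,
};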
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 2aba1e02a8f4..6ea3df6da18c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -138,7 +138,6 @@ static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
struct hwrm_func_cfg_input req = {0};
- int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
return 0;
@@ -149,8 +148,7 @@ static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
else
req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- return rc;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 9bec256b0934..782ea0771221 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -279,7 +279,8 @@ bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
static int bnxt_tc_parse_actions(struct bnxt *bp,
struct bnxt_tc_actions *actions,
- struct flow_action *flow_action)
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
{
/* Used to store the L2 rewrite mask for dmac (6 bytes) followed by
* smac (6 bytes) if rewrite of both is specified, otherwise either
@@ -299,6 +300,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
return -EINVAL;
}
+ if (!flow_action_basic_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_DROP:
@@ -491,7 +495,8 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
flow->tun_mask.tp_src = match.mask->src;
}
- return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action);
+ return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action,
+ tc_flow_cmd->common.extack);
}
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
@@ -1634,7 +1639,7 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
spin_unlock(&flow->stats_lock);
flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets,
- lastused);
+ lastused, FLOW_ACTION_HW_STATS_DELAYED);
return 0;
}
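
bnxt_tc now takes part in the flow_offload hw_stats negotiation: it must validate the requested statistics type while parsing actions, and declare, when dumping counters, how they were gathered. A sketch of both halves for a hypothetical driver whose counters are polled (hence "delayed"); demo_ names are placeholders:

#include <net/flow_offload.h>

static int demo_check_actions(struct flow_rule *rule,
                              struct netlink_ext_ack *extack)
{
        /* Refuse offload unless the requested hw_stats type is the
         * basic one this driver implements ("any" also passes).
         */
        if (!flow_action_basic_hw_stats_check(&rule->action, extack))
                return -EOPNOTSUPP;
        return 0;
}

static void demo_report_stats(struct flow_cls_offload *cls,
                              u64 bytes, u64 pkts, u64 lastused)
{
        /* Counters come from periodic polling, so report "delayed". */
        flow_stats_update(&cls->stats, bytes, pkts, lastused,
                          FLOW_ACTION_HW_STATS_DELAYED);
}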
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 6f2faf81c1ae..4b5c8fd76a51 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -219,7 +219,6 @@ static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 1d678bee2cc9..d975338bf78d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) "bcmgenet: " fmt
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -621,10 +622,6 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
* always generate an interrupt either after MBDONE packets have been
* transmitted, or when the ring is empty.
*/
- if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
- ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low ||
- ec->use_adaptive_tx_coalesce)
- return -EOPNOTSUPP;
/* Program all TX queues with the same values, as there is no
* ethtool knob to do coalescing on a per-queue basis
@@ -814,7 +811,6 @@ static void bcmgenet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
- strlcpy(info->version, "v2.0", sizeof(info->version));
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -1049,6 +1045,9 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.begin = bcmgenet_begin,
.complete = bcmgenet_complete,
.get_strings = bcmgenet_get_strings,
@@ -2713,6 +2712,21 @@ static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}
+static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
+ unsigned char *addr)
+{
+ u32 addr_tmp;
+
+ addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0);
+ addr[0] = addr_tmp >> 24;
+ addr[1] = (addr_tmp >> 16) & 0xff;
+ addr[2] = (addr_tmp >> 8) & 0xff;
+ addr[3] = addr_tmp & 0xff;
+ addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
+ addr[4] = (addr_tmp >> 8) & 0xff;
+ addr[5] = addr_tmp & 0xff;
+}
+
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
@@ -3408,10 +3422,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
const struct bcmgenet_plat_data *pdata;
struct bcmgenet_priv *priv;
struct net_device *dev;
- const void *macaddr;
unsigned int i;
int err = -EIO;
- const char *phy_mode_str;
/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
@@ -3440,11 +3452,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
}
priv->wol_irq = platform_get_irq_optional(pdev, 2);
- if (dn)
- macaddr = of_get_mac_address(dn);
- else
- macaddr = pd->mac_address;
-
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
err = PTR_ERR(priv->base);
@@ -3455,12 +3462,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
dev_set_drvdata(&pdev->dev, dev);
- if (IS_ERR_OR_NULL(macaddr) || !is_valid_ether_addr(macaddr)) {
- dev_warn(&pdev->dev, "using random Ethernet MAC\n");
- eth_hw_addr_random(dev);
- } else {
- ether_addr_copy(dev->dev_addr, macaddr);
- }
dev->watchdog_timeo = 2 * HZ;
dev->ethtool_ops = &bcmgenet_ethtool_ops;
dev->netdev_ops = &bcmgenet_netdev_ops;
@@ -3489,8 +3490,9 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->dev = dev;
priv->pdev = pdev;
- if (of_id) {
- pdata = of_id->data;
+
+ pdata = device_get_match_data(&pdev->dev);
+ if (pdata) {
priv->version = pdata->version;
priv->dma_max_burst_length = pdata->dma_max_burst_length;
} else {
@@ -3500,7 +3502,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
if (IS_ERR(priv->clk)) {
- dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+ dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
priv->clk = NULL;
}
@@ -3524,23 +3526,34 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
if (IS_ERR(priv->clk_wol)) {
- dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+ dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
priv->clk_wol = NULL;
}
priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
if (IS_ERR(priv->clk_eee)) {
- dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
+ dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
priv->clk_eee = NULL;
}
/* If this is an internal GPHY, power it on now, before UniMAC is
* brought out of reset as absolutely no UniMAC activity is allowed
*/
- if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
- !strcasecmp(phy_mode_str, "internal"))
+ if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+ if (pd && !IS_ERR_OR_NULL(pd->mac_address))
+ ether_addr_copy(dev->dev_addr, pd->mac_address);
+ else if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN) &&
+ has_acpi_companion(&pdev->dev))
+ bcmgenet_get_hw_addr(priv, dev->dev_addr);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ dev_warn(&pdev->dev, "using random Ethernet MAC\n");
+ eth_hw_addr_random(dev);
+ }
+
reset_umac(priv);
err = bcmgenet_mii_init(dev);
@@ -3713,6 +3726,12 @@ static int bcmgenet_suspend(struct device *d)
static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
+static const struct acpi_device_id genet_acpi_match[] = {
+ { "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
+
static struct platform_driver bcmgenet_driver = {
.probe = bcmgenet_probe,
.remove = bcmgenet_remove,
@@ -3721,6 +3740,7 @@ static struct platform_driver bcmgenet_driver = {
.name = "bcmgenet",
.of_match_table = bcmgenet_match,
.pm = &bcmgenet_pm_ops,
+ .acpi_match_table = ACPI_PTR(genet_acpi_match),
},
};
module_platform_driver(bcmgenet_driver);
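
The probe rework above replaces OF-only lookups with fwnode-neutral device_* helpers so that one code path serves DT, ACPI and board files. A stripped-down sketch of the MAC address fallback chain (demo_probe_mac is hypothetical; the real driver additionally reads the UniMAC registers on ACPI systems):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static void demo_probe_mac(struct platform_device *pdev, struct net_device *dev)
{
        /* Works for both DT properties and ACPI _DSD. */
        if (device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN))
                return;

        /* Same last resort as the hunk above. */
        dev_warn(&pdev->dev, "using random Ethernet MAC\n");
        eth_hw_addr_random(dev);
}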
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index b5930f80039d..511d553a4d11 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -5,7 +5,7 @@
* Copyright (c) 2014-2017 Broadcom
*/
-
+#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/wait.h>
@@ -284,7 +284,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
int bcmgenet_mii_probe(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device_node *dn = priv->pdev->dev.of_node;
+ struct device *kdev = &priv->pdev->dev;
+ struct device_node *dn = kdev->of_node;
struct phy_device *phydev;
u32 phy_flags = 0;
int ret;
@@ -307,7 +308,27 @@ int bcmgenet_mii_probe(struct net_device *dev)
return -ENODEV;
}
} else {
- phydev = dev->phydev;
+ if (has_acpi_companion(kdev)) {
+ char mdio_bus_id[MII_BUS_ID_SIZE];
+ struct mii_bus *unimacbus;
+
+ snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d",
+ UNIMAC_MDIO_DRV_NAME, priv->pdev->id);
+
+ unimacbus = mdio_find_bus(mdio_bus_id);
+ if (!unimacbus) {
+ pr_err("Unable to find mii\n");
+ return -ENODEV;
+ }
+ phydev = phy_find_first(unimacbus);
+ put_device(&unimacbus->dev);
+ if (!phydev) {
+ pr_err("Unable to find PHY\n");
+ return -ENODEV;
+ }
+ } else {
+ phydev = dev->phydev;
+ }
phydev->dev_flags = phy_flags;
ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
@@ -428,9 +449,12 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
/* Retain this platform_device pointer for later cleanup */
priv->mii_pdev = ppdev;
ppdev->dev.parent = &pdev->dev;
- ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv);
- if (pdata)
+ if (dn)
+ ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv);
+ else if (pdata)
bcmgenet_mii_pdata_init(priv, &ppd);
+ else
+ ppd.phy_mask = ~0;
ret = platform_device_add_resources(ppdev, &res, 1);
if (ret)
@@ -450,12 +474,33 @@ out:
return ret;
}
+static int bcmgenet_phy_interface_init(struct bcmgenet_priv *priv)
+{
+ struct device *kdev = &priv->pdev->dev;
+ int phy_mode = device_get_phy_mode(kdev);
+
+ if (phy_mode < 0) {
+ dev_err(kdev, "invalid PHY mode property\n");
+ return phy_mode;
+ }
+
+ priv->phy_interface = phy_mode;
+
+ /* We need to specifically look up whether this PHY interface is
+ * internal or not *before* we even try to probe the PHY driver
+ * over MDIO as we may have shut down the internal PHY for power
+ * saving purposes.
+ */
+ if (priv->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
+ priv->internal_phy = true;
+
+ return 0;
+}
+
static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
{
struct device_node *dn = priv->pdev->dev.of_node;
- struct device *kdev = &priv->pdev->dev;
struct phy_device *phydev;
- phy_interface_t phy_mode;
int ret;
/* Fetch the PHY phandle */
@@ -473,23 +518,12 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
}
/* Get the link mode */
- ret = of_get_phy_mode(dn, &phy_mode);
- if (ret) {
- dev_err(kdev, "invalid PHY mode property\n");
+ ret = bcmgenet_phy_interface_init(priv);
+ if (ret)
return ret;
- }
-
- priv->phy_interface = phy_mode;
-
- /* We need to specifically look up whether this PHY interface is internal
- * or not *before* we even try to probe the PHY driver over MDIO as we
- * may have shut down the internal PHY for power saving purposes.
- */
- if (priv->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
- priv->internal_phy = true;
/* Make sure we initialize MoCA PHYs with a link down */
- if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
+ if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
phydev = of_phy_find_device(dn);
if (phydev) {
phydev->link = 0;
@@ -554,10 +588,13 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv)
{
- struct device_node *dn = priv->pdev->dev.of_node;
+ struct device *kdev = &priv->pdev->dev;
+ struct device_node *dn = kdev->of_node;
if (dn)
return bcmgenet_mii_of_init(priv);
+ else if (has_acpi_companion(kdev))
+ return bcmgenet_phy_interface_init(priv);
else
return bcmgenet_mii_pd_init(priv);
}
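
On ACPI systems there is no PHY phandle to follow, so the hunk above locates the MDIO bus by name and takes the first PHY that probed on it. The same pattern in isolation (the demo_ name and the bus-id string are hypothetical):

#include <linux/phy.h>

static struct phy_device *demo_find_phy_by_busname(const char *bus_id)
{
        struct phy_device *phydev;
        struct mii_bus *bus;

        bus = mdio_find_bus(bus_id);    /* e.g. "unimac-mdio-0" */
        if (!bus)
                return NULL;

        phydev = phy_find_first(bus);   /* first PHY probed on the bus */
        put_device(&bus->dev);          /* drop the mdio_find_bus() ref */
        return phydev;
}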
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 88466255bf66..ff98a82b7bc4 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -96,11 +96,9 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
#define DRV_MODULE_NAME "tg3"
+/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM 3
#define TG3_MIN_NUM 137
-#define DRV_MODULE_VERSION \
- __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "May 11, 2014"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -222,13 +220,9 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
-static char version[] =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
-
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
@@ -12317,7 +12311,6 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
struct tg3 *tp = netdev_priv(dev);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
@@ -14160,6 +14153,11 @@ static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
}
static const struct ethtool_ops tg3_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS,
.get_drvinfo = tg3_get_drvinfo,
.get_regs_len = tg3_get_regs_len,
.get_regs = tg3_get_regs,
@@ -17625,8 +17623,6 @@ static int tg3_init_one(struct pci_dev *pdev,
u64 dma_mask, persist_dma_mask;
netdev_features_t features = 0;
- printk_once(KERN_INFO "%s\n", version);
-
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 01a50a4b2113..cc80bbbefe87 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2504,12 +2504,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
IPPROTO_TCP, 0);
BNAD_UPDATE_CTR(bnad, tso4);
} else {
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
-
- ipv6h->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
- IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
BNAD_UPDATE_CTR(bnad, tso6);
}
@@ -3847,9 +3842,6 @@ bnad_module_init(void)
{
int err;
- pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
- BNAD_VERSION);
-
bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
err = pci_register_driver(&bnad_pci_driver);
@@ -3874,6 +3866,5 @@ module_exit(bnad_module_exit);
MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
-MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);
MODULE_FIRMWARE(CNA_FW_FILE_CT2);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 492a02d54f14..627a93ce38ab 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -64,8 +64,6 @@ struct bnad_rx_ctrl {
#define BNAD_NAME "bna"
#define BNAD_NAME_LEN 64
-#define BNAD_VERSION "3.2.25.1"
-
#define BNAD_MAILBOX_MSIX_INDEX 0
#define BNAD_MAILBOX_MSIX_VECTORS 1
#define BNAD_INTX_TX_IB_BITMASK 0x1
@@ -253,7 +251,7 @@ struct bnad_rx_unmap_q {
int alloc_order;
u32 map_size;
enum bnad_rxbuf_type type;
- struct bnad_rx_unmap unmap[0] ____cacheline_aligned;
+ struct bnad_rx_unmap unmap[] ____cacheline_aligned;
};
#define BNAD_PCI_DEV_IS_CAT2(_bnad) \
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index b764c9ff9ad1..588c4804d10a 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -284,7 +284,6 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
unsigned long flags;
strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version));
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
@@ -1116,6 +1115,9 @@ out:
}
static const struct ethtool_ops bnad_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = bnad_get_drvinfo,
.get_wol = bnad_get_wol,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index a3f0f27fc79a..ab827fb4b6b9 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1200,7 +1200,6 @@ struct macb {
unsigned int dma_burst_length;
phy_interface_t phy_interface;
- int speed;
/* AT91RM9200 transmit */
struct sk_buff *skb; /* holds skb until xmit interrupt completes */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 2c28da1737fe..a0e8c5bbabc0 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -571,37 +571,20 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode,
old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
- /* Clear all the bits we might set later */
- ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE));
-
if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
if (state->interface == PHY_INTERFACE_MODE_RMII)
ctrl |= MACB_BIT(RM9200_RMII);
} else {
- ctrl &= ~(GEM_BIT(GBE) | GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
-
- /* We do not support MLO_PAUSE_RX yet */
- if (state->pause & MLO_PAUSE_TX)
- ctrl |= MACB_BIT(PAE);
+ ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
if (state->interface == PHY_INTERFACE_MODE_SGMII)
ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
}
- if (state->speed == SPEED_1000)
- ctrl |= GEM_BIT(GBE);
- else if (state->speed == SPEED_100)
- ctrl |= MACB_BIT(SPD);
-
- if (state->duplex)
- ctrl |= MACB_BIT(FD);
-
/* Apply the new configuration, if any */
if (old_ctrl ^ ctrl)
macb_or_gem_writel(bp, NCFGR, ctrl);
- bp->speed = state->speed;
-
spin_unlock_irqrestore(&bp->lock, flags);
}
@@ -626,16 +609,42 @@ static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
netif_tx_stop_all_queues(ndev);
}
-static void macb_mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface, struct phy_device *phy)
+static void macb_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct net_device *ndev = to_net_dev(config->dev);
struct macb *bp = netdev_priv(ndev);
struct macb_queue *queue;
+ unsigned long flags;
unsigned int q;
+ u32 ctrl;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ ctrl = macb_or_gem_readl(bp, NCFGR);
+
+ ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+
+ if (speed == SPEED_100)
+ ctrl |= MACB_BIT(SPD);
+
+ if (duplex)
+ ctrl |= MACB_BIT(FD);
if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
- macb_set_tx_clk(bp->tx_clk, bp->speed, ndev);
+ ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(PAE));
+
+ if (speed == SPEED_1000)
+ ctrl |= GEM_BIT(GBE);
+
+ /* We do not support MLO_PAUSE_RX yet */
+ if (tx_pause)
+ ctrl |= MACB_BIT(PAE);
+
+ macb_set_tx_clk(bp->tx_clk, speed, ndev);
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
@@ -648,6 +657,10 @@ static void macb_mac_link_up(struct phylink_config *config, unsigned int mode,
bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
}
+ macb_or_gem_writel(bp, NCFGR, ctrl);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
/* Enable Rx and Tx */
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
@@ -724,6 +737,9 @@ static int macb_mdiobus_register(struct macb *bp)
{
struct device_node *child, *np = bp->pdev->dev.of_node;
+ if (of_phy_is_fixed_link(np))
+ return mdiobus_register(bp->mii_bus);
+
/* Only create the PHY from the device tree if at least one PHY is
* described. Otherwise scan the entire MDIO bus. We do this to support
* old device tree that did not follow the best practices and did not
@@ -4429,8 +4445,6 @@ static int macb_probe(struct platform_device *pdev)
else
bp->phy_interface = interface;
- bp->speed = SPEED_UNKNOWN;
-
/* IP specific init */
err = init(pdev);
if (err)
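
This hunk is macb's side of the tree-wide phylink change: mac_link_up() now receives the resolved speed, duplex and pause state, so the MAC programs them at link-up time instead of caching them from mac_config(). The new callback shape, as a sketch (demo_ names are placeholders; the remaining ops are omitted):

#include <linux/phylink.h>

static void demo_mac_link_up(struct phylink_config *config,
                             struct phy_device *phy,
                             unsigned int mode, phy_interface_t interface,
                             int speed, int duplex,
                             bool tx_pause, bool rx_pause)
{
        /* Program speed/duplex/pause from the resolved link state here,
         * rather than from values squirrelled away in ->mac_config().
         */
}

static const struct phylink_mac_ops demo_phylink_ops = {
        /* .validate, .mac_config, .mac_link_down, ... omitted */
        .mac_link_up = demo_mac_link_up,
};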
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index b821c9e1604c..81ff9ac73f9a 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -13,6 +13,9 @@
#define DRV_NAME "cavium_ptp"
#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
+#define PCI_SUBSYS_DEVID_88XX_PTP 0xA10C
+#define PCI_SUBSYS_DEVID_81XX_PTP 0XA20C
+#define PCI_SUBSYS_DEVID_83XX_PTP 0xA30C
#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
#define PCI_PTP_BAR_NO 0
@@ -321,7 +324,12 @@ static void cavium_ptp_remove(struct pci_dev *pdev)
}
static const struct pci_device_id cavium_ptp_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
+ PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_88XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
+ PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_81XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
+ PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_83XX_PTP) },
{ 0, }
};
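
PCI_DEVICE_SUB() also matches the subsystem vendor/device IDs, which is how a single Cavium PTP device ID is split into per-SoC entries above. The shape of such a table entry, with the IDs taken from the hunk (demo_ name is a placeholder):

#include <linux/pci.h>

static const struct pci_device_id demo_ptp_id_table[] = {
        /* Match vendor+device AND subsystem vendor+device. */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, 0xA00C,
                         PCI_VENDOR_ID_CAVIUM, 0xA10C) },
        { 0, }
};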
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index abe5d0dac851..16eebfc52109 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -442,7 +442,6 @@ lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
strcpy(drvinfo->driver, "liquidio");
- strcpy(drvinfo->version, LIQUIDIO_VERSION);
strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
ETHTOOL_FWVERS_LEN);
strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
@@ -459,7 +458,6 @@ lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
strcpy(drvinfo->driver, "liquidio_vf");
- strcpy(drvinfo->version, LIQUIDIO_VERSION);
strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
ETHTOOL_FWVERS_LEN);
strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
@@ -3099,7 +3097,17 @@ static int lio_set_fecparam(struct net_device *netdev,
return 0;
}
+#define LIO_ETHTOOL_COALESCE (ETHTOOL_COALESCE_RX_USECS | \
+ ETHTOOL_COALESCE_MAX_FRAMES | \
+ ETHTOOL_COALESCE_USE_ADAPTIVE | \
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH | \
+ ETHTOOL_COALESCE_PKT_RATE_RX_USECS)
+
static const struct ethtool_ops lio_ethtool_ops = {
+ .supported_coalesce_params = LIO_ETHTOOL_COALESCE,
.get_link_ksettings = lio_get_link_ksettings,
.set_link_ksettings = lio_set_link_ksettings,
.get_fecparam = lio_get_fecparam,
@@ -3130,6 +3138,7 @@ static const struct ethtool_ops lio_ethtool_ops = {
};
static const struct ethtool_ops lio_vf_ethtool_ops = {
+ .supported_coalesce_params = LIO_ETHTOOL_COALESCE,
.get_link_ksettings = lio_get_link_ksettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = lio_get_vf_drvinfo,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index eab05b5534ea..66d31c018c7e 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -39,7 +39,6 @@
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
@@ -1376,7 +1375,6 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
{
u32 dev_id, rev_id;
int ret = 1;
- char *s;
pci_read_config_dword(oct->pci_dev, 0, &dev_id);
pci_read_config_dword(oct->pci_dev, 8, &rev_id);
@@ -1386,13 +1384,11 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
case OCTEON_CN68XX_PCIID:
oct->chip_id = OCTEON_CN68XX;
ret = lio_setup_cn68xx_octeon_device(oct);
- s = "CN68XX";
break;
case OCTEON_CN66XX_PCIID:
oct->chip_id = OCTEON_CN66XX;
ret = lio_setup_cn66xx_octeon_device(oct);
- s = "CN66XX";
break;
case OCTEON_CN23XX_PCIID_PF:
@@ -1405,22 +1401,13 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
pci_sriov_set_totalvfs(oct->pci_dev,
oct->sriov_info.max_vfs);
#endif
- s = "CN23XX";
break;
default:
- s = "?";
dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
dev_id);
}
- if (!ret)
- dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
- OCTEON_MAJOR_REV(oct),
- OCTEON_MINOR_REV(oct),
- octeon_get_conf(oct)->card_name,
- LIQUIDIO_VERSION);
-
return ret;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 7a77544a54f5..bbd9bfa4a989 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -32,7 +32,6 @@
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(LIQUIDIO_VERSION);
static int debug = -1;
module_param(debug, int, 0644);
@@ -2352,8 +2351,8 @@ static int octeon_device_init(struct octeon_device *oct)
}
atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
- dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n",
- LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);
+ dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n",
+ oct->sriov_info.rings_per_vf);
/* Setup the interrupt handler and record the INT SUM register address*/
if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index a5e0e9f17959..4da90757cd3f 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -25,17 +25,11 @@
#include "octeon_config.h"
-#define LIQUIDIO_PACKAGE ""
#define LIQUIDIO_BASE_MAJOR_VERSION 1
#define LIQUIDIO_BASE_MINOR_VERSION 7
#define LIQUIDIO_BASE_MICRO_VERSION 2
#define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
__stringify(LIQUIDIO_BASE_MINOR_VERSION)
-#define LIQUIDIO_MICRO_VERSION "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
-#define LIQUIDIO_VERSION LIQUIDIO_PACKAGE \
- __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
- __stringify(LIQUIDIO_BASE_MINOR_VERSION) \
- "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
struct lio_version {
u16 major;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index dfc77507b159..0d2831d10f65 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -127,7 +127,7 @@ struct octeon_pci_console_desc {
u32 pad;
/* must be 64 bit aligned here... */
/* Array of addresses of octeon_pci_console structures */
- u64 console_addr_array[0];
+ u64 console_addr_array[];
/* Implicit storage for console_addr_array */
};
@@ -840,17 +840,11 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
return -EINVAL;
}
- if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
- dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
- LIQUIDIO_PACKAGE, h->version);
- return -EINVAL;
- }
-
- if (memcmp(LIQUIDIO_BASE_VERSION, h->version + strlen(LIQUIDIO_PACKAGE),
+ if (memcmp(LIQUIDIO_BASE_VERSION, h->version,
strlen(LIQUIDIO_BASE_VERSION))) {
dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
LIQUIDIO_BASE_VERSION,
- h->version + strlen(LIQUIDIO_PACKAGE));
+ h->version);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index e9575887a4f8..9d868403d86c 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -28,7 +28,6 @@
#include <asm/octeon/cvmx-agl-defs.h>
#define DRV_NAME "octeon_mgmt"
-#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
"Cavium Networks Octeon MII (management) port Network Driver"
@@ -1340,9 +1339,6 @@ static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static int octeon_mgmt_nway_reset(struct net_device *dev)
@@ -1517,7 +1513,6 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
if (result)
goto err;
- dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
return 0;
err:
@@ -1574,4 +1569,3 @@ module_exit(octeon_mgmt_mod_exit);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 5e0b16bb95a0..83dabcffc789 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -16,7 +16,6 @@
#include "../common/cavium_ptp.h"
#define DRV_NAME "nicvf"
-#define DRV_VERSION "1.0"
struct nicvf_stat {
char name[ETH_GSTRING_LEN];
@@ -192,7 +191,6 @@ static void nicvf_get_drvinfo(struct net_device *netdev,
struct nicvf *nic = netdev_priv(netdev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 016957285f99..b4b33368698f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -126,8 +126,7 @@ static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
- int timeout = NIC_MBOX_MSG_TIMEOUT;
- int sleep = 10;
+ unsigned long timeout;
int ret = 0;
mutex_lock(&nic->rx_mode_mtx);
@@ -137,6 +136,7 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
nicvf_write_to_mbx(nic, mbx);
+ timeout = jiffies + msecs_to_jiffies(NIC_MBOX_MSG_TIMEOUT);
/* Wait for previous message to be acked, timeout 2sec */
while (!nic->pf_acked) {
if (nic->pf_nacked) {
@@ -146,11 +146,10 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
ret = -EINVAL;
break;
}
- msleep(sleep);
+ usleep_range(8000, 10000);
if (nic->pf_acked)
break;
- timeout -= sleep;
- if (!timeout) {
+ if (time_after(jiffies, timeout)) {
netdev_err(nic->netdev,
"PF didn't ACK to mbox msg 0x%02x from VF%d\n",
(mbx->msg.msg & 0xFF), nic->vf_id);
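
The mailbox wait above moves from decrementing a counter by an assumed sleep length to comparing against a jiffies deadline, which stays correct even when usleep_range() overshoots. The general pattern (demo_poll_with_timeout and its completion flag are hypothetical; real code would synchronize the flag):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int demo_poll_with_timeout(bool *done, unsigned int ms)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(ms);

        while (!*done) {
                usleep_range(8000, 10000);      /* ~10ms per iteration */
                if (*done)
                        break;                  /* check flag before deadline */
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;      /* wall-clock deadline */
        }
        return 0;
}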
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 4ab57d33a87e..069e7413f1ef 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1179,13 +1179,12 @@ void nicvf_sq_disable(struct nicvf *nic, int qidx)
void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
int qidx)
{
- u64 head, tail;
+ u64 head;
struct sk_buff *skb;
struct nicvf *nic = netdev_priv(netdev);
struct sq_hdr_subdesc *hdr;
head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
- tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
while (sq->head != head) {
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index bc2427c49b89..2460451fc48f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -100,8 +100,8 @@
* RED accepts pkt if unused CQE < 2560 & >= 2304
* DROPs pkts if unused CQE < 2304
*/
-#define RQ_PASS_CQ_LVL 192ULL
-#define RQ_DROP_CQ_LVL 184ULL
+#define RQ_PASS_CQ_LVL 224ULL
+#define RQ_DROP_CQ_LVL 216ULL
/* RED and Backpressure levels of RBDR for pkt reception
* For RBDR, level is a measure of fullness, i.e. 0x0 means empty
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 94b9482f14a5..6475060649e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -55,7 +55,6 @@
#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
#define DRV_NAME "cxgb"
-#define DRV_VERSION "2.2"
#define CH_DEVICE(devid, ssid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 0ccdde366ae1..99736796e1a0 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -429,7 +429,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct adapter *adapter = dev->ml_priv;
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -794,6 +793,9 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
}
static const struct ethtool_ops t1_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
+ ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
@@ -984,8 +986,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adapter *adapter = NULL;
struct port_info *pi;
- pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);
-
err = pci_enable_device(pdev);
if (err)
return err;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
index b19e4376ba76..401827b82aa1 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h
@@ -79,7 +79,7 @@ struct ch_mem_range {
uint32_t addr;
uint32_t len;
uint32_t version;
- uint8_t buf[0];
+ uint8_t buf[];
};
struct ch_qset_params {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 883cfa9c4b6d..42c6e9379882 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -105,7 +105,6 @@ static const struct pci_device_id cxgb3_pci_tbl[] = {
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
static int dflt_msg_enable = DFLT_MSG_ENABLE;
@@ -1629,7 +1628,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
spin_unlock(&adapter->stats_lock);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
if (fw_vers)
@@ -2106,6 +2104,7 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops cxgb_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
@@ -3210,8 +3209,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adapter *adapter = NULL;
struct port_info *pi;
- pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
-
if (!cxgb3_wq) {
cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
if (!cxgb3_wq) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
index 852c399a8b0a..68bb5f39f3f1 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h
@@ -1448,7 +1448,7 @@ struct cpl_rdma_terminate {
#endif
__be32 msn;
__be32 mo;
- __u8 data[0];
+ __u8 data[];
};
/* cpl_rdma_terminate.tid_len fields */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/version.h b/drivers/net/ethernet/chelsio/cxgb3/version.h
index 165bfb91487a..b4b2547efc86 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/version.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/version.h
@@ -34,8 +34,6 @@
#define __CHELSIO_VERSION_H
#define DRV_DESC "Chelsio T3 Network Driver"
#define DRV_NAME "cxgb3"
-/* Driver version */
-#define DRV_VERSION "1.1.5-ko"
/* Firmware version */
#define FW_VERSION_MAJOR 7
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index a0e0ae19649f..290c1058069a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -29,7 +29,7 @@ struct clip_tbl {
atomic_t nfree;
struct list_head ce_free_head;
void *cl_list;
- struct list_head hash_list[0];
+ struct list_head hash_list[];
};
enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index f5be3ee1bdb4..dcab94cc2dee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -82,7 +82,7 @@ struct cudbg_ulprx_la {
struct cudbg_tp_la {
u32 size;
u32 mode;
- u8 data[0];
+ u8 data[];
};
static const char * const cudbg_region[] = {
@@ -134,7 +134,7 @@ struct cudbg_meminfo {
struct cudbg_cim_pif_la {
int size;
- u8 data[0];
+ u8 data[];
};
struct cudbg_clk_info {
@@ -339,13 +339,13 @@ struct cudbg_qdesc_entry {
u32 qid;
u32 desc_size;
u32 num_desc;
- u8 data[0]; /* Must be last */
+ u8 data[]; /* Must be last */
};
struct cudbg_qdesc_info {
u32 qdesc_entry_size;
u32 num_queues;
- u8 data[0]; /* Must be last */
+ u8 data[]; /* Must be last */
};
#define IREG_NUM_ELEM 4
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 8b7d156f79d3..e46a14f44a6f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -640,6 +640,7 @@ enum { /* adapter flags */
enum {
ULP_CRYPTO_LOOKASIDE = 1 << 0,
ULP_CRYPTO_IPSEC_INLINE = 1 << 1,
+ ULP_CRYPTO_KTLS_INLINE = 1 << 3,
};
struct rx_sw_desc;
@@ -1485,9 +1486,8 @@ static inline unsigned int qtimer_val(const struct adapter *adap,
return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
-/* driver version & name used for ethtool_drvinfo */
+/* driver name used for ethtool_drvinfo */
extern char cxgb4_driver_name[];
-extern const char cxgb4_driver_version[];
void t4_os_portmod_changed(struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index de30d61af065..ebed99f3d4cf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -984,7 +984,7 @@ static const char * const devlog_facility_strings[] = {
struct devlog_info {
unsigned int nentries; /* number of entries in log[] */
unsigned int first; /* first [temporal] entry in log[] */
- struct fw_devlog_e log[0]; /* Firmware Device Log */
+ struct fw_devlog_e log[]; /* Firmware Device Log */
};
/* Dump a Firmware Device Log entry.
@@ -3409,6 +3409,41 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.tls_pdu_rx));
seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
atomic_read(&adap->chcr_stats.tls_key));
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n");
+ seq_printf(seq, "Tx HW offload contexts added: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_ctx));
+ seq_printf(seq, "Tx connection created: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_connection_open));
+ seq_printf(seq, "Tx connection failed: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_connection_fail));
+ seq_printf(seq, "Tx connection closed: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_connection_close));
+ seq_printf(seq, "Packets passed for encryption : %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_encrypted_packets));
+ seq_printf(seq, "Bytes passed for encryption : %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_encrypted_bytes));
+ seq_printf(seq, "Tx records send: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_send_records));
+ seq_printf(seq, "Tx partial start of records: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_start_pkts));
+ seq_printf(seq, "Tx partial middle of records: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_middle_pkts));
+ seq_printf(seq, "Tx partial end of record: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_end_pkts));
+ seq_printf(seq, "Tx complete records: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_complete_pkts));
+ seq_printf(seq, "TX trim pkts : %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_trimmed_pkts));
+ seq_printf(seq, "Tx out of order packets: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_ooo));
+ seq_printf(seq, "Tx drop pkts before HW offload: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_skip_no_sync_data));
+ seq_printf(seq, "Tx drop not synced packets: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_drop_no_sync_data));
+ seq_printf(seq, "Tx drop bypass req: %20llu\n",
+ atomic64_read(&adap->chcr_stats.ktls_tx_drop_bypass_req));
+#endif
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
index ba95e13d52da..1471cf0deb58 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
@@ -49,7 +49,7 @@ struct seq_tab {
unsigned int rows; /* # of entries */
unsigned char width; /* size in bytes of each entry */
unsigned char skip_first; /* whether the first line is a header */
- char data[0]; /* the table data */
+ char data[]; /* the table data */
};
static inline unsigned int hex2val(char c)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index c837382ee522..9fd496732b2c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -106,6 +106,15 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
"db_empty ",
"write_coal_success ",
"write_coal_fail ",
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ "tx_tls_encrypted_packets",
+ "tx_tls_encrypted_bytes ",
+ "tx_tls_ctx ",
+ "tx_tls_ooo ",
+ "tx_tls_skip_no_sync_data",
+ "tx_tls_drop_no_sync_data",
+ "tx_tls_drop_bypass_req ",
+#endif
};
static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
@@ -170,15 +179,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
u32 exprom_vers;
strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->version, cxgb4_driver_version,
- sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
info->regdump_len = get_regs_len(dev);
- if (!adapter->params.fw_vers)
- strcpy(info->fw_version, "N/A");
- else
+ if (adapter->params.fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
@@ -236,6 +241,15 @@ struct adapter_stats {
u64 db_empty;
u64 wc_success;
u64 wc_fail;
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ u64 tx_tls_encrypted_packets;
+ u64 tx_tls_encrypted_bytes;
+ u64 tx_tls_ctx;
+ u64 tx_tls_ooo;
+ u64 tx_tls_skip_no_sync_data;
+ u64 tx_tls_drop_no_sync_data;
+ u64 tx_tls_drop_bypass_req;
+#endif
};
static void collect_sge_port_stats(const struct adapter *adap,
@@ -1580,6 +1594,10 @@ static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
}
static const struct ethtool_ops cxgb_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES |
+ ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_link_ksettings = get_link_ksettings,
.set_link_ksettings = set_link_ksettings,
.get_fecparam = get_fecparam,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index fc05248984fc..796555255207 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -438,13 +438,118 @@ int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx,
return get_filter_count(adapter, fidx, hitcnt, bytecnt, hash);
}
-int cxgb4_get_free_ftid(struct net_device *dev, int family)
+static bool cxgb4_filter_prio_in_range(struct tid_info *t, u32 idx, u8 nslots,
+ u32 prio)
+{
+ struct filter_entry *prev_tab, *next_tab, *prev_fe, *next_fe;
+ u32 prev_ftid, next_ftid;
+
+ /* Only insert the rule if both of the following conditions
+ * are met:
+ * 1. The immediate previous rule has priority <= @prio.
+ * 2. The immediate next rule has priority >= @prio.
+ */
+
+ /* High Priority (HPFILTER) region always has higher priority
+ * than normal FILTER region. So, all rules in HPFILTER region
+ * must have prio value <= rules in normal FILTER region.
+ */
+ if (idx < t->nhpftids) {
+ /* Don't insert if there's a rule already present at @idx
+ * in HPFILTER region.
+ */
+ if (test_bit(idx, t->hpftid_bmap))
+ return false;
+
+ next_tab = t->hpftid_tab;
+ next_ftid = find_next_bit(t->hpftid_bmap, t->nhpftids, idx);
+ if (next_ftid >= t->nhpftids) {
+ /* No next entry found in HPFILTER region.
+ * See if there's any next entry in normal
+ * FILTER region.
+ */
+ next_ftid = find_first_bit(t->ftid_bmap, t->nftids);
+ if (next_ftid >= t->nftids)
+ next_ftid = idx;
+ else
+ next_tab = t->ftid_tab;
+ }
+
+ /* Search for the closest previous filter entry in HPFILTER
+ * region. No need to search in normal FILTER region because
+ * there can never be any entry in normal FILTER region whose
+ * prio value is < last entry in HPFILTER region.
+ */
+ prev_ftid = find_last_bit(t->hpftid_bmap, idx);
+ if (prev_ftid >= idx)
+ prev_ftid = idx;
+
+ prev_tab = t->hpftid_tab;
+ } else {
+ idx -= t->nhpftids;
+
+ /* Don't insert if there's a rule already present at @idx
+ * in normal FILTER region.
+ */
+ if (test_bit(idx, t->ftid_bmap))
+ return false;
+
+ prev_tab = t->ftid_tab;
+ prev_ftid = find_last_bit(t->ftid_bmap, idx);
+ if (prev_ftid >= idx) {
+ /* No previous entry found in normal FILTER
+ * region. See if there's any previous entry
+ * in HPFILTER region.
+ */
+ prev_ftid = find_last_bit(t->hpftid_bmap, t->nhpftids);
+ if (prev_ftid >= t->nhpftids)
+ prev_ftid = idx;
+ else
+ prev_tab = t->hpftid_tab;
+ }
+
+ /* Search for the closest next filter entry in normal
+ * FILTER region. No need to search in HPFILTER region
+ * because there can never be any entry in HPFILTER
+ * region whose prio value is > first entry in normal
+ * FILTER region.
+ */
+ next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx);
+ if (next_ftid >= t->nftids)
+ next_ftid = idx;
+
+ next_tab = t->ftid_tab;
+ }
+
+ next_fe = &next_tab[next_ftid];
+
+ /* See if the filter entry belongs to an IPv6 rule, which
+ * occupies 4 slots on T5 and 2 slots on T6. Adjust the
+ * reference to the previously inserted filter entry
+ * accordingly.
+ */
+ prev_fe = &prev_tab[prev_ftid & ~(nslots - 1)];
+ if (!prev_fe->fs.type)
+ prev_fe = &prev_tab[prev_ftid];
+
+ if ((prev_fe->valid && prev_fe->fs.tc_prio > prio) ||
+ (next_fe->valid && next_fe->fs.tc_prio < prio))
+ return false;
+
+ return true;
+}
+
+int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
+ u32 tc_prio)
{
struct adapter *adap = netdev2adap(dev);
struct tid_info *t = &adap->tids;
+ u32 bmap_ftid, max_ftid;
+ struct filter_entry *f;
+ unsigned long *bmap;
bool found = false;
- u8 i, n, cnt;
- int ftid;
+ u8 i, cnt, n;
+ int ftid = 0;
/* IPv4 occupies 1 slot. IPv6 occupies 2 slots on T6 and 4 slots
* on T5.
@@ -456,34 +561,127 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
n += 2;
}
- if (n > t->nftids)
- return -ENOMEM;
-
- /* Find free filter slots from the end of TCAM. Appropriate
- * checks must be done by caller later to ensure the prio
- * passed by TC doesn't conflict with prio saved by existing
- * rules in the TCAM.
+ /* There are 3 filter regions available in hardware in
+ * following order of priority:
+ *
+ * 1. High Priority (HPFILTER) region (Highest Priority).
+ * 2. HASH region.
+ * 3. Normal FILTER region (Lowest Priority).
+ *
+ * Entries in the HPFILTER and normal FILTER regions have
+ * index 0 as the highest priority, and rules are scanned
+ * in ascending order until either a rule hits or the end
+ * of the region is reached.
+ *
+ * All HASH region entries have the same priority. The set
+ * of header fields to match is pre-determined, and every
+ * rule to be inserted in the HASH region must specify that
+ * same set of match fields; hence, the HASH region is an
+ * exact-match region. A HASH is generated for a rule from
+ * the values in the pre-determined set of header match
+ * fields and serves as an index into the HASH region; no
+ * two rules can have the same HASH. Hardware computes a
+ * HASH for every incoming packet from the same fields and
+ * uses it as an index to check whether a rule is inserted
+ * at that slot in the HASH region. If one is, it's a
+ * filter hit. Otherwise, it's a filter miss and the normal
+ * FILTER region is scanned afterwards.
*/
+
spin_lock_bh(&t->ftid_lock);
- ftid = t->nftids - 1;
- while (ftid >= n - 1) {
+
+ ftid = (tc_prio <= t->nhpftids) ? 0 : t->nhpftids;
+ max_ftid = t->nftids + t->nhpftids;
+ while (ftid < max_ftid) {
+ if (ftid < t->nhpftids) {
+ /* If the new rule wants to get inserted into
+ * HPFILTER region, but its prio is greater
+ * than the rule with the highest prio in HASH
+ * region, then reject the rule.
+ */
+ if (t->tc_hash_tids_max_prio &&
+ tc_prio > t->tc_hash_tids_max_prio)
+ break;
+
+ /* If there aren't enough slots available in
+ * the HPFILTER region, then move on to the
+ * normal FILTER region immediately.
+ */
+ if (ftid + n > t->nhpftids) {
+ ftid = t->nhpftids;
+ continue;
+ }
+
+ bmap = t->hpftid_bmap;
+ bmap_ftid = ftid;
+ } else if (hash_en) {
+ /* Ensure priority is >= last rule in HPFILTER
+ * region.
+ */
+ ftid = find_last_bit(t->hpftid_bmap, t->nhpftids);
+ if (ftid < t->nhpftids) {
+ f = &t->hpftid_tab[ftid];
+ if (f->valid && tc_prio < f->fs.tc_prio)
+ break;
+ }
+
+ /* Ensure priority is <= first rule in normal
+ * FILTER region.
+ */
+ ftid = find_first_bit(t->ftid_bmap, t->nftids);
+ if (ftid < t->nftids) {
+ f = &t->ftid_tab[ftid];
+ if (f->valid && tc_prio > f->fs.tc_prio)
+ break;
+ }
+
+ found = true;
+ ftid = t->nhpftids;
+ goto out_unlock;
+ } else {
+ /* If the new rule is destined for the normal
+ * FILTER region, but its prio is less than the
+ * highest prio in the HASH region, then reject
+ * the rule.
+ */
+ if (t->tc_hash_tids_max_prio &&
+ tc_prio < t->tc_hash_tids_max_prio)
+ break;
+
+ if (ftid + n > max_ftid)
+ break;
+
+ bmap = t->ftid_bmap;
+ bmap_ftid = ftid - t->nhpftids;
+ }
+
cnt = 0;
for (i = 0; i < n; i++) {
- if (test_bit(ftid - i, t->ftid_bmap))
+ if (test_bit(bmap_ftid + i, bmap))
break;
cnt++;
}
+
if (cnt == n) {
- ftid &= ~(n - 1);
- found = true;
- break;
+ /* Ensure the new rule's prio doesn't conflict
+ * with existing rules.
+ */
+ if (cxgb4_filter_prio_in_range(t, ftid, n,
+ tc_prio)) {
+ ftid &= ~(n - 1);
+ found = true;
+ break;
+ }
}
- ftid -= n;
+ ftid += n;
}
- spin_unlock_bh(&t->ftid_lock);
- ftid += t->nhpftids;
+out_unlock:
+ spin_unlock_bh(&t->ftid_lock);
return found ? ftid : -ENOMEM;
}
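
[Editor's note] The heart of the new allocator is the aligned window scan over the region bitmap: starting from an index, it tests n consecutive bits and either claims the window or skips past it. A minimal userspace sketch of that scan, with simplified stand-ins for the kernel's bitmap helpers (all names here are hypothetical):

#include <stdbool.h>

/* Simplified stand-in for the kernel's test_bit(). */
static bool slot_busy(const unsigned long *bmap, unsigned int idx)
{
	unsigned int bits = 8 * sizeof(unsigned long);

	return bmap[idx / bits] & (1UL << (idx % bits));
}

/* Find the start of @n consecutive free slots in a @size-bit
 * bitmap, stepping window by window as the driver loop above
 * does; returns -1 when no window fits.
 */
static int find_free_run(const unsigned long *bmap, unsigned int size,
			 unsigned int n)
{
	unsigned int idx = 0;

	while (idx + n <= size) {
		unsigned int i;

		for (i = 0; i < n; i++)
			if (slot_busy(bmap, idx + i))
				break;
		if (i == n)
			return idx;
		idx += n;
	}
	return -1;
}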
@@ -555,73 +753,6 @@ static void cxgb4_clear_hpftid(struct tid_info *t, int fidx, int family)
spin_unlock_bh(&t->ftid_lock);
}
-bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio)
-{
- struct filter_entry *prev_fe, *next_fe, *tab;
- struct adapter *adap = netdev2adap(dev);
- u32 prev_ftid, next_ftid, max_tid;
- struct tid_info *t = &adap->tids;
- unsigned long *bmap;
- bool valid = true;
-
- if (idx < t->nhpftids) {
- bmap = t->hpftid_bmap;
- tab = t->hpftid_tab;
- max_tid = t->nhpftids;
- } else {
- idx -= t->nhpftids;
- bmap = t->ftid_bmap;
- tab = t->ftid_tab;
- max_tid = t->nftids;
- }
-
- /* Only insert the rule if both of the following conditions
- * are met:
- * 1. The immediate previous rule has priority <= @prio.
- * 2. The immediate next rule has priority >= @prio.
- */
- spin_lock_bh(&t->ftid_lock);
-
- /* Don't insert if there's a rule already present at @idx. */
- if (test_bit(idx, bmap)) {
- valid = false;
- goto out_unlock;
- }
-
- next_ftid = find_next_bit(bmap, max_tid, idx);
- if (next_ftid >= max_tid)
- next_ftid = idx;
-
- next_fe = &tab[next_ftid];
-
- prev_ftid = find_last_bit(bmap, idx);
- if (prev_ftid >= idx)
- prev_ftid = idx;
-
- /* See if the filter entry belongs to an IPv6 rule, which
- * occupy 4 slots on T5 and 2 slots on T6. Adjust the
- * reference to the previously inserted filter entry
- * accordingly.
- */
- if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) {
- prev_fe = &tab[prev_ftid & ~0x3];
- if (!prev_fe->fs.type)
- prev_fe = &tab[prev_ftid];
- } else {
- prev_fe = &tab[prev_ftid & ~0x1];
- if (!prev_fe->fs.type)
- prev_fe = &tab[prev_ftid];
- }
-
- if ((prev_fe->valid && prio < prev_fe->fs.tc_prio) ||
- (next_fe->valid && prio > next_fe->fs.tc_prio))
- valid = false;
-
-out_unlock:
- spin_unlock_bh(&t->ftid_lock);
- return valid;
-}
-
/* Delete the filter at a specified index. */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
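
[Editor's note] The exported cxgb4_filter_prio_in_range() removed below is superseded by the static helper at the top of this file's diff. Its ordering invariant is small enough to state standalone: an insertion at a TCAM index is valid only while priorities stay non-decreasing with the index. A minimal sketch, assuming a pared-down stand-in for struct filter_entry:

#include <stdbool.h>
#include <stdint.h>

struct filter_entry {			/* pared-down stand-in */
	bool valid;
	struct { uint32_t tc_prio; } fs;
};

/* Valid iff the nearest occupied neighbours bracket @prio:
 * previous entry's prio <= @prio <= next entry's prio.
 */
static bool prio_in_range(const struct filter_entry *prev,
			  const struct filter_entry *next, uint32_t prio)
{
	if (prev && prev->valid && prev->fs.tc_prio > prio)
		return false;
	if (next && next->valid && next->fs.tc_prio < prio)
		return false;
	return true;
}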
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
index b3e4a645043d..b0751c0611ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
@@ -53,5 +53,4 @@ void clear_all_filters(struct adapter *adapter);
void init_hash_filter(struct adapter *adap);
bool is_filter_exact_match(struct adapter *adap,
struct ch_filter_specification *fs);
-bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio);
#endif /* __CXGB4_FILTER_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 97f90edbc068..75fde0d4d493 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -90,11 +90,6 @@
char cxgb4_driver_name[] = KBUILD_MODNAME;
-#ifdef DRV_VERSION
-#undef DRV_VERSION
-#endif
-#define DRV_VERSION "2.0.0-ko"
-const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
@@ -137,7 +132,6 @@ const char cxgb4_driver_version[] = DRV_VERSION;
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
@@ -3626,8 +3620,6 @@ static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
struct adapter *adapter = netdev2adap(dev);
strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->version, cxgb4_driver_version,
- sizeof(info->version));
strlcpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -6086,8 +6078,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int i, err;
u32 whoami;
- printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
-
err = pci_request_regions(pdev, KBUILD_MODNAME);
if (err) {
/* Just info, some other driver may have claimed the device. */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index bb5513bdd293..4a5fa9eba0b6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -544,7 +544,8 @@ static bool valid_pedit_action(struct net_device *dev,
}
int cxgb4_validate_flow_actions(struct net_device *dev,
- struct flow_action *actions)
+ struct flow_action *actions,
+ struct netlink_ext_ack *extack)
{
struct flow_action_entry *act;
bool act_redir = false;
@@ -552,6 +553,9 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
bool act_vlan = false;
int i;
+ if (!flow_action_basic_hw_stats_check(actions, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, actions) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
@@ -631,6 +635,64 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
return 0;
}
+static void cxgb4_tc_flower_hash_prio_add(struct adapter *adap, u32 tc_prio)
+{
+ spin_lock_bh(&adap->tids.ftid_lock);
+ if (adap->tids.tc_hash_tids_max_prio < tc_prio)
+ adap->tids.tc_hash_tids_max_prio = tc_prio;
+ spin_unlock_bh(&adap->tids.ftid_lock);
+}
+
+static void cxgb4_tc_flower_hash_prio_del(struct adapter *adap, u32 tc_prio)
+{
+ struct tid_info *t = &adap->tids;
+ struct ch_tc_flower_entry *fe;
+ struct rhashtable_iter iter;
+ u32 found = 0;
+
+ spin_lock_bh(&t->ftid_lock);
+ /* Bail if the current rule is not the one with the max
+ * prio.
+ */
+ if (t->tc_hash_tids_max_prio != tc_prio)
+ goto out_unlock;
+
+ /* Search for the next rule having the same or next lower
+ * max prio.
+ */
+ rhashtable_walk_enter(&adap->flower_tbl, &iter);
+ do {
+ rhashtable_walk_start(&iter);
+
+ fe = rhashtable_walk_next(&iter);
+ while (!IS_ERR_OR_NULL(fe)) {
+ if (fe->fs.hash &&
+ fe->fs.tc_prio <= t->tc_hash_tids_max_prio) {
+ t->tc_hash_tids_max_prio = fe->fs.tc_prio;
+ found++;
+
+ /* Bail if we found another rule
+ * having the same prio as the
+ * current max one.
+ */
+ if (fe->fs.tc_prio == tc_prio)
+ break;
+ }
+
+ fe = rhashtable_walk_next(&iter);
+ }
+
+ rhashtable_walk_stop(&iter);
+ } while (fe == ERR_PTR(-EAGAIN));
+ rhashtable_walk_exit(&iter);
+
+ if (!found)
+ t->tc_hash_tids_max_prio = 0;
+
+out_unlock:
+ spin_unlock_bh(&t->ftid_lock);
+}
+
int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls)
{
@@ -640,9 +702,10 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
struct ch_tc_flower_entry *ch_flower;
struct ch_filter_specification *fs;
struct filter_ctx ctx;
+ u8 inet_family;
int fidx, ret;
- if (cxgb4_validate_flow_actions(dev, &rule->action))
+ if (cxgb4_validate_flow_actions(dev, &rule->action, extack))
return -EOPNOTSUPP;
if (cxgb4_validate_flow_match(dev, cls))
@@ -660,39 +723,32 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
cxgb4_process_flow_actions(dev, &rule->action, fs);
fs->hash = is_filter_exact_match(adap, fs);
- if (fs->hash) {
- fidx = 0;
- } else {
- u8 inet_family;
-
- inet_family = fs->type ? PF_INET6 : PF_INET;
+ inet_family = fs->type ? PF_INET6 : PF_INET;
- /* Note that TC uses prio 0 to indicate stack to
- * generate automatic prio and hence doesn't pass prio
- * 0 to driver. However, the hardware TCAM index
- * starts from 0. Hence, the -1 here.
- */
- if (cls->common.prio <= (adap->tids.nftids +
- adap->tids.nhpftids)) {
- fidx = cls->common.prio - 1;
- if (fidx < adap->tids.nhpftids)
- fs->prio = 1;
- } else {
- fidx = cxgb4_get_free_ftid(dev, inet_family);
- }
+ /* Get a free filter entry TID where we can insert this new
+ * rule. Only insert the rule if its prio doesn't conflict
+ * with existing rules.
+ */
+ fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash,
+ cls->common.prio);
+ if (fidx < 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "No free LETCAM index available");
+ ret = -ENOMEM;
+ goto free_entry;
+ }
- /* Only insert FLOWER rule if its priority doesn't
- * conflict with existing rules in the LETCAM.
- */
- if (fidx < 0 ||
- !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
- NL_SET_ERR_MSG_MOD(extack,
- "No free LETCAM index available");
- ret = -ENOMEM;
- goto free_entry;
- }
+ if (fidx < adap->tids.nhpftids) {
+ fs->prio = 1;
+ fs->hash = 0;
}
+ /* If the rule can be inserted into the HASH region, then
+ * ignore the index into the normal FILTER region.
+ */
+ if (fs->hash)
+ fidx = 0;
+
fs->tc_prio = cls->common.prio;
fs->tc_cookie = cls->cookie;
@@ -723,6 +779,9 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
if (ret)
goto del_filter;
+ if (fs->hash)
+ cxgb4_tc_flower_hash_prio_add(adap, cls->common.prio);
+
return 0;
del_filter:
@@ -738,12 +797,17 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
{
struct adapter *adap = netdev2adap(dev);
struct ch_tc_flower_entry *ch_flower;
+ u32 tc_prio;
+ bool hash;
int ret;
ch_flower = ch_flower_lookup(adap, cls->cookie);
if (!ch_flower)
return -ENOENT;
+ hash = ch_flower->fs.hash;
+ tc_prio = ch_flower->fs.tc_prio;
+
ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
if (ret)
goto err;
@@ -756,6 +820,9 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
}
kfree_rcu(ch_flower, rcu);
+ if (hash)
+ cxgb4_tc_flower_hash_prio_del(adap, tc_prio);
+
err:
return ret;
}
@@ -836,7 +903,8 @@ int cxgb4_tc_flower_stats(struct net_device *dev,
ofld_stats->last_used = jiffies;
flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
packets - ofld_stats->packet_count,
- ofld_stats->last_used);
+ ofld_stats->last_used,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
ofld_stats->packet_count = packets;
ofld_stats->byte_count = bytes;
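
[Editor's note] cxgb4_tc_flower_hash_prio_del() above walks the flower rhashtable to recompute the maximum HASH-region prio. rhashtable walkers must tolerate ERR_PTR(-EAGAIN) from rhashtable_walk_next() when a table resize lands mid-walk, which is why the walk sits in a do/while retry. The canonical pattern, reduced to a sketch (tbl and visit() are hypothetical):

#include <linux/err.h>
#include <linux/rhashtable.h>

static void walk_all(struct rhashtable *tbl, void (*visit)(void *obj))
{
	struct rhashtable_iter iter;
	void *obj;

	rhashtable_walk_enter(tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((obj = rhashtable_walk_next(&iter)) != NULL) {
			if (IS_ERR(obj))
				break;	/* -EAGAIN: restart the walk */
			visit(obj);
		}

		rhashtable_walk_stop(&iter);
	} while (obj == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
}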
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
index e132516e9868..0a30c96b81ff 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
@@ -112,7 +112,8 @@ void cxgb4_process_flow_actions(struct net_device *in,
struct flow_action *actions,
struct ch_filter_specification *fs);
int cxgb4_validate_flow_actions(struct net_device *dev,
- struct flow_action *actions);
+ struct flow_action *actions,
+ struct netlink_ext_ack *extack);
int cxgb4_tc_flower_replace(struct net_device *dev,
struct flow_cls_offload *cls);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 1b7681a4eb32..c88c47a14fbb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -198,22 +198,14 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
struct ch_filter_specification *fs;
int ret, fidx;
- /* Note that TC uses prio 0 to indicate stack to generate
- * automatic prio and hence doesn't pass prio 0 to driver.
- * However, the hardware TCAM index starts from 0. Hence, the
- * -1 here. 1 slot is enough to create a wildcard matchall
- * VIID rule.
+ /* Get a free filter entry TID where we can insert this new
+ * rule. Only insert the rule if its prio doesn't conflict
+ * with existing rules.
+ *
+ * 1 slot is enough to create a wildcard matchall VIID rule.
*/
- if (cls->common.prio <= (adap->tids.nftids + adap->tids.nhpftids))
- fidx = cls->common.prio - 1;
- else
- fidx = cxgb4_get_free_ftid(dev, PF_INET);
-
- /* Only insert MATCHALL rule if its priority doesn't conflict
- * with existing rules in the LETCAM.
- */
- if (fidx < 0 ||
- !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
+ fidx = cxgb4_get_free_ftid(dev, PF_INET, false, cls->common.prio);
+ if (fidx < 0) {
NL_SET_ERR_MSG_MOD(extack,
"No free LETCAM index available");
return -ENOMEM;
@@ -286,7 +278,8 @@ int cxgb4_tc_matchall_replace(struct net_device *dev,
}
ret = cxgb4_validate_flow_actions(dev,
- &cls_matchall->rule->action);
+ &cls_matchall->rule->action,
+ extack);
if (ret)
return ret;
@@ -353,7 +346,8 @@ int cxgb4_tc_matchall_stats(struct net_device *dev,
flow_stats_update(&cls_matchall->stats,
bytes - tc_port_matchall->ingress.bytes,
packets - tc_port_matchall->ingress.packets,
- tc_port_matchall->ingress.last_used);
+ tc_port_matchall->ingress.last_used,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
tc_port_matchall->ingress.packets = packets;
tc_port_matchall->ingress.bytes = bytes;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 269b8d9e25e0..3f3c11e54d97 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -155,9 +155,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
struct ch_filter_specification fs;
struct cxgb4_tc_u32_table *t;
struct cxgb4_link *link;
- unsigned int filter_id;
u32 uhtid, link_uhtid;
bool is_ipv6 = false;
+ u8 inet_family;
+ int filter_id;
int ret;
if (!can_tc_u32_offload(dev))
@@ -166,18 +167,15 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
return -EOPNOTSUPP;
- /* Note that TC uses prio 0 to indicate stack to generate
- * automatic prio and hence doesn't pass prio 0 to driver.
- * However, the hardware TCAM index starts from 0. Hence, the
- * -1 here.
- */
- filter_id = TC_U32_NODE(cls->knode.handle) - 1;
+ inet_family = (protocol == htons(ETH_P_IPV6)) ? PF_INET6 : PF_INET;
- /* Only insert U32 rule if its priority doesn't conflict with
- * existing rules in the LETCAM.
+ /* Get a free filter entry TID where we can insert this new
+ * rule. Only insert the rule if its prio doesn't conflict
+ * with existing rules.
*/
- if (filter_id >= adapter->tids.nftids + adapter->tids.nhpftids ||
- !cxgb4_filter_prio_in_range(dev, filter_id, cls->common.prio)) {
+ filter_id = cxgb4_get_free_ftid(dev, inet_family, false,
+ TC_U32_NODE(cls->knode.handle));
+ if (filter_id < 0) {
NL_SET_ERR_MSG_MOD(extack,
"No free LETCAM index available");
return -ENOMEM;
@@ -358,23 +356,65 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
struct cxgb4_link *link = NULL;
struct cxgb4_tc_u32_table *t;
struct filter_entry *f;
+ bool found = false;
u32 handle, uhtid;
+ u8 nslots;
int ret;
if (!can_tc_u32_offload(dev))
return -EOPNOTSUPP;
/* Fetch the location to delete the filter. */
- filter_id = TC_U32_NODE(cls->knode.handle) - 1;
- if (filter_id >= adapter->tids.nftids + adapter->tids.nhpftids)
- return -ERANGE;
+ max_tids = adapter->tids.nhpftids + adapter->tids.nftids;
+
+ spin_lock_bh(&adapter->tids.ftid_lock);
+ filter_id = 0;
+ while (filter_id < max_tids) {
+ if (filter_id < adapter->tids.nhpftids) {
+ i = filter_id;
+ f = &adapter->tids.hpftid_tab[i];
+ if (f->valid && f->fs.tc_cookie == cls->knode.handle) {
+ found = true;
+ break;
+ }
- if (filter_id < adapter->tids.nhpftids)
- f = &adapter->tids.hpftid_tab[filter_id];
- else
- f = &adapter->tids.ftid_tab[filter_id - adapter->tids.nhpftids];
+ i = find_next_bit(adapter->tids.hpftid_bmap,
+ adapter->tids.nhpftids, i + 1);
+ if (i >= adapter->tids.nhpftids) {
+ filter_id = adapter->tids.nhpftids;
+ continue;
+ }
+
+ filter_id = i;
+ } else {
+ i = filter_id - adapter->tids.nhpftids;
+ f = &adapter->tids.ftid_tab[i];
+ if (f->valid && f->fs.tc_cookie == cls->knode.handle) {
+ found = true;
+ break;
+ }
+
+ i = find_next_bit(adapter->tids.ftid_bmap,
+ adapter->tids.nftids, i + 1);
+ if (i >= adapter->tids.nftids)
+ break;
+
+ filter_id = i + adapter->tids.nhpftids;
+ }
+
+ nslots = 0;
+ if (f->fs.type) {
+ nslots++;
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <
+ CHELSIO_T6)
+ nslots += 2;
+ }
+
+ filter_id += nslots;
+ }
+ spin_unlock_bh(&adapter->tids.ftid_lock);
- if (cls->knode.handle != f->fs.tc_cookie)
+ if (!found)
return -ERANGE;
t = adapter->tc_u32;
@@ -407,7 +447,6 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
/* If a link is being deleted, then delete all filters
* associated with the link.
*/
- max_tids = adapter->tids.nftids;
for (i = 0; i < t->size; i++) {
link = &t->table[i];
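
[Editor's note] The rewritten delete path above no longer derives the filter index from the u32 handle; it scans both filter tables for the entry whose tc_cookie matches, stepping over the extra slots a wide (IPv6) entry consumes. A linear, single-table sketch of that scan, with hypothetical stand-in types:

#include <stdbool.h>
#include <stdint.h>

struct fentry {				/* hypothetical stand-in */
	bool valid;
	unsigned int nslots;		/* 1, or more for IPv6 */
	uint64_t cookie;
};

/* Return the index of the entry matching @cookie, or -1. */
static int find_by_cookie(const struct fentry *tab, unsigned int ntids,
			  uint64_t cookie)
{
	unsigned int i = 0;

	while (i < ntids) {
		const struct fentry *f = &tab[i];

		if (f->valid && f->cookie == cookie)
			return i;
		i += f->valid ? f->nslots : 1;
	}
	return -1;
}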
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
index a4b99edcc339..125868c6770a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -289,6 +289,6 @@ struct cxgb4_link {
struct cxgb4_tc_u32_table {
unsigned int size; /* number of entries in table */
- struct cxgb4_link table[0]; /* Jump table */
+ struct cxgb4_link table[]; /* Jump table */
};
#endif /* __CXGB4_TC_U32_PARSE_H */
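
[Editor's note] The [0]-to-[] conversions scattered through this diff replace the old GNU zero-length-array idiom with C99 flexible array members, which the compiler can bounds-check. Allocation keeps the same shape and typically pairs with the kernel's overflow-safe struct_size() helper; a hypothetical sketch against the table above:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical allocator for the jump table declared above. */
static struct cxgb4_tc_u32_table *alloc_u32_table(unsigned int nentries)
{
	struct cxgb4_tc_u32_table *t;

	t = kzalloc(struct_size(t, table, nentries), GFP_KERNEL);
	if (t)
		t->size = nentries;
	return t;
}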
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index cce33d279094..e65b52375dd8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -662,6 +662,25 @@ static int uld_attach(struct adapter *adap, unsigned int uld)
return 0;
}
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+/* cxgb4_set_ktls_feature: request FW to enable/disable ktls settings.
+ * @adap: adapter info
+ * @enable: 1 to enable / 0 to disable ktls settings.
+ */
+static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
+{
+ u32 params = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_TX_HW) |
+ FW_PARAMS_PARAM_Y_V(enable));
+ int ret = 0;
+
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &params, &params);
+ /* If FW returns failure, clear the kTLS flag */
+ if (ret)
+ adap->params.crypto &= ~ULP_CRYPTO_KTLS_INLINE;
+}
+#endif
+
/* cxgb4_register_uld - register an upper-layer driver
* @type: the ULD type
* @p: the ULD methods
@@ -698,6 +717,12 @@ void cxgb4_register_uld(enum cxgb4_uld type,
}
if (adap->flags & CXGB4_FULL_INIT_DONE)
enable_rx_uld(adap, type);
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ /* Send mbox to enable kTLS-related settings. */
+ if (type == CXGB4_ULD_CRYPTO &&
+ (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
+ cxgb4_set_ktls_feature(adap, 1);
+#endif
if (adap->uld[type].add)
goto free_irq;
ret = setup_sge_txq_uld(adap, type, p);
@@ -750,6 +775,13 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
continue;
cxgb4_shutdown_uld_adapter(adap, type);
+
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ /* Send mbox to disable kTLS-related settings. */
+ if (type == CXGB4_ULD_CRYPTO &&
+ (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
+ cxgb4_set_ktls_feature(adap, 0);
+#endif
}
mutex_unlock(&uld_mutex);
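
[Editor's note] cxgb4_set_ktls_feature() above addresses the firmware parameter with a single 32-bit selector built from the FW_PARAMS_*_V fields; the enable/disable flag rides in PARAM_Y. The same composition addresses any device-level parameter, as in this illustrative helper:

/* Illustrative only: compose a device-level FW param selector;
 * t4_set_params() then carries it over the mailbox.
 */
static u32 fw_dev_param(u32 param_x, u32 param_y)
{
	return FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	       FW_PARAMS_PARAM_X_V(param_x) |
	       FW_PARAMS_PARAM_Y_V(param_y);
}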
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index d9d27bc1ae67..be831317520a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -149,6 +149,8 @@ struct tid_info {
atomic_t conns_in_use;
/* lock for setting/clearing filter bitmap */
spinlock_t ftid_lock;
+
+ unsigned int tc_hash_tids_max_prio;
};
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -263,7 +265,8 @@ struct filter_ctx {
struct ch_filter_specification;
-int cxgb4_get_free_ftid(struct net_device *dev, int family);
+int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
+ u32 tc_prio);
int __cxgb4_set_filter(struct net_device *dev, int filter_id,
struct ch_filter_specification *fs,
struct filter_ctx *ctx);
@@ -357,6 +360,26 @@ struct chcr_stats_debug {
atomic_t tls_pdu_tx;
atomic_t tls_pdu_rx;
atomic_t tls_key;
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ atomic64_t ktls_tx_connection_open;
+ atomic64_t ktls_tx_connection_fail;
+ atomic64_t ktls_tx_connection_close;
+ atomic64_t ktls_tx_send_records;
+ atomic64_t ktls_tx_end_pkts;
+ atomic64_t ktls_tx_start_pkts;
+ atomic64_t ktls_tx_middle_pkts;
+ atomic64_t ktls_tx_retransmit_pkts;
+ atomic64_t ktls_tx_complete_pkts;
+ atomic64_t ktls_tx_trimmed_pkts;
+ atomic64_t ktls_tx_encrypted_packets;
+ atomic64_t ktls_tx_encrypted_bytes;
+ atomic64_t ktls_tx_ctx;
+ atomic64_t ktls_tx_ooo;
+ atomic64_t ktls_tx_skip_no_sync_data;
+ atomic64_t ktls_tx_drop_no_sync_data;
+ atomic64_t ktls_tx_drop_bypass_req;
+#endif
};
#define OCQ_WIN_OFFSET(pdev, vres) \
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 1a16449e9deb..72b37a66c7d8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -59,7 +59,7 @@ struct l2t_data {
rwlock_t lock;
atomic_t nfree; /* number of free entries */
struct l2t_entry *rover; /* starting point for next allocation */
- struct l2t_entry l2tab[0]; /* MUST BE LAST */
+ struct l2t_entry l2tab[]; /* MUST BE LAST */
};
static inline unsigned int vlan_prio(const struct l2t_entry *e)
@@ -700,6 +700,17 @@ static char l2e_state(const struct l2t_entry *e)
}
}
+bool cxgb4_check_l2t_valid(struct l2t_entry *e)
+{
+ bool valid;
+
+ spin_lock(&e->lock);
+ valid = (e->state == L2T_STATE_VALID);
+ spin_unlock(&e->lock);
+ return valid;
+}
+EXPORT_SYMBOL(cxgb4_check_l2t_valid);
+
static int l2t_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 79665bd8f881..340fecb28a13 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -122,6 +122,7 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
u8 port, u8 *dmac);
struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
+bool cxgb4_check_l2t_valid(struct l2t_entry *e);
extern const struct file_operations t4_l2t_fops;
#endif /* __CXGB4_L2T_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 5cc74a5a1774..5f8b871d79af 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -82,7 +82,7 @@ struct sched_class {
struct sched_table { /* per port scheduling table */
u8 sched_size;
- struct sched_class tab[0];
+ struct sched_class tab[];
};
static inline bool can_sched(struct net_device *dev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index cab3d17e0e1a..f5dd34db4b54 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1418,6 +1418,11 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
#endif /* CHELSIO_IPSEC_INLINE */
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+ if (skb->decrypted)
+ return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+#endif /* CHELSIO_TLS_DEVICE */
+
qidx = skb_get_queue_mapping(skb);
if (ptp_enabled) {
spin_lock(&adap->ptp_lock);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.h b/drivers/net/ethernet/chelsio/cxgb4/smt.h
index 1268d6e93a47..541249d78914 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.h
@@ -66,7 +66,7 @@ struct smt_entry {
struct smt_data {
unsigned int smt_size;
rwlock_t lock;
- struct smt_entry smtab[0];
+ struct smt_entry smtab[];
};
struct smt_data *t4_init_smt(void);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 844fdcf55118..239f678a94ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -1379,8 +1379,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9608, 0x9638,
0x9640, 0x96f4,
0x9800, 0x9808,
- 0x9820, 0x983c,
- 0x9850, 0x9864,
+ 0x9810, 0x9864,
0x9c00, 0x9c6c,
0x9c80, 0x9cec,
0x9d00, 0x9d6c,
@@ -1389,7 +1388,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9e80, 0x9eec,
0x9f00, 0x9f6c,
0x9f80, 0xa020,
- 0xd004, 0xd004,
+ 0xd000, 0xd004,
0xd010, 0xd03c,
0xdfc0, 0xdfe0,
0xe000, 0x1106c,
@@ -1430,10 +1429,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1a0b0, 0x1a0e4,
0x1a0ec, 0x1a0f8,
0x1a100, 0x1a108,
- 0x1a114, 0x1a120,
- 0x1a128, 0x1a130,
- 0x1a138, 0x1a138,
- 0x1a190, 0x1a1c4,
+ 0x1a114, 0x1a130,
+ 0x1a138, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e008, 0x1e00c,
0x1e040, 0x1e044,
@@ -2162,8 +2159,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9640, 0x9704,
0x9710, 0x971c,
0x9800, 0x9808,
- 0x9820, 0x983c,
- 0x9850, 0x9864,
+ 0x9810, 0x9864,
0x9c00, 0x9c6c,
0x9c80, 0x9cec,
0x9d00, 0x9d6c,
@@ -2172,7 +2168,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x9e80, 0x9eec,
0x9f00, 0x9f6c,
0x9f80, 0xa020,
- 0xd004, 0xd03c,
+ 0xd000, 0xd03c,
0xd100, 0xd118,
0xd200, 0xd214,
0xd220, 0xd234,
@@ -2240,10 +2236,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1a0b0, 0x1a0e4,
0x1a0ec, 0x1a0f8,
0x1a100, 0x1a108,
- 0x1a114, 0x1a120,
- 0x1a128, 0x1a130,
- 0x1a138, 0x1a138,
- 0x1a190, 0x1a1c4,
+ 0x1a114, 0x1a130,
+ 0x1a138, 0x1a1c4,
0x1a1fc, 0x1a1fc,
0x1e008, 0x1e00c,
0x1e040, 0x1e044,
@@ -4480,7 +4474,7 @@ static void tp_intr_handler(struct adapter *adapter)
*/
static void sge_intr_handler(struct adapter *adapter)
{
- u64 v;
+ u32 v = 0, perr;
u32 err;
static const struct intr_info sge_intr_info[] = {
@@ -4515,13 +4509,29 @@ static void sge_intr_handler(struct adapter *adapter)
{ 0 }
};
- v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
- ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
- if (v) {
- dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
- (unsigned long long)v);
- t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
- t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
+ perr = t4_read_reg(adapter, SGE_INT_CAUSE1_A);
+ if (perr) {
+ v |= perr;
+ dev_alert(adapter->pdev_dev, "SGE Cause1 Parity Error %#x\n",
+ perr);
+ }
+
+ perr = t4_read_reg(adapter, SGE_INT_CAUSE2_A);
+ if (perr) {
+ v |= perr;
+ dev_alert(adapter->pdev_dev, "SGE Cause2 Parity Error %#x\n",
+ perr);
+ }
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) >= CHELSIO_T5) {
+ perr = t4_read_reg(adapter, SGE_INT_CAUSE5_A);
+ /* Parity error (CRC) for err_T_RxCRC is trivial, ignore it */
+ perr &= ~ERR_T_RXCRC_F;
+ if (perr) {
+ v |= perr;
+ dev_alert(adapter->pdev_dev,
+ "SGE Cause5 Parity Error %#x\n", perr);
+ }
}
v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
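
[Editor's note] The reworked sge_intr_handler() replaces the combined 64-bit read with per-register reads so each cause register can be reported, and masked where needed (as with ERR_T_RXCRC_F), individually. The read-report-accumulate step it repeats reduces to a small helper, sketched here with a hypothetical name:

/* Hypothetical helper: read one write-to-clear cause register,
 * report any set bits, and hand them back for accumulation.
 */
static u32 sge_read_cause(struct adapter *adap, u32 reg, const char *name)
{
	u32 cause = t4_read_reg(adap, reg);

	if (cause)
		dev_alert(adap->pdev_dev, "%s parity error %#x\n",
			  name, cause);
	return cause;
}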
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 575c6abcdae7..fed5f93bf620 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -47,6 +47,7 @@ enum {
CPL_CLOSE_LISTSRV_REQ = 0x9,
CPL_ABORT_REQ = 0xA,
CPL_ABORT_RPL = 0xB,
+ CPL_TX_DATA = 0xC,
CPL_RX_DATA_ACK = 0xD,
CPL_TX_PKT = 0xE,
CPL_L2T_WRITE_REQ = 0x12,
@@ -705,6 +706,14 @@ struct cpl_set_tcb_field {
__be64 val;
};
+struct cpl_set_tcb_field_core {
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 word_cookie;
+ __be64 mask;
+ __be64 val;
+};
+
/* cpl_set_tcb_field.word_cookie fields */
#define TCB_WORD_S 0
#define TCB_WORD_V(x) ((x) << TCB_WORD_S)
@@ -1462,6 +1471,16 @@ struct cpl_tx_data {
#define TX_FORCE_S 13
#define TX_FORCE_V(x) ((x) << TX_FORCE_S)
+#define TX_DATA_MSS_S 16
+#define TX_DATA_MSS_M 0xFFFF
+#define TX_DATA_MSS_V(x) ((x) << TX_DATA_MSS_S)
+#define TX_DATA_MSS_G(x) (((x) >> TX_DATA_MSS_S) & TX_DATA_MSS_M)
+
+#define TX_LENGTH_S 0
+#define TX_LENGTH_M 0xFFFF
+#define TX_LENGTH_V(x) ((x) << TX_LENGTH_S)
+#define TX_LENGTH_G(x) (((x) >> TX_LENGTH_S) & TX_LENGTH_M)
+
#define T6_TX_FORCE_S 20
#define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S)
#define T6_TX_FORCE_F T6_TX_FORCE_V(1U)
@@ -1471,6 +1490,15 @@ struct cpl_tx_data {
#define TX_SHOVE_S 14
#define TX_SHOVE_V(x) ((x) << TX_SHOVE_S)
+#define TX_SHOVE_F TX_SHOVE_V(1U)
+
+#define TX_BYPASS_S 21
+#define TX_BYPASS_V(x) ((x) << TX_BYPASS_S)
+#define TX_BYPASS_F TX_BYPASS_V(1U)
+
+#define TX_PUSH_S 22
+#define TX_PUSH_V(x) ((x) << TX_PUSH_S)
+#define TX_PUSH_F TX_PUSH_V(1U)
#define TX_ULP_MODE_S 10
#define TX_ULP_MODE_M 0x7
@@ -1511,7 +1539,7 @@ struct ulptx_sgl {
__be32 cmd_nsge;
__be32 len0;
__be64 addr0;
- struct ulptx_sge_pair sge[0];
+ struct ulptx_sge_pair sge[];
};
struct ulptx_idata {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a957a6e4d4c4..bb20e50ddb84 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -487,6 +487,12 @@
#define ERROR_QID_M 0x1ffffU
#define ERROR_QID_G(x) (((x) >> ERROR_QID_S) & ERROR_QID_M)
+#define SGE_INT_CAUSE5_A 0x110c
+
+#define ERR_T_RXCRC_S 31
+#define ERR_T_RXCRC_V(x) ((x) << ERR_T_RXCRC_S)
+#define ERR_T_RXCRC_F ERR_T_RXCRC_V(1U)
+
#define HP_INT_THRESH_S 28
#define HP_INT_THRESH_M 0xfU
#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 1b9afb192f7f..50232e063f49 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -35,6 +35,11 @@
#ifndef __T4_TCB_H
#define __T4_TCB_H
+#define TCB_L2T_IX_W 0
+#define TCB_L2T_IX_S 12
+#define TCB_L2T_IX_M 0xfffULL
+#define TCB_L2T_IX_V(x) ((x) << TCB_L2T_IX_S)
+
#define TCB_SMAC_SEL_W 0
#define TCB_SMAC_SEL_S 24
#define TCB_SMAC_SEL_M 0xffULL
@@ -45,11 +50,6 @@
#define TCB_T_FLAGS_M 0xffffffffffffffffULL
#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S)
-#define TCB_RQ_START_W 30
-#define TCB_RQ_START_S 0
-#define TCB_RQ_START_M 0x3ffffffULL
-#define TCB_RQ_START_V(x) ((x) << TCB_RQ_START_S)
-
#define TF_CCTRL_ECE_S 60
#define TF_CCTRL_CWR_S 61
#define TF_CCTRL_RFR_S 62
@@ -59,6 +59,11 @@
#define TCB_RSS_INFO_M 0x3ffULL
#define TCB_RSS_INFO_V(x) ((x) << TCB_RSS_INFO_S)
+#define TCB_T_STATE_W 3
+#define TCB_T_STATE_S 16
+#define TCB_T_STATE_M 0xfULL
+#define TCB_T_STATE_V(x) ((x) << TCB_T_STATE_S)
+
#define TCB_TIMESTAMP_W 5
#define TCB_TIMESTAMP_S 0
#define TCB_TIMESTAMP_M 0xffffffffULL
@@ -69,13 +74,60 @@
#define TCB_RTT_TS_RECENT_AGE_M 0xffffffffULL
#define TCB_RTT_TS_RECENT_AGE_V(x) ((x) << TCB_RTT_TS_RECENT_AGE_S)
+#define TCB_T_RTSEQ_RECENT_W 7
+#define TCB_T_RTSEQ_RECENT_S 0
+#define TCB_T_RTSEQ_RECENT_M 0xffffffffULL
+#define TCB_T_RTSEQ_RECENT_V(x) ((x) << TCB_T_RTSEQ_RECENT_S)
+
+#define TCB_TX_MAX_W 9
+#define TCB_TX_MAX_S 0
+#define TCB_TX_MAX_M 0xffffffffULL
+#define TCB_TX_MAX_V(x) ((x) << TCB_TX_MAX_S)
+
#define TCB_SND_UNA_RAW_W 10
+#define TCB_SND_UNA_RAW_S 0
+#define TCB_SND_UNA_RAW_M 0xfffffffULL
+#define TCB_SND_UNA_RAW_V(x) ((x) << TCB_SND_UNA_RAW_S)
+
+#define TCB_SND_NXT_RAW_W 10
+#define TCB_SND_NXT_RAW_S 28
+#define TCB_SND_NXT_RAW_M 0xfffffffULL
+#define TCB_SND_NXT_RAW_V(x) ((x) << TCB_SND_NXT_RAW_S)
+
+#define TCB_SND_MAX_RAW_W 11
+#define TCB_SND_MAX_RAW_S 24
+#define TCB_SND_MAX_RAW_M 0xfffffffULL
+#define TCB_SND_MAX_RAW_V(x) ((x) << TCB_SND_MAX_RAW_S)
+
+#define TCB_RCV_NXT_W 16
+#define TCB_RCV_NXT_S 10
+#define TCB_RCV_NXT_M 0xffffffffULL
+#define TCB_RCV_NXT_V(x) ((x) << TCB_RCV_NXT_S)
+
+#define TCB_RCV_WND_W 17
+#define TCB_RCV_WND_S 10
+#define TCB_RCV_WND_M 0xffffffULL
+#define TCB_RCV_WND_V(x) ((x) << TCB_RCV_WND_S)
+
#define TCB_RX_FRAG2_PTR_RAW_W 27
#define TCB_RX_FRAG3_LEN_RAW_W 29
#define TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W 30
#define TCB_PDU_HDR_LEN_W 31
+#define TCB_RQ_START_W 30
+#define TCB_RQ_START_S 0
+#define TCB_RQ_START_M 0x3ffffffULL
+#define TCB_RQ_START_V(x) ((x) << TCB_RQ_START_S)
+
#define TF_RX_PDU_OUT_S 49
#define TF_RX_PDU_OUT_V(x) ((__u64)(x) << TF_RX_PDU_OUT_S)
+#define TF_CORE_BYPASS_S 63
+#define TF_CORE_BYPASS_V(x) ((__u64)(x) << TF_CORE_BYPASS_S)
+#define TF_CORE_BYPASS_F TF_CORE_BYPASS_V(1)
+
+#define TF_NON_OFFLOAD_S 1
+#define TF_NON_OFFLOAD_V(x) ((x) << TF_NON_OFFLOAD_S)
+#define TF_NON_OFFLOAD_F TF_NON_OFFLOAD_V(1)
+
#endif /* __T4_TCB_H */
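
[Editor's note] Each TCB field in this header is described by a _W/_S/_M/_V quadruple: the word index within the TCB, the bit shift inside that word, the field mask, and a value builder. A field rewrite composes them into the (mask, val) pair carried by a CPL_SET_TCB_FIELD; an illustrative kernel-context sketch for TCB_L2T_IX:

/* Illustrative only: build the big-endian (mask, val) pair that
 * rewrites the L2T index field; TCB_L2T_IX_W names TCB word 0.
 */
static void tcb_l2t_ix_update(__be64 *mask, __be64 *val, u32 l2t_ix)
{
	*mask = cpu_to_be64(TCB_L2T_IX_V((u64)TCB_L2T_IX_M));
	*val = cpu_to_be64(TCB_L2T_IX_V((u64)l2t_ix));
}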
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index accad1101ad1..68fe734b9b37 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -737,7 +737,7 @@ struct fw_flowc_mnemval {
struct fw_flowc_wr {
__be32 op_to_nparams;
__be32 flowid_len16;
- struct fw_flowc_mnemval mnemval[0];
+ struct fw_flowc_mnemval mnemval[];
};
#define FW_FLOWC_WR_NPARAMS_S 0
@@ -1205,6 +1205,7 @@ enum fw_caps_config_crypto {
FW_CAPS_CONFIG_CRYPTO_LOOKASIDE = 0x00000001,
FW_CAPS_CONFIG_TLS_INLINE = 0x00000002,
FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004,
+ FW_CAPS_CONFIG_TX_TLS_HW = 0x00000008,
};
enum fw_caps_config_fcoe {
@@ -1328,6 +1329,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A,
FW_PARAMS_PARAM_DEV_NUM_TM_CLASS = 0x2B,
FW_PARAMS_PARAM_DEV_FILTER = 0x2E,
+ FW_PARAMS_PARAM_DEV_KTLS_TX_HW = 0x31,
};
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index f4d41f968afa..9cc3541a7e1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -55,7 +55,6 @@
/*
* Generic information about the driver.
*/
-#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
/*
@@ -1556,7 +1555,6 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
struct adapter *adapter = netdev2adap(dev);
strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
@@ -1921,6 +1919,8 @@ static void cxgb4vf_get_wol(struct net_device *dev,
NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static const struct ethtool_ops cxgb4vf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_link_ksettings = cxgb4vf_get_link_ksettings,
.get_fecparam = cxgb4vf_get_fecparam,
.get_drvinfo = cxgb4vf_get_drvinfo,
@@ -2934,12 +2934,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
unsigned int pf;
/*
- * Print our driver banner the first time we're called to initialize a
- * device.
- */
- pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
-
- /*
* Initialize generic PCI device state.
*/
err = pci_enable_device(pdev);
@@ -3454,7 +3448,6 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
static struct pci_driver cxgb4vf_driver = {
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 21034536c9c5..854d87e1125c 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -35,7 +35,6 @@
*/
#define DRV_NAME "libcxgb"
-#define DRV_VERSION "1.0.0-ko"
#define pr_fmt(fmt) DRV_NAME ": " fmt
#include <linux/kernel.h>
@@ -530,5 +529,4 @@ EXPORT_SYMBOL(cxgbi_tagmask_set);
MODULE_AUTHOR("Chelsio Communications");
MODULE_DESCRIPTION("Chelsio common library");
-MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
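
[Editor's note] The DRV_VERSION/MODULE_VERSION removals repeated across this series rely on the ethtool core supplying a default version string (the running kernel's release) when a driver leaves ethtool_drvinfo::version empty, so per-driver version strings add nothing. After the change, a get_drvinfo typically shrinks to driver and bus info only; a hedged sketch with a hypothetical name:

/* Sketch of a post-series get_drvinfo: the ethtool core supplies
 * the default version string.
 */
static void example_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(netdev->dev.parent),
		sizeof(info->bus_info));
}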
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
index 7b02c200dd1e..1b4156461ba1 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
@@ -122,7 +122,7 @@ struct cxgbi_ppm_pool {
unsigned int base; /* base index */
unsigned int next; /* next possible free index */
spinlock_t lock; /* ppm pool lock */
- unsigned long bmap[0];
+ unsigned long bmap[];
} ____cacheline_aligned_in_smp;
struct cxgbi_ppm {
@@ -145,7 +145,7 @@ struct cxgbi_ppm {
unsigned int next;
unsigned int max_index_in_edram;
unsigned long *ppod_bmap;
- struct cxgbi_ppod_data ppod_data[0];
+ struct cxgbi_ppod_data ppod_data[];
};
#define DDP_THRESHOLD 512
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index f37c9a08c4cf..9f5e5ec69991 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -24,7 +24,6 @@
#include <linux/platform_data/eth-ep93xx.h>
#define DRV_MODULE_NAME "ep93xx-eth"
-#define DRV_MODULE_VERSION "0.1"
#define RX_QUEUE_ENTRIES 64
#define TX_QUEUE_ENTRIES 8
@@ -691,7 +690,6 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int ep93xx_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 0dd64acd2a3f..18f3aeb88f22 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,8 +33,6 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.3.0.53"
-#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index ebd5c2cf1efe..4d8e0aa447fb 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -147,7 +147,6 @@ static void enic_get_drvinfo(struct net_device *netdev,
return;
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, fw_info->fw_version,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
@@ -324,25 +323,6 @@ static int enic_coalesce_valid(struct enic *enic,
u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
ec->rx_coalesce_usecs_low);
- if (ec->rx_max_coalesced_frames ||
- ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_max_coalesced_frames ||
- ec->tx_coalesce_usecs_irq ||
- ec->tx_max_coalesced_frames_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval)
- return -EINVAL;
-
if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
ec->tx_coalesce_usecs)
return -EINVAL;
@@ -636,6 +616,10 @@ static int enic_get_ts_info(struct net_device *netdev,
}
static const struct ethtool_ops enic_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
+ ETHTOOL_COALESCE_RX_USECS_LOW |
+ ETHTOOL_COALESCE_RX_USECS_HIGH,
.get_drvinfo = enic_get_drvinfo,
.get_msglevel = enic_get_msglevel,
.set_msglevel = enic_set_msglevel,
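
[Editor's note] The large hand-rolled -EINVAL block deleted above is now redundant: with supported_coalesce_params declared, the ethtool core rejects any coalesce request that touches a field outside the declared set before the driver callback runs. Declaring the mask is the whole contract; a sketch with a hypothetical ops name:

#include <linux/ethtool.h>

/* Only the declared coalesce parameters ever reach .set_coalesce
 * once this mask is set.
 */
static const struct ethtool_ops example_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	/* .get_coalesce / .set_coalesce as usual */
};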
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ddf60dc9ad16..cd5fe4f6b54c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -80,7 +80,6 @@ static const struct pci_device_id enic_id_table[] = {
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
#define ENIC_LARGE_PKT_THRESHOLD 1000
@@ -696,8 +695,7 @@ static void enic_preload_tcp_csum(struct sk_buff *skb)
tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
}
}
@@ -3056,8 +3054,6 @@ static struct pci_driver enic_driver = {
static int __init enic_init_module(void)
{
- pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
-
return pci_register_driver(&enic_driver);
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index fef5a0a0663d..fcc4a3ccdd94 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -541,7 +541,7 @@ struct vnic_devcmd_notify {
struct vnic_devcmd_provinfo {
u8 oui[3];
u8 type;
- u8 data[0];
+ u8 data[];
};
/* These are used in flags field of different filters to denote
@@ -648,9 +648,9 @@ enum {
#define FILTER_MAX_BUF_SIZE 100
struct filter_tlv {
- u_int32_t type;
- u_int32_t length;
- u_int32_t val[0];
+ u32 type;
+ u32 length;
+ u32 val[];
};
enum {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.h b/drivers/net/ethernet/cisco/enic/vnic_vic.h
index 9ef81f148351..057776908828 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.h
@@ -59,7 +59,7 @@ struct vic_provinfo {
u16 type;
u16 length;
u8 value[0];
- } tlv[0];
+ } tlv[];
} __packed;
#define VIC_PROVINFO_ADD_TLV(vp, tlvtype, tlvlen, data) \
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index f30fa8e6ef80..5bff5c2be88b 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -44,7 +44,6 @@
#include "gemini.h"
#define DRV_NAME "gmac-gemini"
-#define DRV_VERSION "1.0"
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
static int debug = -1;
@@ -2204,7 +2203,6 @@ static void gmac_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
strcpy(info->bus_info, netdev->dev_id ? "1" : "0");
}
@@ -2224,6 +2222,8 @@ static const struct net_device_ops gmac_351x_ops = {
};
static const struct ethtool_ops gmac_351x_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_sset_count = gmac_get_sset_count,
.get_strings = gmac_get_strings,
.get_ethtool_stats = gmac_get_ethtool_stats,
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index e94ae9b94dbf..7f7705138262 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -42,7 +42,6 @@
#define DM9000_PHY 0x40 /* PHY address 0x01 */
#define CARDNAME "dm9000"
-#define DRV_VERSION "1.31"
/*
* Transmit timeout, default 5 seconds.
@@ -543,7 +542,6 @@ static void dm9000_get_drvinfo(struct net_device *dev,
struct board_info *dm = to_dm9000_board(dev);
strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 42b798a3fad4..592454f444ce 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -30,7 +30,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "de2104x"
-#define DRV_VERSION "0.7"
#define DRV_RELDATE "Mar 17, 2004"
#include <linux/module.h>
@@ -52,14 +51,9 @@
#include <linux/uaccess.h>
#include <asm/unaligned.h>
-/* These identify the driver base version and may not be removed. */
-static char version[] =
-"PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")";
-
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
static int debug = -1;
module_param (debug, int, 0);
@@ -1603,7 +1597,6 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
struct de_private *de = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
}
@@ -1980,11 +1973,6 @@ static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
board_idx++;
-#ifndef MODULE
- if (board_idx == 0)
- pr_info("%s\n", version);
-#endif
-
/* allocate a new ethernet device structure, and fill in defaults */
dev = alloc_etherdev(sizeof(struct de_private));
if (!dev)
@@ -2196,9 +2184,6 @@ static struct pci_driver de_driver = {
static int __init de_init (void)
{
-#ifdef MODULE
- pr_info("%s\n", version);
-#endif
return pci_register_driver(&de_driver);
}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 32d470d4122a..c1884fc9ad32 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -56,8 +56,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "dmfe"
-#define DRV_VERSION "1.36.4"
-#define DRV_RELDATE "2002-01-17"
#include <linux/module.h>
#include <linux/kernel.h>
@@ -280,10 +278,6 @@ enum dmfe_CR6_bits {
};
/* Global variable declaration ----------------------------- */
-static int printed_version;
-static const char version[] =
- "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
-
static int dmfe_debug;
static unsigned char dmfe_media_mode = DMFE_AUTO;
static u32 dmfe_cr6_user_set;
@@ -364,9 +358,6 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
DMFE_DBUG(0, "dmfe_init_one()", 0);
- if (!printed_version++)
- pr_info("%s\n", version);
-
/*
* SPARC on-board DM910x chips should be handled by the main
* tulip driver, except for early DM9100s.
@@ -1081,7 +1072,6 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
struct dmfe_board_info *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
@@ -2177,7 +2167,6 @@ static struct pci_driver dmfe_driver = {
MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
module_param(debug, int, 0);
module_param(mode, byte, 0);
@@ -2204,9 +2193,6 @@ static int __init dmfe_init_module(void)
{
int rc;
- pr_info("%s\n", version);
- printed_version = 1;
-
DMFE_DBUG(0, "init_module() ", debug);
if (debug)
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index b458140aeaef..815907259048 100644
--- a/drivers/net/ethernet/dec/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -381,7 +381,7 @@ struct mediatable {
unsigned has_reset:6;
u32 csr15dir;
u32 csr15val; /* 21143 NWay setting. */
- struct medialeaf mleaf[0];
+ struct medialeaf mleaf[];
};
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 9e9d9eee29d9..48ea658aa1a6 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -12,13 +12,6 @@
#define pr_fmt(fmt) "tulip: " fmt
#define DRV_NAME "tulip"
-#ifdef CONFIG_TULIP_NAPI
-#define DRV_VERSION "1.1.15-NAPI" /* Keep at least for test */
-#else
-#define DRV_VERSION "1.1.15"
-#endif
-#define DRV_RELDATE "Feb 27, 2007"
-
#include <linux/module.h>
#include <linux/pci.h>
@@ -37,9 +30,6 @@
#include <asm/prom.h>
#endif
-static char version[] =
- "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
-
/* A few user-configurable values. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
@@ -109,7 +99,6 @@ static int csr0;
MODULE_AUTHOR("The Linux Kernel Team");
MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
module_param(tulip_debug, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(rx_copybreak, int, 0);
@@ -868,7 +857,6 @@ static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
{
struct tulip_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
@@ -1314,11 +1302,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
unsigned int eeprom_missing = 0;
unsigned int force_csr0 = 0;
-#ifndef MODULE
- if (tulip_debug > 0)
- printk_once(KERN_INFO "%s", version);
-#endif
-
board_idx++;
/*
@@ -1800,14 +1783,13 @@ static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
void __iomem *ioaddr = tp->base_addr;
if (tp->flags & COMET_PM) {
-
unsigned int tmp;
-
+
tmp = ioread32(ioaddr + CSR18);
tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
tmp |= comet_csr18_pm_mode;
iowrite32(tmp, ioaddr + CSR18);
-
+
/* Set the Wake-up Control/Status Register to the given WOL options*/
tmp = ioread32(ioaddr + CSR13);
tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
@@ -1969,10 +1951,6 @@ static struct pci_driver tulip_driver = {
static int __init tulip_init (void)
{
-#ifdef MODULE
- pr_info("%s", version);
-#endif
-
if (!csr0) {
pr_warn("tulip: unknown CPU architecture, using default csr0\n");
/* default to 8 longword cache line alignment */
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 117ffe08800d..f726436b1985 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -7,8 +7,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "uli526x"
-#define DRV_VERSION "0.9.3"
-#define DRV_RELDATE "2005-7-29"
#include <linux/module.h>
@@ -196,10 +194,6 @@ enum uli526x_CR6_bits {
};
/* Global variable declaration ----------------------------- */
-static int printed_version;
-static const char version[] =
- "ULi M5261/M5263 net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
-
static int uli526x_debug;
static unsigned char uli526x_media_mode = ULI526X_AUTO;
static u32 uli526x_cr6_user_set;
@@ -282,9 +276,6 @@ static int uli526x_init_one(struct pci_dev *pdev,
ULI526X_DBUG(0, "uli526x_init_one()", 0);
- if (!printed_version++)
- pr_info("%s\n", version);
-
/* Init network device */
dev = alloc_etherdev(sizeof(*db));
if (dev == NULL)
@@ -972,7 +963,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct uli526x_board_info *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
@@ -1799,9 +1789,6 @@ MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8
static int __init uli526x_init_module(void)
{
- pr_info("%s\n", version);
- printed_version = 1;
-
ULI526X_DBUG(0, "init_module() ", debug);
if (debug)
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 7f136488e67c..4d5e4fa53023 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -47,9 +47,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "winbond-840"
-#define DRV_VERSION "1.01-e"
-#define DRV_RELDATE "Sep-11-2006"
-
/* Automatically extracted configuration info:
probe-func: winbond840_probe
@@ -139,16 +136,9 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#undef PKT_BUF_SZ /* tulip.h also defines this */
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
-/* These identify the driver base version and may not be removed. */
-static const char version[] __initconst =
- "v" DRV_VERSION " (2.4 port) "
- DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
- " http://www.scyld.com/network/drivers.html\n";
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
@@ -1385,7 +1375,6 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
@@ -1650,7 +1639,6 @@ static struct pci_driver w840_driver = {
static int __init w840_init(void)
{
- printk(version);
return pci_register_driver(&w840_driver);
}
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 26c5da032b1e..643090555cc7 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -8,8 +8,6 @@
*/
#define DRV_NAME "DL2000/TC902x-based linux driver"
-#define DRV_VERSION "v1.19"
-#define DRV_RELDATE "2007/08/12"
#include "dl2k.h"
#include <linux/dma-mapping.h>
@@ -20,8 +18,6 @@
#define dr16(reg) ioread16(ioaddr + (reg))
#define dr8(reg) ioread8(ioaddr + (reg))
-static char version[] =
- KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
@@ -113,13 +109,9 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
int chip_idx = ent->driver_data;
int err, irq;
void __iomem *ioaddr;
- static int version_printed;
void *ring_space;
dma_addr_t ring_dma;
- if (!version_printed++)
- printk ("%s", version);
-
err = pci_enable_device (pdev);
if (err)
return err;
@@ -1244,7 +1236,6 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, "dl2k", sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index b91387c456ba..dc566fcc3ba9 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -23,9 +23,6 @@
*/
#define DRV_NAME "sundance"
-#define DRV_VERSION "1.2"
-#define DRV_RELDATE "11-Sep-2006"
-
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
@@ -101,11 +98,6 @@ static char *media[MAX_UNITS];
#include <linux/ethtool.h>
#include <linux/mii.h>
-/* These identify the driver base version and may not be removed. */
-static const char version[] =
- KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
- " Written by Donald Becker\n";
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");
@@ -516,13 +508,6 @@ static int sundance_probe1(struct pci_dev *pdev,
#endif
int phy, phy_end, phy_idx = 0;
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(version);
-#endif
-
if (pci_enable_device(pdev))
return -EIO;
pci_set_master(pdev);
@@ -1657,7 +1642,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
@@ -2010,10 +1994,6 @@ static struct pci_driver sundance_driver = {
static int __init sundance_init(void)
{
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
- printk(version);
-#endif
return pci_register_driver(&sundance_driver);
}
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 5f8fa1145db6..057a508dd6e2 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -729,7 +729,6 @@ static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, "0", sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dnet.h b/drivers/net/ethernet/dnet.h
index 8af6c0705ab3..030724484b49 100644
--- a/drivers/net/ethernet/dnet.h
+++ b/drivers/net/ethernet/dnet.h
@@ -8,7 +8,6 @@
#define _DNET_H
#define DRV_NAME "dnet"
-#define DRV_VERSION "0.9.1"
#define PFX DRV_NAME ": "
/* Register access macros */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index cf3e6f2892ff..6e9022083004 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -33,7 +33,6 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "12.0.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 022a54a1805b..d6ed1d943762 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -221,7 +221,6 @@ static void be_get_drvinfo(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
strlcpy(drvinfo->fw_version, adapter->fw_ver,
sizeof(drvinfo->fw_version));
@@ -1409,6 +1408,9 @@ static int be_set_priv_flags(struct net_device *netdev, u32 flags)
}
const struct ethtool_ops be_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_USECS_LOW_HIGH,
.get_drvinfo = be_get_drvinfo,
.get_wol = be_get_wol,
.set_wol = be_set_wol,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 56f59db6ebf2..a7ac23a6862b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -21,8 +21,7 @@
#include <net/busy_poll.h>
#include <net/vxlan.h>
-MODULE_VERSION(DRV_VER);
-MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
+MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
@@ -5949,8 +5948,6 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
struct net_device *netdev;
int status = 0;
- dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
-
status = pci_enable_device(pdev);
if (status)
goto do_none;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 4572797f00d7..835b7816e372 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -30,7 +30,6 @@
#include "ftgmac100.h"
#define DRV_NAME "ftgmac100"
-#define DRV_VERSION "0.7"
/* Arbitrary values, I am not sure the HW has limits */
#define MAX_RX_QUEUE_ENTRIES 1024
@@ -1150,7 +1149,6 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
@@ -1757,9 +1755,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
struct device_node *np;
int err = 0;
- if (!pdev)
- return -ENODEV;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 6c247cbbd23e..32cf54f0e35b 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -23,7 +23,6 @@
#include "ftmac100.h"
#define DRV_NAME "ftmac100"
-#define DRV_VERSION "0.2"
#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */
#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */
@@ -809,7 +808,6 @@ static void ftmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
@@ -1184,7 +1182,6 @@ static struct platform_driver ftmac100_driver = {
*****************************************************************************/
static int __init ftmac100_init(void)
{
- pr_info("Loading version " DRV_VERSION " ...\n");
return platform_driver_register(&ftmac100_driver);
}
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 84f10970299a..73e896a7d8fd 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -25,8 +25,6 @@
*/
#define DRV_NAME "fealnx"
-#define DRV_VERSION "2.52"
-#define DRV_RELDATE "Sep-11-2006"
static int debug; /* 1-> print debug message */
static int max_interrupt_work = 20;
@@ -91,11 +89,6 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
#include <linux/uaccess.h>
#include <asm/byteorder.h>
-/* These identify the driver base version and may not be removed. */
-static const char version[] =
- KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
-
-
/* This driver was written to use PCI memory space, however some x86 systems
work only with I/O space accesses. */
#ifndef __alpha__
@@ -495,13 +488,6 @@ static int fealnx_init_one(struct pci_dev *pdev,
int bar = 1;
#endif
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(version);
-#endif
-
card_idx++;
sprintf(boardname, "fealnx%d", card_idx);
@@ -1809,7 +1795,6 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
@@ -1950,11 +1935,6 @@ static struct pci_driver fealnx_driver = {
static int __init fealnx_init(void)
{
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
- printk(version);
-#endif
-
return pci_register_driver(&fealnx_driver);
}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index ca74a684a904..2cd1f8efdfa3 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -259,8 +259,20 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->features |= net_dev->hw_features;
net_dev->vlan_features = net_dev->features;
- memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ if (is_valid_ether_addr(mac_addr)) {
+ memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ } else {
+ eth_hw_addr_random(net_dev);
+ err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
+ (enet_addr_t *)net_dev->dev_addr);
+ if (err) {
+ dev_err(dev, "Failed to set random MAC address\n");
+ return -EINVAL;
+ }
+ dev_info(dev, "Using random MAC address: %pM\n",
+ net_dev->dev_addr);
+ }
net_dev->ethtool_ops = &dpaa_ethtool_ops;
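
Together with the fman changes further down, this makes a missing or invalid MAC address in the device tree non-fatal: the MAC driver skips programming the address and the netdev falls back to a random one, pushing it to hardware via change_addr. A hedged sketch of the generic OF-side pattern (np and ndev are assumed to be in scope; the helpers are the stock OF/etherdevice ones):

/* Sketch: of_get_mac_address() returns ERR_PTR() when the property is
 * absent or invalid; fall back to a random address instead of failing.
 */
const u8 *addr = of_get_mac_address(np);

if (!IS_ERR(addr))
	ether_addr_copy(ndev->dev_addr, addr);
else
	eth_hw_addr_random(ndev);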
@@ -2050,7 +2062,7 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
}
#ifdef CONFIG_DPAA_ERRATUM_A050385
-int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
+static int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
struct sk_buff *new_skb, *skb = *s;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 66d150872d48..9db2a02fb531 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -106,19 +106,8 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
static void dpaa_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *drvinfo)
{
- int len;
-
strlcpy(drvinfo->driver, KBUILD_MODNAME,
sizeof(drvinfo->driver));
- len = snprintf(drvinfo->version, sizeof(drvinfo->version),
- "%X", 0);
- len = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%X", 0);
-
- if (len >= sizeof(drvinfo->fw_version)) {
- /* Truncated output */
- netdev_notice(net_dev, "snprintf() = %d\n", len);
- }
strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
sizeof(drvinfo->bus_info));
}
@@ -536,7 +525,6 @@ static int dpaa_get_coalesce(struct net_device *dev,
c->rx_coalesce_usecs = period;
c->rx_max_coalesced_frames = thresh;
- c->use_adaptive_rx_coalesce = false;
return 0;
}
@@ -551,9 +539,6 @@ static int dpaa_set_coalesce(struct net_device *dev,
u8 thresh, prev_thresh;
int cpu, res;
- if (c->use_adaptive_rx_coalesce)
- return -EINVAL;
-
period = c->rx_coalesce_usecs;
thresh = c->rx_max_coalesced_frames;
@@ -593,6 +578,8 @@ revert_values:
}
const struct ethtool_ops dpaa_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_drvinfo = dpaa_get_drvinfo,
.get_msglevel = dpaa_get_msglevel,
.set_msglevel = dpaa_set_msglevel,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 7ff147e89426..b6c46639aa4c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1704,10 +1704,15 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
if (cmd == SIOCSHWTSTAMP)
return dpaa2_eth_ts_ioctl(dev, rq, cmd);
- return -EINVAL;
+ if (priv->mac)
+ return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
+
+ return -EOPNOTSUPP;
}
static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
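
This change, like the nway_reset addition below, forwards PHY-level requests to phylink whenever a MAC object is connected. A minimal sketch of the forwarding pattern, with hypothetical priv/phylink names:

/* Sketch: phylink dispatches MII ioctls to the attached PHY; report
 * -EOPNOTSUPP when no PHY-capable MAC is present.
 */
static int example_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct example_priv *priv = netdev_priv(dev);

	if (priv->phylink)
		return phylink_mii_ioctl(priv->phylink, rq, cmd);

	return -EOPNOTSUPP;
}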
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 96676abcebd5..94347c695233 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -79,6 +79,16 @@ static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
sizeof(drvinfo->bus_info));
}
+static int dpaa2_eth_nway_reset(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ if (priv->mac)
+ return phylink_ethtool_nway_reset(priv->mac->phylink);
+
+ return -EOPNOTSUPP;
+}
+
static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *link_settings)
@@ -761,6 +771,7 @@ static int dpaa2_eth_get_ts_info(struct net_device *dev,
const struct ethtool_ops dpaa2_ethtool_ops = {
.get_drvinfo = dpaa2_eth_get_drvinfo,
+ .nway_reset = dpaa2_eth_nway_reset,
.get_link = ethtool_op_get_link,
.get_link_ksettings = dpaa2_eth_get_link_ksettings,
.set_link_ksettings = dpaa2_eth_set_link_ksettings,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 84233e467ed1..3ee236c5fc37 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -123,49 +123,60 @@ static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
struct dpmac_link_state *dpmac_state = &mac->state;
int err;
- if (state->speed != SPEED_UNKNOWN)
- dpmac_state->rate = state->speed;
-
- if (state->duplex != DUPLEX_UNKNOWN) {
- if (!state->duplex)
- dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
- }
-
if (state->an_enabled)
dpmac_state->options |= DPMAC_LINK_OPT_AUTONEG;
else
dpmac_state->options &= ~DPMAC_LINK_OPT_AUTONEG;
- if (state->pause & MLO_PAUSE_RX)
- dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
-
- if (!!(state->pause & MLO_PAUSE_RX) ^ !!(state->pause & MLO_PAUSE_TX))
- dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
-
err = dpmac_set_link_state(mac->mc_io, 0,
mac->mc_dev->mc_handle, dpmac_state);
if (err)
- netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+ netdev_err(mac->net_dev, "%s: dpmac_set_link_state() = %d\n",
+ __func__, err);
}
-static void dpaa2_mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface, struct phy_device *phy)
+static void dpaa2_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
struct dpmac_link_state *dpmac_state = &mac->state;
int err;
dpmac_state->up = 1;
+
+ if (mac->if_link_type == DPMAC_LINK_TYPE_PHY) {
+ /* If the DPMAC is configured for PHY mode, we need
+ * to pass the link parameters to the MC firmware.
+ */
+ dpmac_state->rate = speed;
+
+ if (duplex == DUPLEX_HALF)
+ dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
+ else if (duplex == DUPLEX_FULL)
+ dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
+
+ /* This is lossy; the firmware really should take the pause
+ * enablement status rather than pause/asym pause status.
+ */
+ if (rx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
+
+ if (rx_pause ^ tx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
+ }
+
err = dpmac_set_link_state(mac->mc_io, 0,
mac->mc_dev->mc_handle, dpmac_state);
if (err)
- netdev_err(mac->net_dev, "dpmac_set_link_state() = %d\n", err);
+ netdev_err(mac->net_dev, "%s: dpmac_set_link_state() = %d\n",
+ __func__, err);
}
static void dpaa2_mac_link_down(struct phylink_config *config,
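
The new prototype follows phylink's reworked mac_link_up(), which now hands the driver the resolved speed, duplex and pause state instead of having it latch them from mac_config()'s state argument. A sketch of the ops wiring under the new API (callback names are hypothetical):

/* Sketch: phylink MAC ops after the mac_link_up() signature change. */
static const struct phylink_mac_ops example_phylink_ops = {
	.validate	= example_validate,
	.mac_config	= example_mac_config,	/* no longer sees speed/duplex */
	.mac_link_down	= example_mac_link_down,
	.mac_link_up	= example_mac_link_up,	/* (config, phy, mode, interface,
						 *  speed, duplex, tx_pause, rx_pause) */
};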
@@ -238,6 +249,8 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
goto err_close_dpmac;
}
+ mac->if_link_type = attr.link_type;
+
dpmac_node = dpaa2_mac_get_node(attr.id);
if (!dpmac_node) {
netdev_err(net_dev, "No dpmac@%d node found.\n", attr.id);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 4da8079b9155..2130d9c7d40e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -20,6 +20,7 @@ struct dpaa2_mac {
struct phylink_config phylink_config;
struct phylink *phylink;
phy_interface_t if_mode;
+ enum dpmac_link_type if_link_type;
};
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index fe942de19597..2b43848e1363 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config FSL_ENETC
tristate "ENETC PF driver"
- depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI && PCI_MSI
select FSL_ENETC_MDIO
select PHYLIB
help
@@ -13,7 +13,7 @@ config FSL_ENETC
config FSL_ENETC_VF
tristate "ENETC VF driver"
- depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI && PCI_MSI
select PHYLIB
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
@@ -23,7 +23,7 @@ config FSL_ENETC_VF
config FSL_ENETC_MDIO
tristate "ENETC MDIO driver"
- depends on PCI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI
help
This driver supports NXP ENETC Central MDIO controller as a PCIe
physical function (PF) device.
@@ -42,16 +42,6 @@ config FSL_ENETC_PTP_CLOCK
If compiled as module (M), the module name is fsl-enetc-ptp.
-config FSL_ENETC_HW_TIMESTAMPING
- bool "ENETC hardware timestamping support"
- depends on FSL_ENETC || FSL_ENETC_VF
- help
- Enable hardware timestamping support on the Ethernet packets
- using the SO_TIMESTAMPING API. Because the RX BD ring dynamic
- allocation has not been supported and it is too expensive to use
- extended RX BDs if timestamping is not used, this option enables
- extended RX BDs in order to support hardware timestamping.
-
config FSL_ENETC_QOS
bool "ENETC hardware Time-sensitive Network support"
depends on (FSL_ENETC || FSL_ENETC_VF) && (NET_SCH_TAPRIO || NET_SCH_CBS)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 1f79e36116a3..ccf2611f4a20 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -451,7 +451,7 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
i = rx_ring->next_to_use;
rx_swbd = &rx_ring->rx_swbd[i];
- rxbd = ENETC_RXBD(*rx_ring, i);
+ rxbd = enetc_rxbd(rx_ring, i);
for (j = 0; j < buff_cnt; j++) {
/* try reuse page */
@@ -468,13 +468,12 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
/* clear 'R' as well */
rxbd->r.lstatus = 0;
+ rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
rx_swbd++;
- rxbd++;
i++;
if (unlikely(i == rx_ring->bd_count)) {
i = 0;
rx_swbd = rx_ring->rx_swbd;
- rxbd = ENETC_RXBD(*rx_ring, 0);
}
}
@@ -488,7 +487,7 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
return j;
}
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static void enetc_get_rx_tstamp(struct net_device *ndev,
union enetc_rx_bd *rxbd,
struct sk_buff *skb)
@@ -502,7 +501,8 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
lo = enetc_rd(hw, ENETC_SICTR0);
hi = enetc_rd(hw, ENETC_SICTR1);
- tstamp_lo = le32_to_cpu(rxbd->r.tstamp);
+ rxbd = enetc_rxbd_ext(rxbd);
+ tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
if (lo <= tstamp_lo)
hi -= 1;
@@ -516,7 +516,7 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
static void enetc_get_offloads(struct enetc_bdr *rx_ring,
union enetc_rx_bd *rxbd, struct sk_buff *skb)
{
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
#endif
/* TODO: hashing */
@@ -533,7 +533,7 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rxbd->r.vlan_opt));
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (priv->active_offloads & ENETC_F_RX_TSTAMP)
enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
#endif
@@ -655,7 +655,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
cleaned_cnt -= count;
}
- rxbd = ENETC_RXBD(*rx_ring, i);
+ rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
if (!bd_status)
break;
@@ -670,12 +670,10 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
enetc_get_offloads(rx_ring, rxbd, skb);
cleaned_cnt++;
- rxbd++;
- i++;
- if (unlikely(i == rx_ring->bd_count)) {
+
+ rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
+ if (unlikely(++i == rx_ring->bd_count))
i = 0;
- rxbd = ENETC_RXBD(*rx_ring, 0);
- }
if (unlikely(bd_status &
ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
@@ -683,12 +681,10 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
dma_rmb();
bd_status = le32_to_cpu(rxbd->r.lstatus);
- rxbd++;
- i++;
- if (unlikely(i == rx_ring->bd_count)) {
+
+ rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
+ if (unlikely(++i == rx_ring->bd_count))
i = 0;
- rxbd = ENETC_RXBD(*rx_ring, 0);
- }
}
rx_ring->ndev->stats.rx_dropped++;
@@ -710,12 +706,10 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
cleaned_cnt++;
- rxbd++;
- i++;
- if (unlikely(i == rx_ring->bd_count)) {
+
+ rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
+ if (unlikely(++i == rx_ring->bd_count))
i = 0;
- rxbd = ENETC_RXBD(*rx_ring, 0);
- }
}
rx_byte_cnt += skb->len;
@@ -845,15 +839,19 @@ static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
enetc_free_txbdr(priv->tx_ring[i]);
}
-static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
+static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
{
+ size_t size = sizeof(union enetc_rx_bd);
int err;
rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
if (!rxr->rx_swbd)
return -ENOMEM;
- err = enetc_dma_alloc_bdr(rxr, sizeof(union enetc_rx_bd));
+ if (extended)
+ size *= 2;
+
+ err = enetc_dma_alloc_bdr(rxr, size);
if (err) {
vfree(rxr->rx_swbd);
return err;
@@ -862,6 +860,7 @@ static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
rxr->next_to_clean = 0;
rxr->next_to_use = 0;
rxr->next_to_alloc = 0;
+ rxr->ext_en = extended;
return 0;
}
@@ -881,10 +880,11 @@ static void enetc_free_rxbdr(struct enetc_bdr *rxr)
static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
{
+ bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
int i, err;
for (i = 0; i < priv->num_rx_rings; i++) {
- err = enetc_alloc_rxbdr(priv->rx_ring[i]);
+ err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
if (err)
goto fail;
@@ -1174,9 +1174,10 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
rbmr = ENETC_RBMR_EN;
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
- rbmr |= ENETC_RBMR_BDS;
-#endif
+
+ if (rx_ring->ext_en)
+ rbmr |= ENETC_RBMR_BDS;
+
if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
rbmr |= ENETC_RBMR_VTE;
@@ -1577,11 +1578,12 @@ int enetc_set_features(struct net_device *ndev,
return 0;
}
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct hwtstamp_config config;
+ int ao;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
@@ -1597,6 +1599,7 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
return -ERANGE;
}
+ ao = priv->active_offloads;
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
@@ -1606,6 +1609,11 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_ALL;
}
+ if (netif_running(ndev) && ao != priv->active_offloads) {
+ enetc_close(ndev);
+ enetc_open(ndev);
+ }
+
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
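
Because RX timestamping now selects the extended buffer-descriptor format at ring allocation time, enabling it on a running interface bounces the device through the close/open pair above. A userspace sketch of the request that triggers this path (interface name and socket descriptor are illustrative):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

struct hwtstamp_config cfg = { .rx_filter = HWTSTAMP_FILTER_ALL };
struct ifreq ifr;

memset(&ifr, 0, sizeof(ifr));
strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
ifr.ifr_data = (char *)&cfg;
ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);	/* sock_fd: any open AF_INET socket */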
@@ -1632,7 +1640,7 @@ static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (cmd == SIOCSHWTSTAMP)
return enetc_hwtstamp_set(ndev, rq);
if (cmd == SIOCGHWTSTAMP)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index dd4a227ffc7a..56c43f35b633 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -73,6 +73,7 @@ struct enetc_bdr {
dma_addr_t bd_dma_base;
u8 tsd_enable; /* Time specific departure */
+ bool ext_en; /* enable h/w descriptor extensions */
} ____cacheline_aligned_in_smp;
static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
@@ -104,7 +105,37 @@ struct enetc_cbdr {
};
#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))
-#define ENETC_RXBD(BDR, i) (&(((union enetc_rx_bd *)((BDR).bd_base))[i]))
+
+static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
+{
+ int hw_idx = i;
+
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ if (rx_ring->ext_en)
+ hw_idx = 2 * i;
+#endif
+ return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
+}
+
+static inline union enetc_rx_bd *enetc_rxbd_next(struct enetc_bdr *rx_ring,
+ union enetc_rx_bd *rxbd,
+ int i)
+{
+ rxbd++;
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
+ if (rx_ring->ext_en)
+ rxbd++;
+#endif
+ if (unlikely(++i == rx_ring->bd_count))
+ rxbd = rx_ring->bd_base;
+
+ return rxbd;
+}
+
+static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd)
+{
+ return ++rxbd;
+}
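
With ext_en set, each logical RX entry spans two 16-byte hardware descriptors, and these helpers hide the doubled stride plus the wrap back to the ring base. A usage sketch matching the clean_rx loop earlier in this series (i and rx_ring assumed in scope):

/* Sketch: walk logical RX entries; the timestamp, when enabled, lives
 * in the extension half reached via enetc_rxbd_ext().
 */
union enetc_rx_bd *rxbd = enetc_rxbd(rx_ring, i);

while (le32_to_cpu(rxbd->r.lstatus)) {
	/* ... consume rxbd->r fields ... */
	rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
	if (unlikely(++i == rx_ring->bd_count))
		i = 0;
}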
struct enetc_msg_swbd {
void *vaddr;
@@ -163,7 +194,7 @@ struct enetc_int_vector {
char name[ENETC_INT_NAME_MAX];
struct enetc_bdr rx_ring ____cacheline_aligned_in_smp;
- struct enetc_bdr tx_ring[0];
+ struct enetc_bdr tx_ring[];
};
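
The [0]-to-[] conversions here and below replace GNU zero-length arrays with C99 flexible array members, which the compiler can bounds-check. Allocation typically pairs with struct_size(); a sketch with a hypothetical element count n:

/* Sketch: struct_size() (linux/overflow.h) computes sizeof(*v) plus
 * n trailing tx_ring elements, with overflow checking.
 */
struct enetc_int_vector *v;

v = kzalloc(struct_size(v, tx_ring, n), GFP_KERNEL);
if (!v)
	return -ENOMEM;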
struct enetc_cls_rule {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 301ee0dde02d..34bd1f3fb415 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -195,15 +195,21 @@ static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
static int enetc_get_sset_count(struct net_device *ndev, int sset)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int len;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
- if (sset == ETH_SS_STATS)
- return ARRAY_SIZE(enetc_si_counters) +
- ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings +
- ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings +
- (enetc_si_is_pf(priv->si) ?
- ARRAY_SIZE(enetc_port_counters) : 0);
+ len = ARRAY_SIZE(enetc_si_counters) +
+ ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings +
+ ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings;
- return -EOPNOTSUPP;
+ if (!enetc_si_is_pf(priv->si))
+ return len;
+
+ len += ARRAY_SIZE(enetc_port_counters);
+
+ return len;
}
static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -568,7 +574,7 @@ static int enetc_get_ts_info(struct net_device *ndev,
info->phc_index = -1;
}
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 62554f28ce07..2a6523136947 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -418,9 +418,6 @@ union enetc_rx_bd {
struct {
__le64 addr;
u8 reserved[8];
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
- u8 reserved1[16];
-#endif
} w;
struct {
__le16 inet_csum;
@@ -435,11 +432,11 @@ union enetc_rx_bd {
};
__le32 lstatus;
};
-#ifdef CONFIG_FSL_ENETC_HW_TIMESTAMPING
+ } r;
+ struct {
__le32 tstamp;
u8 reserved[12];
-#endif
- } r;
+ } ext;
};
#define ENETC_RXBD_LSTATUS_R BIT(30)
@@ -588,7 +585,7 @@ struct tgs_gcl_data {
__le32 bth;
__le32 ct;
__le32 cte;
- struct gce entry[0];
+ struct gce entry[];
};
struct enetc_cbd {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index fc0d7d99e9a1..85e2b741df41 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -7,14 +7,7 @@
#include <linux/of_net.h>
#include "enetc_pf.h"
-#define ENETC_DRV_VER_MAJ 1
-#define ENETC_DRV_VER_MIN 0
-
-#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
- __stringify(ENETC_DRV_VER_MIN)
-static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
#define ENETC_DRV_NAME_STR "ENETC PF driver"
-static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr)
{
@@ -803,11 +796,6 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
struct device_node *mdio_np;
int err;
- if (!np) {
- dev_err(priv->dev, "missing ENETC port node\n");
- return -ENODEV;
- }
-
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!priv->phy_node) {
if (!of_phy_is_fixed_link(np)) {
@@ -929,9 +917,6 @@ static int enetc_pf_probe(struct pci_dev *pdev,
netif_carrier_off(ndev);
- netif_info(priv, probe, ndev, "%s v%s\n",
- enetc_drv_name, enetc_drv_ver);
-
return 0;
err_reg_netdev:
@@ -959,9 +944,6 @@ static void enetc_pf_remove(struct pci_dev *pdev)
enetc_sriov_configure(pdev, 0);
priv = netdev_priv(si->ndev);
- netif_info(priv, drv, si->ndev, "%s v%s remove\n",
- enetc_drv_name, enetc_drv_ver);
-
unregister_netdev(si->ndev);
enetc_mdio_remove(pf);
@@ -995,4 +977,3 @@ module_pci_driver(enetc_pf_driver);
MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(ENETC_DRV_VER_STR);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index ebd21bf4cfa1..f14576212a0e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -4,14 +4,7 @@
#include <linux/module.h>
#include "enetc.h"
-#define ENETC_DRV_VER_MAJ 1
-#define ENETC_DRV_VER_MIN 0
-
-#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." \
- __stringify(ENETC_DRV_VER_MIN)
-static const char enetc_drv_ver[] = ENETC_DRV_VER_STR;
#define ENETC_DRV_NAME_STR "ENETC VF driver"
-static const char enetc_drv_name[] = ENETC_DRV_NAME_STR;
/* Messaging */
static void enetc_msg_vsi_write_msg(struct enetc_hw *hw,
@@ -201,9 +194,6 @@ static int enetc_vf_probe(struct pci_dev *pdev,
netif_carrier_off(ndev);
- netif_info(priv, probe, ndev, "%s v%s\n",
- enetc_drv_name, enetc_drv_ver);
-
return 0;
err_reg_netdev:
@@ -225,8 +215,6 @@ static void enetc_vf_remove(struct pci_dev *pdev)
struct enetc_ndev_priv *priv;
priv = netdev_priv(si->ndev);
- netif_info(priv, drv, si->ndev, "%s v%s remove\n",
- enetc_drv_name, enetc_drv_ver);
unregister_netdev(si->ndev);
enetc_free_msix(priv);
@@ -254,4 +242,3 @@ module_pci_driver(enetc_vf_driver);
MODULE_DESCRIPTION(ENETC_DRV_NAME_STR);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(ENETC_DRV_VER_STR);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index f79e57f735b3..bd898f5b4da5 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -584,7 +584,7 @@ struct fec_enet_private {
int pps_enable;
unsigned int next_counter;
- u64 ethtool_stats[0];
+ u64 ethtool_stats[];
};
void fec_ptp_init(struct platform_device *pdev, int irq_idx);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 23c5fef2f1ad..c1c267b61647 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2128,7 +2128,6 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
strlcpy(info->driver, fep->pdev->dev.driver->name,
sizeof(info->driver));
- strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
@@ -2642,6 +2641,8 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops fec_enet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = fec_enet_get_drvinfo,
.get_regs_len = fec_enet_get_regs_len,
.get_regs = fec_enet_get_regs,
@@ -3793,6 +3794,7 @@ static struct platform_driver fec_driver = {
.name = DRIVER_NAME,
.pm = &fec_pm_ops,
.of_match_table = fec_dt_ids,
+ .suppress_bind_attrs = true,
},
.id_table = fec_devtype,
.probe = fec_probe,
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 1ca543ac8f2c..004c266802a8 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -366,13 +366,26 @@ static void set_dflts(struct dtsec_cfg *cfg)
cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
}
+static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
+{
+ u32 tmp;
+
+ tmp = (u32)((adr[5] << 24) |
+ (adr[4] << 16) | (adr[3] << 8) | adr[2]);
+ iowrite32be(tmp, &regs->macstnaddr1);
+
+ tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
+ iowrite32be(tmp, &regs->macstnaddr2);
+}
+
static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
- phy_interface_t iface, u16 iface_speed, u8 *macaddr,
+ phy_interface_t iface, u16 iface_speed, u64 addr,
u32 exception_mask, u8 tbi_addr)
{
bool is_rgmii, is_sgmii, is_qsgmii;
- int i;
+ enet_addr_t eth_addr;
u32 tmp;
+ int i;
/* Soft reset */
iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
@@ -501,12 +514,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
iowrite32be(0xffffffff, &regs->ievent);
- tmp = (u32)((macaddr[5] << 24) |
- (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
- iowrite32be(tmp, &regs->macstnaddr1);
-
- tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
- iowrite32be(tmp, &regs->macstnaddr2);
+ if (addr) {
+ MAKE_ENET_ADDR_FROM_UINT64(addr, eth_addr);
+ set_mac_address(regs, (u8 *)eth_addr);
+ }
/* HASH */
for (i = 0; i < NUM_OF_HASH_REGS; i++) {
@@ -519,18 +530,6 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
return 0;
}
-static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
-{
- u32 tmp;
-
- tmp = (u32)((adr[5] << 24) |
- (adr[4] << 16) | (adr[3] << 8) | adr[2]);
- iowrite32be(tmp, &regs->macstnaddr1);
-
- tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
- iowrite32be(tmp, &regs->macstnaddr2);
-}
-
static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
bool enable)
{
@@ -556,10 +555,6 @@ static int check_init_parameters(struct fman_mac *dtsec)
pr_err("1G MAC driver supports 1G or lower speeds\n");
return -EINVAL;
}
- if (dtsec->addr == 0) {
- pr_err("Ethernet MAC Must have a valid MAC Address\n");
- return -EINVAL;
- }
if ((dtsec->dtsec_drv_param)->rx_prepend >
MAX_PACKET_ALIGNMENT) {
pr_err("packetAlignmentPadding can't be > than %d\n",
@@ -1391,9 +1386,8 @@ int dtsec_init(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct dtsec_cfg *dtsec_drv_param;
- int err;
u16 max_frm_ln;
- enet_addr_t eth_addr;
+ int err;
if (is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
@@ -1410,10 +1404,8 @@ int dtsec_init(struct fman_mac *dtsec)
dtsec_drv_param = dtsec->dtsec_drv_param;
- MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);
-
err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
- dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
+ dtsec->max_speed, dtsec->addr, dtsec->exceptions,
dtsec->tbiphy->mdio.addr);
if (err) {
free_init_resources(dtsec);
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 0d2b4ab01f24..a5500ede4070 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -596,10 +596,6 @@ static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
static int check_init_parameters(struct fman_mac *memac)
{
- if (memac->addr == 0) {
- pr_err("Ethernet MAC must have a valid MAC address\n");
- return -EINVAL;
- }
if (!memac->exception_cb) {
pr_err("Uninitialized exception handler\n");
return -EINVAL;
@@ -1057,8 +1053,10 @@ int memac_init(struct fman_mac *memac)
}
/* MAC Address */
- MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
- add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
+ if (memac->addr != 0) {
+ MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
+ add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
+ }
fixed_link = memac_drv_param->fixed_link;
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index f75b9c11b2d2..8c7eb878d5b4 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -273,10 +273,6 @@ static int check_init_parameters(struct fman_mac *tgec)
pr_err("10G MAC driver only support 10G speed\n");
return -EINVAL;
}
- if (tgec->addr == 0) {
- pr_err("Ethernet 10G MAC Must have valid MAC Address\n");
- return -EINVAL;
- }
if (!tgec->exception_cb) {
pr_err("uninitialized exception_cb\n");
return -EINVAL;
@@ -706,8 +702,10 @@ int tgec_init(struct fman_mac *tgec)
cfg = tgec->cfg;
- MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
- set_mac_address(tgec->regs, (u8 *)eth_addr);
+ if (tgec->addr) {
+ MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
+ set_mac_address(tgec->regs, (u8 *)eth_addr);
+ }
/* interrupts */
/* FM_10G_REM_N_LCL_FLT_EX_10GMAC_ERRATA_SW005 Errata workaround */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 55f2122c3217..43427c5b9396 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -724,12 +724,10 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the MAC address */
mac_addr = of_get_mac_address(mac_node);
- if (IS_ERR(mac_addr)) {
- dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
- }
- ether_addr_copy(mac_dev->addr, mac_addr);
+ if (IS_ERR(mac_addr))
+ dev_warn(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
+ else
+ ether_addr_copy(mac_dev->addr, mac_addr);
/* Get the port handles */
nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
@@ -855,7 +853,8 @@ static int mac_probe(struct platform_device *_of_dev)
if (err < 0)
dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
- dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
+ if (!IS_ERR(mac_addr))
+ dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
if (IS_ERR(priv->eth_dev)) {
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index add61fed33ee..ce85feaac357 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -53,7 +53,6 @@
MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
@@ -790,7 +789,6 @@ static void fs_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int fs_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 195fae6aec4a..5ff2634bee2f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -190,8 +190,6 @@ void fs_cleanup_bds(struct net_device *dev);
#define DRV_MODULE_NAME "fs_enet"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.1"
-#define DRV_MODULE_RELDATE "Sep 22, 2014"
/***************************************************************************/
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index f7e5cafe89a9..b3c69e9038ea 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -103,8 +103,6 @@
#define TX_TIMEOUT (5*HZ)
-const char gfar_driver_version[] = "2.0";
-
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 432c6a818ae5..8ced783f5302 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -68,7 +68,6 @@ struct ethtool_rx_list {
#define RXBUF_ALIGNMENT 64
#define DRV_NAME "gfar-enet"
-extern const char gfar_driver_version[];
/* MAXIMUM NUMBER OF QUEUES SUPPORTED */
#define MAX_TX_QS 0x8
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 3c8e4e2efc07..cc7d4f93da54 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -164,10 +164,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, gfar_driver_version,
- sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
}
/* Return the length of the register structure */
@@ -276,35 +272,6 @@ static int gfar_gcoalesce(struct net_device *dev,
cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
cvals->tx_max_coalesced_frames = txcount;
- cvals->use_adaptive_rx_coalesce = 0;
- cvals->use_adaptive_tx_coalesce = 0;
-
- cvals->pkt_rate_low = 0;
- cvals->rx_coalesce_usecs_low = 0;
- cvals->rx_max_coalesced_frames_low = 0;
- cvals->tx_coalesce_usecs_low = 0;
- cvals->tx_max_coalesced_frames_low = 0;
-
- /* When the packet rate is below pkt_rate_high but above
- * pkt_rate_low (both measured in packets per second) the
- * normal {rx,tx}_* coalescing parameters are used.
- */
-
- /* When the packet rate is (measured in packets per second)
- * is above pkt_rate_high, the {rx,tx}_*_high parameters are
- * used.
- */
- cvals->pkt_rate_high = 0;
- cvals->rx_coalesce_usecs_high = 0;
- cvals->rx_max_coalesced_frames_high = 0;
- cvals->tx_coalesce_usecs_high = 0;
- cvals->tx_max_coalesced_frames_high = 0;
-
- /* How often to do adaptive coalescing packet rate sampling,
- * measured in seconds. Must not be zero.
- */
- cvals->rate_sample_interval = 0;
-
return 0;
}
@@ -1507,6 +1474,8 @@ static int gfar_get_ts_info(struct net_device *dev,
}
const struct ethtool_ops gfar_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = gfar_gdrvinfo,
.get_regs_len = gfar_reglen,
.get_regs = gfar_get_regs,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 0d101c00286f..6e5f6dd169b5 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3990,5 +3990,4 @@ module_exit(ucc_geth_exit);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION(DRV_DESC);
-MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index a86a42131fc7..3fe903972195 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -26,7 +26,6 @@
#define DRV_DESC "QE UCC Gigabit Ethernet Controller"
#define DRV_NAME "ucc_geth"
-#define DRV_VERSION "1.1"
#define NUM_TX_QUEUES 8
#define NUM_RX_QUEUES 8
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index dfebacf443fc..14c08a868190 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -334,8 +334,6 @@ uec_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index d9718b87279d..12f6c2442a7a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -811,20 +811,6 @@ static int hip04_set_coalesce(struct net_device *netdev,
{
struct hip04_priv *priv = netdev_priv(netdev);
- /* Check not supported parameters */
- if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
- (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
- (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
- (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
- (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
- (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
- (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
- (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
- (ec->tx_max_coalesced_frames_irq) ||
- (ec->stats_block_coalesce_usecs) ||
- (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
- return -EOPNOTSUPP;
-
if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
(ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
@@ -845,6 +831,8 @@ static void hip04_get_drvinfo(struct net_device *netdev,
}
static const struct ethtool_ops hip04_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES,
.get_coalesce = hip04_get_coalesce,
.set_coalesce = hip04_set_coalesce,
.get_drvinfo = hip04_get_drvinfo,
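
The hand-rolled rejection list deleted above is now centralized: the ethtool core compares each set_coalesce request against supported_coalesce_params and fails unsupported fields with -EOPNOTSUPP before the driver callback runs. A sketch of the declaration side (callback names are illustrative):

/* Sketch: declare the supported subset; the callback then only needs
 * to range-check the fields it actually implements.
 */
static const struct ethtool_ops example_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES,
	.get_coalesce	= example_get_coalesce,
	.set_coalesce	= example_set_coalesce,
};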
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index 2721f1f1ab42..0f0e16f9afc0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -92,7 +92,7 @@ struct ppe_common_cb {
u8 comm_index; /*ppe_common index*/
u32 ppe_num;
- struct hns_ppe_cb ppe_cb[0];
+ struct hns_ppe_cb ppe_cb[];
};
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 3741befb914e..a9f805925699 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -108,7 +108,7 @@ struct rcb_common_cb {
u32 ring_num;
u32 desc_num; /* desc num per queue*/
- struct ring_pair_cb ring_pair_cb[0];
+ struct ring_pair_cb ring_pair_cb[];
};
int hns_rcb_buf_size2type(u32 buf_size);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 717fccc2efba..49624acf2473 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1264,6 +1264,11 @@ static int hns_get_rxnfc(struct net_device *netdev,
}
static const struct ethtool_ops hns_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_USECS_LOW_HIGH |
+ ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH,
.get_drvinfo = hns_nic_get_drvinfo,
.get_link = hns_nic_get_link,
.get_ringparam = hns_get_ringparam,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index d87158acdf6f..948e67ef30fd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -7,8 +7,6 @@
#include <linux/mutex.h>
#include <linux/types.h>
-#define HCLGE_MBX_VF_MSG_DATA_NUM 16
-
enum HCLGE_MBX_OPCODE {
HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */
HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset*/
@@ -72,10 +70,15 @@ enum hclge_mbx_vlan_cfg_subcode {
HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */
};
-#define HCLGE_MBX_MAX_MSG_SIZE 16
+#define HCLGE_MBX_MAX_MSG_SIZE 14
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
-#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3
-#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3
+#define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4
+
+struct hclge_ring_chain_param {
+ u8 ring_type;
+ u8 tqp_index;
+ u8 int_gl_index;
+};
struct hclgevf_mbx_resp_status {
struct mutex mbx_mutex; /* protects against contending sync cmd resp */
@@ -85,6 +88,41 @@ struct hclgevf_mbx_resp_status {
u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
};
+struct hclge_respond_to_vf_msg {
+ int status;
+ u8 data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ u16 len;
+};
+
+struct hclge_vf_to_pf_msg {
+ u8 code;
+ union {
+ struct {
+ u8 subcode;
+ u8 data[HCLGE_MBX_MAX_MSG_SIZE];
+ };
+ struct {
+ u8 en_bc;
+ u8 en_uc;
+ u8 en_mc;
+ };
+ struct {
+ u8 vector_id;
+ u8 ring_num;
+ struct hclge_ring_chain_param
+ param[HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM];
+ };
+ };
+};
+
+struct hclge_pf_to_vf_msg {
+ u16 code;
+ u16 vf_mbx_msg_code;
+ u16 vf_mbx_msg_subcode;
+ u16 resp_status;
+ u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+};
+
struct hclge_mbx_vf_to_pf_cmd {
u8 rsv;
u8 mbx_src_vfid; /* Auto filled by IMP */
@@ -92,17 +130,17 @@ struct hclge_mbx_vf_to_pf_cmd {
u8 rsv1[1];
u8 msg_len;
u8 rsv2[3];
- u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
+ struct hclge_vf_to_pf_msg msg;
};
-#define HCLGE_MBX_NEED_RESP_BIT BIT(0)
+#define HCLGE_MBX_NEED_RESP_B 0
struct hclge_mbx_pf_to_vf_cmd {
u8 dest_vfid;
u8 rsv[3];
u8 msg_len;
u8 rsv1[3];
- u16 msg[8];
+ struct hclge_pf_to_vf_msg msg;
};
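
The mailbox payload becomes a typed structure with a union over the common message shapes, replacing by-convention indexing into a raw msg[] byte array. A hedged sketch of composing a VF-to-PF request under the new layout (opcode, subcode and payload values are illustrative):

/* Sketch: fill the typed payload; code/subcode land in the first
 * union arm declared above.
 */
struct hclge_mbx_vf_to_pf_cmd req = {};

req.msg.code = HCLGE_MBX_SET_VLAN;
req.msg.subcode = HCLGE_MBX_VLAN_FILTER;
req.msg.data[0] = 1;	/* example payload byte */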
struct hclge_vf_rst_cmd {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index a3e4081b84ba..5587605d6deb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -78,6 +78,7 @@
enum hns_desc_type {
DESC_TYPE_SKB,
+ DESC_TYPE_FRAGLIST_SKB,
DESC_TYPE_PAGE,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 1d4ffc5f408a..e1d88095a77e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -260,6 +260,8 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump m7 info\n");
dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
dev_info(&h->pdev->dev, "dump mac tnl status\n");
+ dev_info(&h->pdev->dev, "dump loopback\n");
+ dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a7f40aa1a0ea..da98fd7c8eca 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1107,6 +1107,10 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return ret;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ } else if (type == DESC_TYPE_FRAGLIST_SKB) {
+ struct sk_buff *skb = (struct sk_buff *)priv;
+
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
} else {
frag = (skb_frag_t *)priv;
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
@@ -1144,8 +1148,9 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
- desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
- DESC_TYPE_SKB : DESC_TYPE_PAGE;
+ desc_cb->type = ((type == DESC_TYPE_FRAGLIST_SKB ||
+ type == DESC_TYPE_SKB) && !k) ?
+ type : DESC_TYPE_PAGE;
/* now, fill the descriptor */
desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
@@ -1354,7 +1359,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
ring_ptr_move_bw(ring, next_to_use);
/* unmap the descriptor dma address */
- if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
+ if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
+ ring->desc_cb[ring->next_to_use].type ==
+ DESC_TYPE_FRAGLIST_SKB)
dma_unmap_single(dev,
ring->desc_cb[ring->next_to_use].dma,
ring->desc_cb[ring->next_to_use].length,
@@ -1447,7 +1454,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
goto out;
skb_walk_frags(skb, frag_skb) {
- ret = hns3_fill_skb_to_desc(ring, frag_skb, DESC_TYPE_PAGE);
+ ret = hns3_fill_skb_to_desc(ring, frag_skb,
+ DESC_TYPE_FRAGLIST_SKB);
if (unlikely(ret < 0))
goto fill_err;
@@ -2228,7 +2236,7 @@ static void hns3_reset_prepare(struct pci_dev *pdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
- dev_info(&pdev->dev, "hns3 flr prepare\n");
+ dev_info(&pdev->dev, "FLR prepare\n");
if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
ae_dev->ops->flr_prepare(ae_dev);
}
@@ -2237,7 +2245,7 @@ static void hns3_reset_done(struct pci_dev *pdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
- dev_info(&pdev->dev, "hns3 flr done\n");
+ dev_info(&pdev->dev, "FLR done\n");
if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
ae_dev->ops->flr_done(ae_dev);
}
@@ -2356,7 +2364,7 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb)
{
- if (cb->type == DESC_TYPE_SKB)
+ if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB)
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
else if (cb->length)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index c03856e63320..28b81f24afa1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -736,7 +736,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
if (ops->get_media_type)
ops->get_media_type(handle, &media_type, &module_type);
- if (cmd->base.duplex != DUPLEX_FULL &&
+ if (cmd->base.duplex == DUPLEX_HALF &&
media_type != HNAE3_MEDIA_TYPE_COPPER) {
netdev_err(netdev,
"only copper port supports half duplex!");
@@ -1390,7 +1390,13 @@ static int hns3_set_fecparam(struct net_device *netdev,
return ops->set_fec(handle, fec_mode);
}
+#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
+ ETHTOOL_COALESCE_USE_ADAPTIVE | \
+ ETHTOOL_COALESCE_RX_USECS_HIGH | \
+ ETHTOOL_COALESCE_TX_USECS_HIGH)
+
static const struct ethtool_ops hns3vf_ethtool_ops = {
+ .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
.set_ringparam = hns3_set_ringparam,
@@ -1416,6 +1422,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
};
static const struct ethtool_ops hns3_ethtool_ops = {
+ .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 67fad80035d3..17228288d4df 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -87,7 +87,7 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
entries_per_desc = ARRAY_SIZE(desc[0].data);
index = offset % entries_per_desc;
- return (int)desc[offset / entries_per_desc].data[index];
+ return le32_to_cpu(desc[offset / entries_per_desc].data[index]);
}
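
The conversion matters on big-endian hosts: command-queue descriptor words are __le32 in memory, so a plain integer cast reads them byte-swapped there. A two-line sketch of the failure mode versus the fix (desc and index as above):

u32 wrong = (u32)desc->data[index];		/* broken on big-endian */
u32 right = le32_to_cpu(desc->data[index]);	/* correct on either endianness */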
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
@@ -145,10 +145,8 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src) {
- dev_err(&hdev->pdev->dev, "call kzalloc failed\n");
+ if (!desc_src)
return;
- }
desc = desc_src;
ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
@@ -179,6 +177,7 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_dbg_bitmap_cmd *bitmap;
+ enum hclge_opcode_type cmd;
int rq_id, pri_id, qset_id;
int port_id, nq_id, pg_id;
struct hclge_desc desc[2];
@@ -193,10 +192,10 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
return;
}
- ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1,
- HCLGE_OPC_QSET_DFX_STS);
+ cmd = HCLGE_OPC_QSET_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
@@ -204,48 +203,53 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);
- ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, HCLGE_OPC_PRI_DFX_STS);
+ cmd = HCLGE_OPC_PRI_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);
- ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, HCLGE_OPC_PG_DFX_STS);
+ cmd = HCLGE_OPC_PG_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);
- ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
- HCLGE_OPC_PORT_DFX_STS);
+ cmd = HCLGE_OPC_PORT_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);
- ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_NQ_CNT);
+ cmd = HCLGE_OPC_SCH_NQ_CNT;
+ ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
- ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
+ cmd = HCLGE_OPC_SCH_RQ_CNT;
+ ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
- ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
+ cmd = HCLGE_OPC_TM_INTERNAL_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
@@ -257,18 +261,18 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));
- ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
- HCLGE_OPC_TM_INTERNAL_CNT);
+ cmd = HCLGE_OPC_TM_INTERNAL_CNT;
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));
- ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
- HCLGE_OPC_TM_INTERNAL_STS_1);
+ cmd = HCLGE_OPC_TM_INTERNAL_STS_1;
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
if (ret)
- return;
+ goto err_dcb_cmd_send;
dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
@@ -277,6 +281,12 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
le32_to_cpu(desc[0].data[4]));
dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
le32_to_cpu(desc[0].data[5]));
+ return;
+
+err_dcb_cmd_send:
+ dev_err(&hdev->pdev->dev,
+ "failed to dump dcb dfx, cmd = %#x, ret = %d\n",
+ cmd, ret);
}
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
@@ -310,8 +320,9 @@ static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
char *false_buf)
{
if (flag)
- dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
- true_buf);
+ dev_info(&hdev->pdev->dev, "%s(%d): %s weight: %u\n",
+ title_buf, index, true_buf,
+ hdev->tm_info.pg_info[0].tc_dwrr[index]);
else
dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
false_buf);
@@ -339,7 +350,8 @@ static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "dump tc\n");
+ dev_info(&hdev->pdev->dev, "dump tc: %u tc enabled\n",
+ hdev->tm_info.num_tc);
dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
ets_weight->weight_offset);
@@ -581,7 +593,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_map_cmd_send;
- qset_id = nq_to_qs_map->qset_id & 0x3FF;
+ qset_id = le16_to_cpu(nq_to_qs_map->qset_id) & 0x3FF;
cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
@@ -621,7 +633,8 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
if (ret)
goto err_tm_map_cmd_send;
- qset_maping[group_id] = bp_to_qs_map_cmd->qs_bit_map;
+ qset_maping[group_id] =
+ le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);
}
dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n");
@@ -824,6 +837,7 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
struct hclge_mac_ethertype_idx_rd_cmd *req0;
char printf_buf[HCLGE_DBG_BUF_LEN];
struct hclge_desc desc;
+ u32 msg_egress_port;
int ret, i;
dev_info(&hdev->pdev->dev, "mng tab:\n");
@@ -865,20 +879,21 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
HCLGE_DBG_BUF_LEN - strlen(printf_buf),
"%x |%04x |%x |%04x|%x |%02x |%02x |",
!!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
- req0->ethter_type,
+ le16_to_cpu(req0->ethter_type),
!!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
- req0->vlan_tag & HCLGE_DBG_MNG_VLAN_TAG,
+ le16_to_cpu(req0->vlan_tag) & HCLGE_DBG_MNG_VLAN_TAG,
!!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
req0->i_port_bitmap, req0->i_port_direction);
+ msg_egress_port = le16_to_cpu(req0->egress_port);
snprintf(printf_buf + strlen(printf_buf),
HCLGE_DBG_BUF_LEN - strlen(printf_buf),
- "%d |%d |%02d |%04d|%x\n",
- !!(req0->egress_port & HCLGE_DBG_MNG_E_TYPE_B),
- req0->egress_port & HCLGE_DBG_MNG_PF_ID,
- (req0->egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
- req0->egress_queue,
- !!(req0->egress_port & HCLGE_DBG_MNG_DROP_B));
+ "%x |%x |%02x |%04x|%x\n",
+ !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
+ msg_egress_port & HCLGE_DBG_MNG_PF_ID,
+ (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
+ le16_to_cpu(req0->egress_queue),
+ !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
dev_info(&hdev->pdev->dev, "%s", printf_buf);
}
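[Note: the le16_to_cpu()/le32_to_cpu() conversions added throughout this file reflect that the firmware fills descriptor words in little-endian order, so raw reads are only correct on little-endian hosts. A sketch of the convention with an illustrative descriptor:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct example_desc {
		__le16 vlan_tag;	/* device byte order */
		__le32 counter;
	};

	static u32 example_read_counter(const struct example_desc *desc)
	{
		/* convert on every access so the code is also correct on
		 * big-endian CPUs; typing fields __le16/__le32 lets sparse
		 * flag any missed conversion
		 */
		return le32_to_cpu(desc->counter);
	}
]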
@@ -1065,11 +1080,8 @@ static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src) {
- dev_err(&hdev->pdev->dev,
- "allocate desc for get_m7_stats failed\n");
+ if (!desc_src)
return;
- }
desc_tmp = desc_src;
ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
@@ -1132,7 +1144,7 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
const char *cmd_buf)
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
-#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
+#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
@@ -1156,8 +1168,8 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
while (length > 0) {
data0 = offset;
- if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH)
- data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16;
+ if (length >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
+ data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
else
data0 |= length << 16;
ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
@@ -1169,6 +1181,57 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
}
}
+static void hclge_dbg_dump_loopback(struct hclge_dev *hdev,
+ const char *cmd_buf)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ struct hclge_config_mac_mode_cmd *req_app;
+ struct hclge_serdes_lb_cmd *req_serdes;
+ struct hclge_desc desc;
+ u8 loopback_en;
+ int ret;
+
+ req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
+ req_serdes = (struct hclge_serdes_lb_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id);
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump app loopback status, ret = %d\n", ret);
+ return;
+ }
+
+ loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
+ HCLGE_MAC_APP_LP_B);
+ dev_info(&hdev->pdev->dev, "app loopback: %s\n",
+ loopback_en ? "on" : "off");
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump serdes loopback status, ret = %d\n",
+ ret);
+ return;
+ }
+
+ loopback_en = req_serdes->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n",
+ loopback_en ? "on" : "off");
+
+ loopback_en = req_serdes->enable &
+ HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
+ dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n",
+ loopback_en ? "on" : "off");
+
+ if (phydev)
+ dev_info(&hdev->pdev->dev, "phy loopback: %s\n",
+ phydev->loopback_enabled ? "on" : "off");
+}
+
/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
* @hdev: pointer to struct hclge_dev
*/
@@ -1269,6 +1332,7 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
#define DUMP_REG "dump reg"
#define DUMP_TM_MAP "dump tm map"
+#define DUMP_LOOPBACK "dump loopback"
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -1302,6 +1366,9 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
&cmd_buf[sizeof("dump ncl_config")]);
} else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
hclge_dbg_dump_mac_tnl_status(hdev);
+ } else if (strncmp(cmd_buf, DUMP_LOOPBACK,
+ strlen(DUMP_LOOPBACK)) == 0) {
+ hclge_dbg_dump_loopback(hdev, &cmd_buf[sizeof(DUMP_LOOPBACK)]);
} else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
hclge_dbg_dump_qs_shaper(hdev,
&cmd_buf[sizeof("dump qs shaper")]);
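[Note: hclge_dbg_dump_dcb() above folds eight separate early returns into a single err_dcb_cmd_send label that also logs which command failed, hence the cmd variable that now tracks the opcode across calls. The shape of that pattern, with a hypothetical example_send() helper:

	#include <linux/device.h>

	static int example_send(int cmd)
	{
		return 0;	/* stub; the real helper talks to firmware */
	}

	static void example_dump(struct device *dev)
	{
		enum { EXAMPLE_CMD_A = 0x10, EXAMPLE_CMD_B = 0x11 } cmd;
		int ret;

		cmd = EXAMPLE_CMD_A;
		ret = example_send(cmd);
		if (ret)
			goto err_cmd_send;

		cmd = EXAMPLE_CMD_B;
		ret = example_send(cmd);
		if (ret)
			goto err_cmd_send;

		return;

	err_cmd_send:
		/* one site reports the failure and names the command */
		dev_err(dev, "failed to dump, cmd = %#x, ret = %d\n", cmd, ret);
	}
]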
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index c85b72dc44d2..50d5ef71756b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1667,9 +1667,6 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
hclge_handle_rocee_ras_error(ae_dev);
}
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
- goto out;
-
if (ae_dev->hw_err_reset_req)
return PCI_ERS_RESULT_NEED_RESET;
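[Note: with the early exit removed, hclge_handle_hw_ras_error() reports PCI_ERS_RESULT_NEED_RESET even while a reset is already in flight, leaving serialization to the reset machinery. For context, a minimal sketch of an error_detected callback using the same return convention; names are illustrative:

	#include <linux/pci.h>

	static pci_ers_result_t example_error_detected(struct pci_dev *pdev,
						       pci_channel_state_t state)
	{
		if (state == pci_channel_io_perm_failure)
			return PCI_ERS_RESULT_DISCONNECT;

		/* requesting a reset hands control to the PCI error-recovery
		 * core, which calls back into slot_reset/resume to finish
		 */
		return PCI_ERS_RESULT_NEED_RESET;
	}
]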
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d3b0cd74ecd2..a758f9ae32be 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -824,6 +824,8 @@ static void hclge_get_mac_stat(struct hnae3_handle *handle,
static int hclge_parse_func_status(struct hclge_dev *hdev,
struct hclge_func_status_cmd *status)
{
+#define HCLGE_MAC_ID_MASK 0xF
+
if (!(status->pf_state & HCLGE_PF_STATE_DONE))
return -EINVAL;
@@ -833,6 +835,7 @@ static int hclge_parse_func_status(struct hclge_dev *hdev,
else
hdev->flag &= ~HCLGE_FLAG_MAIN;
+ hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
return 0;
}
@@ -3441,7 +3444,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
u32 val;
if (hclge_get_hw_reset_stat(handle)) {
- dev_info(&pdev->dev, "Hardware reset not finish\n");
+ dev_info(&pdev->dev, "hardware reset not finish\n");
dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
@@ -3450,20 +3453,20 @@ static void hclge_do_reset(struct hclge_dev *hdev)
switch (hdev->reset_type) {
case HNAE3_GLOBAL_RESET:
+ dev_info(&pdev->dev, "global reset requested\n");
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
- dev_info(&pdev->dev, "Global Reset requested\n");
break;
case HNAE3_FUNC_RESET:
- dev_info(&pdev->dev, "PF Reset requested\n");
+ dev_info(&pdev->dev, "PF reset requested\n");
/* schedule again to check later */
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
default:
dev_warn(&pdev->dev,
- "Unsupported reset type: %d\n", hdev->reset_type);
+ "unsupported reset type: %d\n", hdev->reset_type);
break;
}
}
@@ -6765,7 +6768,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
struct hclge_dev *hdev = vport->back;
if (enable) {
- hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+ hclge_task_schedule(hdev, 0);
} else {
/* Set the DOWN flag here to disable link updating */
set_bit(HCLGE_STATE_DOWN, &hdev->state);
@@ -7353,7 +7356,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
return -EINVAL;
}
memset(&req, 0, sizeof(req));
- hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (status) {
@@ -7398,7 +7400,6 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
}
memset(&req, 0, sizeof(req));
- hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) {
@@ -7618,11 +7619,17 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
}
ether_addr_copy(vport->vf_info.mac, mac_addr);
- dev_info(&hdev->pdev->dev,
- "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
- vf, mac_addr);
- return hclge_inform_reset_assert_to_vf(vport);
+ if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+ dev_info(&hdev->pdev->dev,
+ "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
+ vf, mac_addr);
+ return hclge_inform_reset_assert_to_vf(vport);
+ }
+
+ dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
+ vf, mac_addr);
+ return 0;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
@@ -8979,6 +8986,12 @@ static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ /* When the nic is down, the service task is not running and does not
+ * update the port information every second. Query the port information
+ * before returning the media type to ensure the media information is
+ * up to date.
+ */
+ hclge_update_port_info(hdev);
+
if (media_type)
*media_type = hdev->hw.mac.media_type;
@@ -9109,8 +9122,8 @@ init_nic_err:
static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
struct hclge_vport *vport)
{
- struct hnae3_client *client = vport->roce.client;
struct hclge_dev *hdev = ae_dev->priv;
+ struct hnae3_client *client;
int rst_cnt;
int ret;
@@ -10286,8 +10299,9 @@ static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
- int data_len_per_desc, data_len, bd_num, i;
+ int data_len_per_desc, bd_num, i;
int bd_num_list[BD_LIST_MAX_NUM];
+ u32 data_len;
int ret;
ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
@@ -10666,7 +10680,7 @@ static int hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM;
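[Note: dropping WQ_MEM_RECLAIM above gives up the reserved rescuer thread, which is only required when work on the queue must make progress during memory reclaim. A sketch of the allocation, with an illustrative queue name:

	#include <linux/init.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static int __init example_init(void)
	{
		/* flags = 0: no rescuer thread; pass WQ_MEM_RECLAIM only if
		 * this queue sits on the memory-reclaim path
		 */
		example_wq = alloc_workqueue("example", 0, 0);
		return example_wq ? 0 : -ENOMEM;
	}
]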
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index f78cbb4cc85e..71df23d5f1b4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -249,6 +249,7 @@ enum HCLGE_MAC_DUPLEX {
#define QUERY_ACTIVE_SPEED 1
struct hclge_mac {
+ u8 mac_id;
u8 phy_addr;
u8 flag;
u8 media_type; /* port media type, e.g. fibre/copper/backplane */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 3d850f6b1e37..7f24fcb4f96a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -5,6 +5,11 @@
#include "hclge_mbx.h"
#include "hnae3.h"
+static u16 hclge_errno_to_resp(int errno)
+{
+ return abs(errno);
+}
+
/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
* receives a mailbox message from VF.
* @vport: pointer to struct hclge_vport
@@ -14,25 +19,25 @@
*/
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
- int resp_status,
- u8 *resp_data, u16 resp_data_len)
+ struct hclge_respond_to_vf_msg *resp_msg)
{
struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
struct hclge_dev *hdev = vport->back;
enum hclge_cmd_status status;
struct hclge_desc desc;
+ u16 resp;
resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
- if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
dev_err(&hdev->pdev->dev,
"PF fail to gen resp to VF len %u exceeds max len %u\n",
- resp_data_len,
+ resp_msg->len,
HCLGE_MBX_MAX_RESP_DATA_SIZE);
- /* If resp_data_len is too long, set the value to max length
+ /* If resp_msg->len is too long, set the value to max length
* and return the msg to VF
*/
- resp_data_len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
+ resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}
hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
@@ -40,18 +45,29 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
- resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
- resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
- resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
- resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;
+ resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
+ resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
+ resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode;
+ resp = hclge_errno_to_resp(resp_msg->status);
+ if (resp < SHRT_MAX) {
+ resp_pf_to_vf->msg.resp_status = resp;
+ } else {
+ dev_warn(&hdev->pdev->dev,
+ "failed to send response to VF, response status %d is out-of-bound\n",
+ resp);
+ resp_pf_to_vf->msg.resp_status = EIO;
+ }
- if (resp_data && resp_data_len > 0)
- memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);
+ if (resp_msg->len > 0)
+ memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
+ resp_msg->len);
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
- "PF failed(=%d) to send response to VF\n", status);
+ "failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n",
+ status, vf_to_pf_req->mbx_src_vfid,
+ vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode);
return status;
}
@@ -70,15 +86,15 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
resp_pf_to_vf->dest_vfid = dest_vfid;
resp_pf_to_vf->msg_len = msg_len;
- resp_pf_to_vf->msg[0] = mbx_opcode;
+ resp_pf_to_vf->msg.code = mbx_opcode;
- memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);
+ memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);
status = hclge_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
- "PF failed(=%d) to send mailbox message to VF\n",
- status);
+ "failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n",
+ status, dest_vfid, mbx_opcode);
return status;
}
@@ -138,21 +154,20 @@ static int hclge_get_ring_chain_from_mbx(
{
struct hnae3_ring_chain_node *cur_chain, *new_chain;
int ring_num;
- int i;
+ int i = 0;
- ring_num = req->msg[2];
+ ring_num = req->msg.ring_num;
- if (ring_num > ((HCLGE_MBX_VF_MSG_DATA_NUM -
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
- HCLGE_MBX_RING_NODE_VARIABLE_NUM))
+ if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
return -ENOMEM;
- hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+ hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg.param[i].ring_type);
ring_chain->tqp_index =
- hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
+ hclge_get_queue_id(vport->nic.kinfo.tqp
+ [req->msg.param[i].tqp_index]);
hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S,
- req->msg[5]);
+ HNAE3_RING_GL_IDX_S, req->msg.param[i].int_gl_index);
cur_chain = ring_chain;
@@ -162,18 +177,15 @@ static int hclge_get_ring_chain_from_mbx(
goto err;
hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
+ req->msg.param[i].ring_type);
new_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp
- [req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);
+ [req->msg.param[i].tqp_index]);
hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
HNAE3_RING_GL_IDX_S,
- req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
+ req->msg.param[i].int_gl_index);
cur_chain->next = new_chain;
cur_chain = new_chain;
@@ -189,7 +201,7 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
struct hclge_mbx_vf_to_pf_cmd *req)
{
struct hnae3_ring_chain_node ring_chain;
- int vector_id = req->msg[1];
+ int vector_id = req->msg.vector_id;
int ret;
memset(&ring_chain, 0, sizeof(ring_chain));
@@ -207,13 +219,9 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *req)
{
-#define HCLGE_MBX_BC_INDEX 1
-#define HCLGE_MBX_UC_INDEX 2
-#define HCLGE_MBX_MC_INDEX 3
-
- bool en_bc = req->msg[HCLGE_MBX_BC_INDEX] ? true : false;
- bool en_uc = req->msg[HCLGE_MBX_UC_INDEX] ? true : false;
- bool en_mc = req->msg[HCLGE_MBX_MC_INDEX] ? true : false;
+ bool en_bc = req->msg.en_bc ? true : false;
+ bool en_uc = req->msg.en_uc ? true : false;
+ bool en_mc = req->msg.en_mc ? true : false;
int ret;
if (!vport->vf_info.trusted) {
@@ -222,8 +230,6 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
}
ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc);
- if (req->mbx_need_resp)
- hclge_gen_resp_to_vf(vport, req, ret, NULL, 0);
vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0;
@@ -244,26 +250,25 @@ void hclge_inform_vf_promisc_info(struct hclge_vport *vport)
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
- const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET 6
+
+ const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
struct hclge_dev *hdev = vport->back;
int status;
- if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
- const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);
+ if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
+ const u8 *old_addr = (const u8 *)
+ (&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]);
/* If VF MAC has been configured by the host then it
* cannot be overridden by the MAC specified by the VM.
*/
if (!is_zero_ether_addr(vport->vf_info.mac) &&
- !ether_addr_equal(mac_addr, vport->vf_info.mac)) {
- status = -EPERM;
- goto out;
- }
+ !ether_addr_equal(mac_addr, vport->vf_info.mac))
+ return -EPERM;
- if (!is_valid_ether_addr(mac_addr)) {
- status = -EINVAL;
- goto out;
- }
+ if (!is_valid_ether_addr(mac_addr))
+ return -EINVAL;
hclge_rm_uc_addr_common(vport, old_addr);
status = hclge_add_uc_addr_common(vport, mac_addr);
@@ -275,12 +280,12 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
hclge_add_vport_mac_table(vport, mac_addr,
HCLGE_MAC_ADDR_UC);
}
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
status = hclge_add_uc_addr_common(vport, mac_addr);
if (!status)
hclge_add_vport_mac_table(vport, mac_addr,
HCLGE_MAC_ADDR_UC);
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
status = hclge_rm_uc_addr_common(vport, mac_addr);
if (!status)
hclge_rm_vport_mac_table(vport, mac_addr,
@@ -288,33 +293,26 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
} else {
dev_err(&hdev->pdev->dev,
"failed to set unicast mac addr, unknown subcode %u\n",
- mbx_req->msg[1]);
+ mbx_req->msg.subcode);
return -EIO;
}
-out:
- if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT)
- hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
-
- return 0;
+ return status;
}
static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
- const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+ const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
struct hclge_dev *hdev = vport->back;
- u8 resp_len = 0;
- u8 resp_data;
int status;
- if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
+ if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
status = hclge_add_mc_addr_common(vport, mac_addr);
if (!status)
hclge_add_vport_mac_table(vport, mac_addr,
HCLGE_MAC_ADDR_MC);
- } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
status = hclge_rm_mc_addr_common(vport, mac_addr);
if (!status)
hclge_rm_vport_mac_table(vport, mac_addr,
@@ -322,15 +320,11 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %u\n",
- mbx_req->msg[1]);
+ mbx_req->msg.subcode);
return -EIO;
}
- if (gen_resp)
- hclge_gen_resp_to_vf(vport, mbx_req, status,
- &resp_data, resp_len);
-
- return 0;
+ return status;
}
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
@@ -351,12 +345,16 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
}
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
+#define HCLGE_MBX_VLAN_STATE_OFFSET 0
+#define HCLGE_MBX_VLAN_INFO_OFFSET 2
+
struct hclge_vf_vlan_cfg *msg_cmd;
int status = 0;
- msg_cmd = (struct hclge_vf_vlan_cfg *)mbx_req->msg;
+ msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
struct hnae3_handle *handle = &vport->nic;
u16 vlan, proto;
@@ -367,38 +365,32 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
proto = msg_cmd->proto;
status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
vlan, is_kill);
- if (mbx_req->mbx_need_resp)
- return hclge_gen_resp_to_vf(vport, mbx_req, status,
- NULL, 0);
} else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
struct hnae3_handle *handle = &vport->nic;
bool en = msg_cmd->is_kill ? true : false;
status = hclge_en_hw_strip_rxvtag(handle, en);
- } else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
+ } else if (msg_cmd->subcode == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
struct hclge_vlan_info *vlan_info;
u16 *state;
- state = (u16 *)&mbx_req->msg[2];
- vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4];
+ state = (u16 *)&mbx_req->msg.data[HCLGE_MBX_VLAN_STATE_OFFSET];
+ vlan_info = (struct hclge_vlan_info *)
+ &mbx_req->msg.data[HCLGE_MBX_VLAN_INFO_OFFSET];
status = hclge_update_port_base_vlan_cfg(vport, *state,
vlan_info);
- } else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
- u8 state;
-
- state = vport->port_base_vlan_cfg.state;
- status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state,
- sizeof(u8));
+ } else if (msg_cmd->subcode == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
+ resp_msg->data[0] = vport->port_base_vlan_cfg.state;
+ resp_msg->len = sizeof(u8);
}
return status;
}
static int hclge_set_vf_alive(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
- bool alive = !!mbx_req->msg[2];
+ bool alive = !!mbx_req->msg.data[0];
int ret = 0;
if (alive)
@@ -409,73 +401,76 @@ static int hclge_set_vf_alive(struct hclge_vport *vport,
return ret;
}
-static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+static void hclge_get_vf_tcinfo(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
- u8 vf_tc_map = 0;
unsigned int i;
- int ret;
for (i = 0; i < kinfo->num_tc; i++)
- vf_tc_map |= BIT(i);
-
- ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map,
- sizeof(vf_tc_map));
+ resp_msg->data[0] |= BIT(i);
- return ret;
+ resp_msg->len = sizeof(u8);
}
-static int hclge_get_vf_queue_info(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+static void hclge_get_vf_queue_info(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_RSS_INFO_LEN 6
- u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
+#define HCLGE_TQPS_ALLOC_OFFSET 0
+#define HCLGE_TQPS_RSS_SIZE_OFFSET 2
+#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET 4
+
struct hclge_dev *hdev = vport->back;
/* get the queue related info */
- memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
- memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16));
- memcpy(&resp_data[4], &hdev->rx_buf_len, sizeof(u16));
-
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- HCLGE_TQPS_RSS_INFO_LEN);
+ memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET],
+ &vport->alloc_tqps, sizeof(u16));
+ memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET],
+ &vport->nic.kinfo.rss_size, sizeof(u16));
+ memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET],
+ &hdev->rx_buf_len, sizeof(u16));
+ resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
}
-static int hclge_get_vf_mac_addr(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static void hclge_get_vf_mac_addr(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, vport->vf_info.mac,
- ETH_ALEN);
+ ether_addr_copy(resp_msg->data, vport->vf_info.mac);
+ resp_msg->len = ETH_ALEN;
}
-static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN 4
- u8 resp_data[HCLGE_TQPS_DEPTH_INFO_LEN];
+#define HCLGE_TQPS_NUM_TX_DESC_OFFSET 0
+#define HCLGE_TQPS_NUM_RX_DESC_OFFSET 2
+
struct hclge_dev *hdev = vport->back;
/* get the queue depth info */
- memcpy(&resp_data[0], &hdev->num_tx_desc, sizeof(u16));
- memcpy(&resp_data[2], &hdev->num_rx_desc, sizeof(u16));
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- HCLGE_TQPS_DEPTH_INFO_LEN);
+ memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET],
+ &hdev->num_tx_desc, sizeof(u16));
+ memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET],
+ &hdev->num_rx_desc, sizeof(u16));
+ resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
}
-static int hclge_get_vf_media_type(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static void hclge_get_vf_media_type(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
+#define HCLGE_VF_MEDIA_TYPE_OFFSET 0
+#define HCLGE_VF_MODULE_TYPE_OFFSET 1
+#define HCLGE_VF_MEDIA_TYPE_LENGTH 2
+
struct hclge_dev *hdev = vport->back;
- u8 resp_data[2];
- resp_data[0] = hdev->hw.mac.media_type;
- resp_data[1] = hdev->hw.mac.module_type;
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- sizeof(resp_data));
+ resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] =
+ hdev->hw.mac.media_type;
+ resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] =
+ hdev->hw.mac.module_type;
+ resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH;
}
static int hclge_get_link_info(struct hclge_vport *vport,
@@ -529,7 +524,7 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
advertising = hdev->hw.mac.advertising[0];
supported = hdev->hw.mac.supported[0];
dest_vfid = mbx_req->mbx_src_vfid;
- msg_data[0] = mbx_req->msg[2];
+ msg_data[0] = mbx_req->msg.data[0];
send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;
@@ -543,29 +538,22 @@ static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
{
u16 queue_id;
- memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+ memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
hclge_reset_vf_queue(vport, queue_id);
-
- /* send response msg to VF after queue reset complete */
- hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
}
-static void hclge_reset_vf(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static int hclge_reset_vf(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
- int ret;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
vport->vport_id);
- ret = hclge_func_reset_cmd(hdev, vport->vport_id);
- hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+ return hclge_func_reset_cmd(hdev, vport->vport_id);
}
-static void hclge_vf_keep_alive(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static void hclge_vf_keep_alive(struct hclge_vport *vport)
{
vport->last_active_jiffies = jiffies;
}
@@ -573,45 +561,39 @@ static void hclge_vf_keep_alive(struct hclge_vport *vport,
static int hclge_set_vf_mtu(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
- int ret;
u32 mtu;
- memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
- ret = hclge_set_vport_mtu(vport, mtu);
+ memcpy(&mtu, mbx_req->msg.data, sizeof(mtu));
- return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+ return hclge_set_vport_mtu(vport, mtu);
}
-static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
u16 queue_id, qid_in_pf;
- u8 resp_data[2];
- memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+ memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
- memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));
-
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- sizeof(resp_data));
+ memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
+ resp_msg->len = sizeof(qid_in_pf);
}
-static int hclge_get_rss_key(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+static void hclge_get_rss_key(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN 8
- u8 resp_data[HCLGE_RSS_MBX_RESP_LEN];
struct hclge_dev *hdev = vport->back;
u8 index;
- index = mbx_req->msg[2];
+ index = mbx_req->msg.data[0];
- memcpy(&resp_data[0],
+ memcpy(resp_msg->data,
&hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
-
- return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
- HCLGE_RSS_MBX_RESP_LEN);
+ resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
}
static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
@@ -634,13 +616,10 @@ static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
static void hclge_handle_link_change_event(struct hclge_dev *hdev,
struct hclge_mbx_vf_to_pf_cmd *req)
{
-#define LINK_STATUS_OFFSET 1
-#define LINK_FAIL_CODE_OFFSET 2
-
hclge_task_schedule(hdev, 0);
- if (!req->msg[LINK_STATUS_OFFSET])
- hclge_link_fail_parse(hdev, req->msg[LINK_FAIL_CODE_OFFSET]);
+ if (!req->msg.subcode)
+ hclge_link_fail_parse(hdev, req->msg.data[0]);
}
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
@@ -662,12 +641,14 @@ static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
+ struct hclge_respond_to_vf_msg resp_msg;
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclge_vport *vport;
struct hclge_desc *desc;
unsigned int flag;
- int ret;
+ int ret = 0;
+ memset(&resp_msg, 0, sizeof(resp_msg));
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
@@ -683,7 +664,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %u\n",
- req->msg[0]);
+ req->msg.code);
/* dropping/not processing this invalid message */
crq->desc[crq->next_to_use].flag = 0;
@@ -693,7 +674,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
vport = &hdev->vport[req->mbx_src_vfid];
- switch (req->msg[0]) {
+ switch (req->msg.code) {
case HCLGE_MBX_MAP_RING_TO_VECTOR:
ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
req);
@@ -717,47 +698,34 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret);
break;
case HCLGE_MBX_SET_MULTICAST:
- ret = hclge_set_vf_mc_mac_addr(vport, req, false);
+ ret = hclge_set_vf_mc_mac_addr(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"PF fail(%d) to set VF MC MAC Addr\n",
ret);
break;
case HCLGE_MBX_SET_VLAN:
- ret = hclge_set_vf_vlan_cfg(vport, req);
+ ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg);
if (ret)
dev_err(&hdev->pdev->dev,
"PF failed(%d) to config VF's VLAN\n",
ret);
break;
case HCLGE_MBX_SET_ALIVE:
- ret = hclge_set_vf_alive(vport, req, false);
+ ret = hclge_set_vf_alive(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"PF failed(%d) to set VF's ALIVE\n",
ret);
break;
case HCLGE_MBX_GET_QINFO:
- ret = hclge_get_vf_queue_info(vport, req, true);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get Q info for VF\n",
- ret);
+ hclge_get_vf_queue_info(vport, &resp_msg);
break;
case HCLGE_MBX_GET_QDEPTH:
- ret = hclge_get_vf_queue_depth(vport, req, true);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get Q depth for VF\n",
- ret);
+ hclge_get_vf_queue_depth(vport, &resp_msg);
break;
-
case HCLGE_MBX_GET_TCINFO:
- ret = hclge_get_vf_tcinfo(vport, req, true);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get TC info for VF\n",
- ret);
+ hclge_get_vf_tcinfo(vport, &resp_msg);
break;
case HCLGE_MBX_GET_LINK_STATUS:
ret = hclge_get_link_info(vport, req);
@@ -770,10 +738,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
hclge_mbx_reset_vf_queue(vport, req);
break;
case HCLGE_MBX_RESET:
- hclge_reset_vf(vport, req);
+ ret = hclge_reset_vf(vport);
break;
case HCLGE_MBX_KEEP_ALIVE:
- hclge_vf_keep_alive(vport, req);
+ hclge_vf_keep_alive(vport);
break;
case HCLGE_MBX_SET_MTU:
ret = hclge_set_vf_mtu(vport, req);
@@ -782,18 +750,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"VF fail(%d) to set mtu\n", ret);
break;
case HCLGE_MBX_GET_QID_IN_PF:
- ret = hclge_get_queue_id_in_pf(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get qid for VF\n",
- ret);
+ hclge_get_queue_id_in_pf(vport, req, &resp_msg);
break;
case HCLGE_MBX_GET_RSS_KEY:
- ret = hclge_get_rss_key(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to get rss key for VF\n",
- ret);
+ hclge_get_rss_key(vport, req, &resp_msg);
break;
case HCLGE_MBX_GET_LINK_MODE:
hclge_get_link_mode(vport, req);
@@ -807,21 +767,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
hclge_rm_vport_all_vlan_table(vport, true);
break;
case HCLGE_MBX_GET_MEDIA_TYPE:
- ret = hclge_get_vf_media_type(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to media type for VF\n",
- ret);
+ hclge_get_vf_media_type(vport, &resp_msg);
break;
case HCLGE_MBX_PUSH_LINK_STATUS:
hclge_handle_link_change_event(hdev, req);
break;
case HCLGE_MBX_GET_MAC_ADDR:
- ret = hclge_get_vf_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to get MAC for VF\n",
- ret);
+ hclge_get_vf_mac_addr(vport, &resp_msg);
break;
case HCLGE_MBX_NCSI_ERROR:
hclge_handle_ncsi_error(hdev);
@@ -829,11 +781,22 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %u\n",
- req->msg[0]);
+ req->msg.code);
break;
}
+
+ /* PF driver should not reply to IMP-originated messages */
+ if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
+ req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
+ resp_msg.status = ret;
+ hclge_gen_resp_to_vf(vport, req, &resp_msg);
+ }
+
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
+
+ /* reinitialize ret after completing the mbx message processing */
+ ret = 0;
}
/* Write back the CMDQ_RQ header pointer; the M7 needs this pointer */
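[Note: the mailbox refactor above retires magic msg[] indices on both sides: requests are parsed through named fields, responses accumulate in a hclge_respond_to_vf_msg and are sent from one place at the end of hclge_mbx_handler(), and hclge_errno_to_resp() maps negative errnos to the positive status field the VF expects. A sketch of the layout implied by the call sites, as an assumption only; the real definitions live in hclge_mbx.h:

	#include <linux/types.h>

	/* member sizes and ordering below are inferred from usage above,
	 * not copied from the driver headers
	 */
	struct example_vf_to_pf_msg {
		u8 code;
		union {
			struct {
				u8 subcode;
				u8 data[14];	/* general payload */
			};
			struct {		/* promiscuous settings */
				u8 en_bc;
				u8 en_uc;
				u8 en_mc;
			};
			struct {		/* ring-chain mapping */
				u8 vector_id;
				u8 ring_num;
			};
		};
	};

	struct example_respond_to_vf_msg {
		int status;	/* 0 or -errno from the handler */
		u8 data[8];	/* bounded by the mailbox response size */
		u16 len;
	};
]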
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 0510d85a7f6a..e02d427131ee 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -229,13 +229,25 @@ static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
hclgevf_tqps_get_stats(handle, data);
}
+static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
+ u8 subcode)
+{
+ if (msg) {
+ memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
+ msg->code = code;
+ msg->subcode = subcode;
+ }
+}
+
static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
+ struct hclge_vf_to_pf_msg send_msg;
u8 resp_msg;
int status;
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
- true, &resp_msg, sizeof(resp_msg));
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
+ sizeof(resp_msg));
if (status) {
dev_err(&hdev->pdev->dev,
"VF request to get TC info from PF failed %d",
@@ -251,12 +263,14 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
struct hnae3_handle *nic = &hdev->nic;
+ struct hclge_vf_to_pf_msg send_msg;
u8 resp_msg;
int ret;
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
- HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
- NULL, 0, true, &resp_msg, sizeof(u8));
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
+ sizeof(u8));
if (ret) {
dev_err(&hdev->pdev->dev,
"VF request to get port based vlan state failed %d",
@@ -272,11 +286,16 @@ static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN 6
+#define HCLGEVF_TQPS_ALLOC_OFFSET 0
+#define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2
+#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4
+
u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
+ struct hclge_vf_to_pf_msg send_msg;
int status;
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
- true, resp_msg,
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
HCLGEVF_TQPS_RSS_INFO_LEN);
if (status) {
dev_err(&hdev->pdev->dev,
@@ -285,9 +304,12 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
return status;
}
- memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
- memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
- memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16));
+ memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
+ sizeof(u16));
+ memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
+ sizeof(u16));
+ memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
+ sizeof(u16));
return 0;
}
@@ -295,11 +317,15 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN 4
+#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0
+#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2
+
u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
+ struct hclge_vf_to_pf_msg send_msg;
int ret;
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0,
- true, resp_msg,
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
HCLGEVF_TQPS_DEPTH_INFO_LEN);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -308,8 +334,10 @@ static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
return ret;
}
- memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16));
- memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16));
+ memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
+ sizeof(u16));
+ memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
+ sizeof(u16));
return 0;
}
@@ -317,14 +345,14 @@ static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- u8 msg_data[2], resp_data[2];
+ struct hclge_vf_to_pf_msg send_msg;
u16 qid_in_pf = 0;
+ u8 resp_data[2];
int ret;
- memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
-
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
- sizeof(msg_data), true, resp_data,
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
+ memcpy(send_msg.data, &queue_id, sizeof(queue_id));
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
sizeof(resp_data));
if (!ret)
qid_in_pf = *(u16 *)resp_data;
@@ -334,11 +362,13 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
+ struct hclge_vf_to_pf_msg send_msg;
u8 resp_msg[2];
int ret;
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
- true, resp_msg, sizeof(resp_msg));
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
+ sizeof(resp_msg));
if (ret) {
dev_err(&hdev->pdev->dev,
"VF request to get the pf port media type failed %d",
@@ -425,11 +455,11 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
+ struct hclge_vf_to_pf_msg send_msg;
int status;
- u8 resp_msg;
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
- 0, false, &resp_msg, sizeof(resp_msg));
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
if (status)
dev_err(&hdev->pdev->dev,
"VF failed to fetch link status(%d) from PF", status);
@@ -463,19 +493,16 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
-#define HCLGEVF_ADVERTISING 0
-#define HCLGEVF_SUPPORTED 1
- u8 send_msg;
- u8 resp_msg;
+#define HCLGEVF_ADVERTISING 0
+#define HCLGEVF_SUPPORTED 1
- send_msg = HCLGEVF_ADVERTISING;
- hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
- &send_msg, sizeof(send_msg), false,
- &resp_msg, sizeof(resp_msg));
- send_msg = HCLGEVF_SUPPORTED;
- hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0,
- &send_msg, sizeof(send_msg), false,
- &resp_msg, sizeof(resp_msg));
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
+ send_msg.data[0] = HCLGEVF_ADVERTISING;
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ send_msg.data[0] = HCLGEVF_SUPPORTED;
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
@@ -677,19 +704,19 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN 8
-
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
+ struct hclge_vf_to_pf_msg send_msg;
u16 msg_num, hash_key_index;
u8 index;
int ret;
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
HCLGEVF_RSS_MBX_RESP_LEN;
for (index = 0; index < msg_num; index++) {
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0,
- &index, sizeof(index),
- true, resp_msg,
+ send_msg.data[0] = index;
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
HCLGEVF_RSS_MBX_RESP_LEN);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -1001,44 +1028,32 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
struct hnae3_ring_chain_node *node;
- struct hclge_mbx_vf_to_pf_cmd *req;
- struct hclgevf_desc desc;
- int i = 0;
int status;
- u8 type;
+ int i = 0;
- req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
- type = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
+ memset(&send_msg, 0, sizeof(send_msg));
+ send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
HCLGE_MBX_UNMAP_RING_TO_VECTOR;
+ send_msg.vector_id = vector_id;
for (node = ring_chain; node; node = node->next) {
- int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
- HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;
-
- if (i == 0) {
- hclgevf_cmd_setup_basic_desc(&desc,
- HCLGEVF_OPC_MBX_VF_TO_PF,
- false);
- req->msg[0] = type;
- req->msg[1] = vector_id;
- }
-
- req->msg[idx_offset] =
+ send_msg.param[i].ring_type =
hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
- req->msg[idx_offset + 1] = node->tqp_index;
- req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S);
+
+ send_msg.param[i].tqp_index = node->tqp_index;
+ send_msg.param[i].int_gl_index =
+ hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S);
i++;
- if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
- HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
- HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
- !node->next) {
- req->msg[2] = i;
+ if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
+ send_msg.ring_num = i;
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
+ NULL, 0);
if (status) {
dev_err(&hdev->pdev->dev,
"Map TQP fail, status is %d.\n",
@@ -1046,11 +1061,6 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
return status;
}
i = 0;
- hclgevf_cmd_setup_basic_desc(&desc,
- HCLGEVF_OPC_MBX_VF_TO_PF,
- false);
- req->msg[0] = type;
- req->msg[1] = vector_id;
}
}
@@ -1123,18 +1133,17 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
bool en_uc_pmc, bool en_mc_pmc,
bool en_bc_pmc)
{
- struct hclge_mbx_vf_to_pf_cmd *req;
- struct hclgevf_desc desc;
+ struct hclge_vf_to_pf_msg send_msg;
int ret;
- req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
- req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
- req->msg[1] = en_bc_pmc ? 1 : 0;
- req->msg[2] = en_uc_pmc ? 1 : 0;
- req->msg[3] = en_mc_pmc ? 1 : 0;
+ memset(&send_msg, 0, sizeof(send_msg));
+ send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
+ send_msg.en_bc = en_bc_pmc ? 1 : 0;
+ send_msg.en_uc = en_uc_pmc ? 1 : 0;
+ send_msg.en_mc = en_mc_pmc ? 1 : 0;
+
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
- ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"Set promisc mode fail, status is %d.\n", ret);
@@ -1193,11 +1202,13 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
+ struct hclge_vf_to_pf_msg send_msg;
u8 host_mac[ETH_ALEN];
int status;
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MAC_ADDR, 0, NULL, 0,
- true, host_mac, ETH_ALEN);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
+ ETH_ALEN);
if (status) {
dev_err(&hdev->pdev->dev,
"fail to get VF MAC from host %d", status);
@@ -1229,20 +1240,16 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
+ struct hclge_vf_to_pf_msg send_msg;
u8 *new_mac_addr = (u8 *)p;
- u8 msg_data[ETH_ALEN * 2];
- u16 subcode;
int status;
- ether_addr_copy(msg_data, new_mac_addr);
- ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
-
- subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
+ send_msg.subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
HCLGE_MBX_MAC_VLAN_UC_MODIFY;
-
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
- subcode, msg_data, sizeof(msg_data),
- true, NULL, 0);
+ ether_addr_copy(send_msg.data, new_mac_addr);
+ ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
if (!status)
ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
@@ -1253,49 +1260,60 @@ static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
- HCLGE_MBX_MAC_VLAN_UC_ADD,
- addr, ETH_ALEN, false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_ADD);
+ ether_addr_copy(send_msg.data, addr);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
- HCLGE_MBX_MAC_VLAN_UC_REMOVE,
- addr, ETH_ALEN, false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST,
+ HCLGE_MBX_MAC_VLAN_UC_REMOVE);
+ ether_addr_copy(send_msg.data, addr);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MC_ADD,
- addr, ETH_ALEN, false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_ADD);
+ ether_addr_copy(send_msg.data, addr);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
const unsigned char *addr)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
- HCLGE_MBX_MAC_VLAN_MC_REMOVE,
- addr, ETH_ALEN, false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST,
+ HCLGE_MBX_MAC_VLAN_MC_REMOVE);
+ ether_addr_copy(send_msg.data, addr);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
__be16 proto, u16 vlan_id,
bool is_kill)
{
-#define HCLGEVF_VLAN_MBX_MSG_LEN 5
+#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0
+#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1
+#define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3
+
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
+ struct hclge_vf_to_pf_msg send_msg;
int ret;
if (vlan_id > HCLGEVF_MAX_VLAN_ID)
@@ -1313,16 +1331,18 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
return -EBUSY;
}
- msg_data[0] = is_kill;
- memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
- memcpy(&msg_data[3], &proto, sizeof(proto));
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_FILTER);
+ send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
+ memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
+ sizeof(vlan_id));
+ memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
+ sizeof(proto));
/* when removing the hw vlan filter fails, record the vlan id and
 * try to remove it from hw later, to stay consistent with the
 * stack.
 */
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
- HCLGE_MBX_VLAN_FILTER, msg_data,
- HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
if (is_kill && ret)
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
@@ -1355,37 +1375,38 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- u8 msg_data;
+ struct hclge_vf_to_pf_msg send_msg;
- msg_data = enable ? 1 : 0;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
- HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
- 1, false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_RX_OFF_CFG);
+ send_msg.data[0] = enable ? 1 : 0;
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- u8 msg_data[2];
+ struct hclge_vf_to_pf_msg send_msg;
int ret;
- memcpy(msg_data, &queue_id, sizeof(queue_id));
-
/* disable vf queue before sending queue reset msg to PF */
ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
if (ret)
return ret;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
- sizeof(msg_data), true, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
+ memcpy(send_msg.data, &queue_id, sizeof(queue_id));
+ return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
- sizeof(new_mtu), true, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
+ memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
+ return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}
static int hclgevf_notify_client(struct hclgevf_dev *hdev,
@@ -1500,11 +1521,12 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME 100
+ struct hclge_vf_to_pf_msg send_msg;
int ret = 0;
if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
- 0, true, NULL, sizeof(u8));
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
hdev->rst_stats.vf_func_rst_cnt++;
}
@@ -1881,14 +1903,14 @@ static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
{
- u8 respmsg;
+ struct hclge_vf_to_pf_msg send_msg;
int ret;
if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
return;
- ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
- 0, false, &respmsg, sizeof(respmsg));
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
if (ret)
dev_err(&hdev->pdev->dev,
"VF sends keep alive cmd failed(=%d)\n", ret);
@@ -2002,7 +2024,10 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
return HCLGEVF_VECTOR0_EVENT_MBX;
}
- dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
+ /* print other vector0 event source */
+ dev_info(&hdev->pdev->dev,
+ "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
+ cmdq_stat_reg);
return HCLGEVF_VECTOR0_EVENT_OTHER;
}
@@ -2124,50 +2149,51 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
return ret;
}
-static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
{
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- int ret;
+ struct hclgevf_rss_tuple_cfg *tuple_sets;
u32 i;
+ rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
-
+ tuple_sets = &rss_cfg->rss_tuple_sets;
if (hdev->pdev->revision >= 0x21) {
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
HCLGEVF_RSS_KEY_SIZE);
+ tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ }
+
+ /* Initialize RSS indirect table */
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
+}
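A worked example of the indirection table init above: with
HCLGEVF_RSS_IND_TBL_SIZE of, say, 512 and rss_size = 4 (illustrative
values), the table cycles through the active queues:

	rss_indirection_tbl[] = { 0, 1, 2, 3, 0, 1, 2, 3, ... };

so hash buckets are spread evenly across the RSS queues.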
+
+static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int ret;
+
+ if (hdev->pdev->revision >= 0x21) {
ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
rss_cfg->rss_hash_key);
if (ret)
return ret;
- rss_cfg->rss_tuple_sets.ipv4_tcp_en =
- HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- rss_cfg->rss_tuple_sets.ipv4_udp_en =
- HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- rss_cfg->rss_tuple_sets.ipv4_sctp_en =
- HCLGEVF_RSS_INPUT_TUPLE_SCTP;
- rss_cfg->rss_tuple_sets.ipv4_fragment_en =
- HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- rss_cfg->rss_tuple_sets.ipv6_tcp_en =
- HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- rss_cfg->rss_tuple_sets.ipv6_udp_en =
- HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- rss_cfg->rss_tuple_sets.ipv6_sctp_en =
- HCLGEVF_RSS_INPUT_TUPLE_SCTP;
- rss_cfg->rss_tuple_sets.ipv6_fragment_en =
- HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-
ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
if (ret)
return ret;
}
- /* Initialize RSS indirect table */
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
- rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
-
ret = hclgevf_set_rss_indir_table(hdev);
if (ret)
return ret;
@@ -2242,12 +2268,16 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
+#define HCLGEVF_STATE_ALIVE 1
+#define HCLGEVF_STATE_NOT_ALIVE 0
+
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- u8 msg_data;
+ struct hclge_vf_to_pf_msg send_msg;
- msg_data = alive ? 1 : 0;
- return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
- 0, &msg_data, 1, false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
+ send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
+ HCLGEVF_STATE_NOT_ALIVE;
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_client_start(struct hnae3_handle *handle)
@@ -2764,6 +2794,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
/* Initialize RSS for this VF */
+ hclgevf_rss_init_cfg(hdev);
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -2801,10 +2832,12 @@ err_cmd_queue_init:
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
+ struct hclge_vf_to_pf_msg send_msg;
+
hclgevf_state_uninit(hdev);
- hclgevf_send_mbx_msg(hdev, HCLGE_MBX_VF_UNINIT, 0, NULL, 0,
- false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
hclgevf_misc_irq_uninit(hdev);
@@ -2936,6 +2969,8 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
rss_indir[i] = i % kinfo->rss_size;
+ hdev->rss_cfg.rss_size = kinfo->rss_size;
+
ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
if (ret)
dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
@@ -3101,16 +3136,17 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
u8 *port_base_vlan_info, u8 data_size)
{
struct hnae3_handle *nic = &hdev->nic;
+ struct hclge_vf_to_pf_msg send_msg;
rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
rtnl_unlock();
/* send msg to PF and wait for the port based vlan info update */
- hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
- HCLGE_MBX_PORT_BASE_VLAN_CFG,
- port_base_vlan_info, data_size,
- false, NULL, 0);
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_PORT_BASE_VLAN_CFG);
+ memcpy(send_msg.data, port_base_vlan_info, data_size);
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
@@ -3188,7 +3224,7 @@ static int hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
- hclgevf_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGEVF_NAME);
+ hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
if (!hclgevf_wq) {
pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index fee8d97f323c..3b88d866facc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -305,8 +305,8 @@ static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
return !!hdev->reset_pending;
}
-int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
- const u8 *msg_data, u8 msg_len, bool need_resp,
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
+ struct hclge_vf_to_pf_msg *send_msg, bool need_resp,
u8 *resp_data, u16 resp_len);
void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 7cbd715d5e7a..9b8154955f91 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -5,6 +5,11 @@
#include "hclgevf_main.h"
#include "hnae3.h"
+static int hclgevf_resp_to_errno(u16 resp_code)
+{
+ return resp_code ? -resp_code : 0;
+}
+
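The new helper maps the PF's positive response code onto the kernel's
negative-errno convention; a quick illustration (assuming the PF reuses
errno-style values for its response codes):

	hclgevf_resp_to_errno(0);	/* success -> 0            */
	hclgevf_resp_to_errno(22);	/* failure -> -22 (-EINVAL) */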
static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
{
/* this function should be called with mbx_resp.mbx_mutex held
@@ -79,8 +84,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
return 0;
}
-int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
- const u8 *msg_data, u8 msg_len, bool need_resp,
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
+ struct hclge_vf_to_pf_msg *send_msg, bool need_resp,
u8 *resp_data, u16 resp_len)
{
struct hclge_mbx_vf_to_pf_cmd *req;
@@ -89,21 +94,17 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
- /* first two bytes are reserved for code & subcode */
- if (msg_len > (HCLGE_MBX_MAX_MSG_SIZE - 2)) {
+ if (!send_msg) {
dev_err(&hdev->pdev->dev,
- "VF send mbx msg fail, msg len %d exceeds max len %d\n",
- msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+ "failed to send mbx, msg is NULL\n");
return -EINVAL;
}
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
- req->mbx_need_resp |= need_resp ? HCLGE_MBX_NEED_RESP_BIT :
- ~HCLGE_MBX_NEED_RESP_BIT;
- req->msg[0] = code;
- req->msg[1] = subcode;
- if (msg_data)
- memcpy(&req->msg[2], msg_data, msg_len);
+ if (need_resp)
+ hnae3_set_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B, 1);
+
+ memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
/* synchronous send */
if (need_resp) {
@@ -118,7 +119,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
return status;
}
- status = hclgevf_get_mbx_resp(hdev, code, subcode, resp_data,
+ status = hclgevf_get_mbx_resp(hdev, send_msg->code,
+ send_msg->subcode, resp_data,
resp_len);
mutex_unlock(&hdev->mbx_resp.mbx_mutex);
} else {
@@ -169,7 +171,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %u\n",
- req->msg[0]);
+ req->msg.code);
/* dropping/not processing this invalid message */
crq->desc[crq->next_to_use].flag = 0;
@@ -183,19 +185,21 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
* timeout and simultaneously queue the async messages for later
* processing in context of the mailbox task, i.e. the slow path.
*/
- switch (req->msg[0]) {
+ switch (req->msg.code) {
case HCLGE_MBX_PF_VF_RESP:
if (resp->received_resp)
dev_warn(&hdev->pdev->dev,
"VF mbx resp flag not clear(%u)\n",
- req->msg[1]);
+ req->msg.vf_mbx_msg_code);
resp->received_resp = true;
- resp->origin_mbx_msg = (req->msg[1] << 16);
- resp->origin_mbx_msg |= req->msg[2];
- resp->resp_status = req->msg[3];
+ resp->origin_mbx_msg =
+ (req->msg.vf_mbx_msg_code << 16);
+ resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode;
+ resp->resp_status =
+ hclgevf_resp_to_errno(req->msg.resp_status);
- temp = (u8 *)&req->msg[4];
+ temp = (u8 *)req->msg.resp_data;
for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
resp->additional_info[i] = *temp;
temp++;
@@ -220,13 +224,13 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
HCLGE_MBX_MAX_ARQ_MSG_NUM) {
dev_warn(&hdev->pdev->dev,
"Async Q full, dropping msg(%u)\n",
- req->msg[1]);
+ req->msg.code);
break;
}
/* tail the async message in arq */
msg_q = hdev->arq.msg_q[hdev->arq.tail];
- memcpy(&msg_q[0], req->msg,
+ memcpy(&msg_q[0], &req->msg,
HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
hclge_mbx_tail_ptr_move_arq(hdev->arq);
atomic_inc(&hdev->arq.count);
@@ -237,7 +241,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
default:
dev_err(&hdev->pdev->dev,
"VF received unsupported(%u) mbx msg from PF\n",
- req->msg[0]);
+ req->msg.code);
break;
}
crq->desc[crq->next_to_use].flag = 0;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index b7fc17756c51..06248a7db7f2 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -872,7 +872,7 @@ static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
{
struct emac_regs __iomem *p = dev->emacp;
u32 r = 0;
- int n, err = -ETIMEDOUT;
+ int n;
mutex_lock(&dev->mdio_lock);
@@ -919,7 +919,6 @@ static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
goto bail;
}
}
- err = 0;
bail:
if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 84121aab7ff1..96d36ae5049e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -712,29 +712,36 @@ static int ibmveth_close(struct net_device *netdev)
return 0;
}
-static int netdev_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
+static int ibmveth_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
- u32 supported, advertising;
-
- supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
- SUPPORTED_FIBRE);
- advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
- ADVERTISED_FIBRE);
- cmd->base.speed = SPEED_1000;
- cmd->base.duplex = DUPLEX_FULL;
- cmd->base.port = PORT_FIBRE;
- cmd->base.phy_address = 0;
- cmd->base.autoneg = AUTONEG_ENABLE;
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+
+ return ethtool_virtdev_set_link_ksettings(dev, cmd,
+ &adapter->speed,
+ &adapter->duplex);
+}
+
+static int ibmveth_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+
+ cmd->base.speed = adapter->speed;
+ cmd->base.duplex = adapter->duplex;
+ cmd->base.port = PORT_OTHER;
return 0;
}
+static void ibmveth_init_link_settings(struct net_device *dev)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(dev);
+
+ adapter->speed = SPEED_1000;
+ adapter->duplex = DUPLEX_FULL;
+}
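ibmveth now delegates to the shared virtual-device helper; a rough sketch of
what that helper does (an assumption about its internals -- only the
signature is visible from this call site):

int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd,
				       u32 *dev_speed, u8 *dev_duplex)
{
	/* virtual links have no PHY: reject autoneg and unknown values */
	if (cmd->base.autoneg == AUTONEG_ENABLE ||
	    cmd->base.speed == (u32)SPEED_UNKNOWN ||
	    cmd->base.duplex == DUPLEX_UNKNOWN)
		return -EOPNOTSUPP;

	/* cache the requested values for later get_link_ksettings calls */
	*dev_speed = cmd->base.speed;
	*dev_duplex = cmd->base.duplex;
	return 0;
}

With this in place, an "ethtool -s <ifname> speed 1000 duplex full autoneg off"
request takes effect on the emulated link.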
+
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -965,12 +972,13 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_strings = ibmveth_get_strings,
- .get_sset_count = ibmveth_get_sset_count,
- .get_ethtool_stats = ibmveth_get_ethtool_stats,
- .get_link_ksettings = netdev_get_link_ksettings,
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ibmveth_get_strings,
+ .get_sset_count = ibmveth_get_sset_count,
+ .get_ethtool_stats = ibmveth_get_ethtool_stats,
+ .get_link_ksettings = ibmveth_get_link_ksettings,
+ .set_link_ksettings = ibmveth_set_link_ksettings,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -978,8 +986,6 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
-#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
-
static int ibmveth_send(struct ibmveth_adapter *adapter,
union ibmveth_buf_desc *descs, unsigned long mss)
{
@@ -1674,6 +1680,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->netdev = netdev;
adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
adapter->pool_config = 0;
+ ibmveth_init_link_settings(netdev);
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 4e9bf3421f4f..27dfff200166 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -162,6 +162,9 @@ struct ibmveth_adapter {
u64 tx_send_failed;
u64 tx_large_packets;
u64 rx_large_packets;
+ /* Ethtool settings */
+ u8 duplex;
+ u32 speed;
};
/*
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 154e2e818ec6..ad34e4335df2 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -294,6 +294,7 @@ config ICE
tristate "Intel(R) Ethernet Connection E800 Series Support"
default n
depends on PCI_MSI
+ select NET_DEVLINK
---help---
This driver supports Intel(R) Ethernet Connection E800 Series of
devices. For more information on how to identify your adapter, go
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index be56e631d693..6f45df5690d4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1852,6 +1852,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
}
static const struct ethtool_ops e1000_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 2bced34c19ba..f7103356ef56 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2715,11 +2715,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
} else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
ipcse = 0;
}
ipcss = skb_network_offset(skb);
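The new helper folds the open-coded IPv6 GSO checksum prep into one call; a
sketch of the presumed equivalent, reconstructed from the lines it replaces:

static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	/* zero payload_len and seed the pseudo-header checksum for GSO */
	ipv6h->payload_len = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
				     0, IPPROTO_TCP, 0);
}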
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index adce7e319b9e..1d47e2503072 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -897,6 +897,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch_cnp:
/* fall through */
case e1000_pch_tgp:
+ case e1000_pch_adp:
mask |= BIT(18);
break;
default:
@@ -1561,6 +1562,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
@@ -2305,6 +2307,7 @@ static int e1000e_get_ts_info(struct net_device *netdev,
}
static const struct ethtool_ops e1000_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
.get_regs_len = e1000_get_regs_len,
.get_regs = e1000_get_regs,
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index f556163481cb..b1447221669e 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -97,6 +97,11 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_TGP_I219_LM14 0x15F9
#define E1000_DEV_ID_PCH_TGP_I219_V14 0x15FA
#define E1000_DEV_ID_PCH_TGP_I219_LM15 0x15F4
+#define E1000_DEV_ID_PCH_TGP_I219_V15 0x15F5
+#define E1000_DEV_ID_PCH_ADP_I219_LM16 0x1A1E
+#define E1000_DEV_ID_PCH_ADP_I219_V16 0x1A1F
+#define E1000_DEV_ID_PCH_ADP_I219_LM17 0x1A1C
+#define E1000_DEV_ID_PCH_ADP_I219_V17 0x1A1D
#define E1000_REVISION_4 4
@@ -121,6 +126,7 @@ enum e1000_mac_type {
e1000_pch_spt,
e1000_pch_cnp,
e1000_pch_tgp,
+ e1000_pch_adp,
};
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index b4135c50e905..735bf25952fc 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -317,6 +317,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -460,6 +461,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -703,6 +705,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1642,6 +1645,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2095,6 +2099,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3133,6 +3138,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4077,6 +4083,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_spt:
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index e531976f8a67..51512a73fdd0 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1363,7 +1363,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (!(swsm & E1000_SWSM_SMBI))
break;
- usleep_range(50, 100);
+ udelay(100);
i++;
}
@@ -1381,7 +1381,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (er32(SWSM) & E1000_SWSM_SWESMBI)
break;
- usleep_range(50, 100);
+ udelay(100);
}
if (i == timeout) {
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 0f02c7a5ee9b..177c6da80c57 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3536,6 +3536,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
break;
case e1000_pch_cnp:
case e1000_pch_tgp:
+ case e1000_pch_adp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -3807,7 +3808,7 @@ static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
tdt = er32(TDT(0));
BUG_ON(tdt != tx_ring->next_to_use);
tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
- tx_desc->buffer_addr = tx_ring->dma;
+ tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma);
tx_desc->lower.data = cpu_to_le32(txd_lower | size);
tx_desc->upper.data = 0;
@@ -4049,6 +4050,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_cnp:
/* fall-through */
case e1000_pch_tgp:
+ case e1000_pch_adp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -5461,10 +5463,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
} else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
ipcse = 0;
}
ipcss = skb_network_offset(skb);
@@ -7759,6 +7758,11 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index eaa5a0fb99f0..439fda2f5368 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -297,6 +297,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_cnp:
/* fall-through */
case e1000_pch_tgp:
+ case e1000_pch_adp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index f306084ca12c..5b78362b82ac 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -41,7 +41,7 @@ struct fm10k_l2_accel {
u16 count;
u16 dglort;
struct rcu_head rcu;
- struct net_device *macvlan[0];
+ struct net_device *macvlan[];
};
enum fm10k_ring_state_t {
@@ -198,7 +198,7 @@ struct fm10k_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
/* for dynamic allocation of rings associated with this q_vector */
- struct fm10k_ring ring[0] ____cacheline_internodealigned_in_smp;
+ struct fm10k_ring ring[] ____cacheline_internodealigned_in_smp;
};
enum fm10k_ring_f_enum {
@@ -218,7 +218,7 @@ struct fm10k_iov_data {
unsigned int num_vfs;
unsigned int next_vf_mbx;
struct rcu_head rcu;
- struct fm10k_vf_info vf_info[0];
+ struct fm10k_vf_info vf_info[];
};
struct fm10k_udp_port {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 68edf55ac906..37fbc646deb9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1151,6 +1151,8 @@ static int fm10k_set_channels(struct net_device *dev,
}
static const struct ethtool_ops fm10k_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_strings = fm10k_get_strings,
.get_sset_count = fm10k_get_sset_count,
.get_ethtool_stats = fm10k_get_ethtool_stats,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 4833187bd259..e95b8da45e07 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -334,13 +334,13 @@ int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
struct i40e_ddp_profile_list {
u32 p_count;
- struct i40e_profile_info p_info[0];
+ struct i40e_profile_info p_info[];
};
struct i40e_ddp_old_profile_list {
struct list_head list;
size_t old_ddp_size;
- u8 old_ddp_buf[0];
+ u8 old_ddp_buf[];
};
/* macros related to FLX_PIT */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 317f3f1458db..aa8026b1eb81 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -5249,6 +5249,11 @@ static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
};
static const struct ethtool_ops i40e_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_RX_USECS_HIGH |
+ ETHTOOL_COALESCE_TX_USECS_HIGH,
.get_drvinfo = i40e_get_drvinfo,
.get_regs_len = i40e_get_regs_len,
.get_regs = i40e_get_regs,
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index bd1b1ed323f4..bcd11b4b29df 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -81,7 +81,7 @@ struct iavf_vsi {
#define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))
#define IAVF_TX_CTXTDESC(R, i) \
(&(((struct iavf_tx_context_desc *)((R)->desc))[i]))
-#define IAVF_MAX_REQ_QUEUES 4
+#define IAVF_MAX_REQ_QUEUES 16
#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 84c3d8d97ef6..2c39d46b6138 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -860,7 +860,7 @@ static void iavf_get_channels(struct net_device *netdev,
struct iavf_adapter *adapter = netdev_priv(netdev);
/* Report maximum channels */
- ch->max_combined = IAVF_MAX_REQ_QUEUES;
+ ch->max_combined = adapter->vsi_res->num_queue_pairs;
ch->max_other = NONQ_VECS;
ch->other_count = NONQ_VECS;
@@ -881,14 +881,7 @@ static int iavf_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- int num_req = ch->combined_count;
-
- if (num_req != adapter->num_active_queues &&
- !(adapter->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
- dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n");
- return -EINVAL;
- }
+ u32 num_req = ch->combined_count;
if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
adapter->num_tc) {
@@ -899,14 +892,19 @@ static int iavf_set_channels(struct net_device *netdev,
/* All of these should have already been checked by ethtool before this
* even gets to us, but just to be sure.
*/
- if (num_req <= 0 || num_req > IAVF_MAX_REQ_QUEUES)
+ if (num_req > adapter->vsi_res->num_queue_pairs)
return -EINVAL;
+ if (num_req == adapter->num_active_queues)
+ return 0;
+
if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
return -EINVAL;
adapter->num_req_queues = num_req;
- return iavf_request_queues(adapter, num_req);
+ adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
+ iavf_schedule_reset(adapter);
+ return 0;
}
/**
@@ -998,6 +996,10 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
}
static const struct ethtool_ops iavf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 62fe56ddcb6e..2050649848ba 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3061,9 +3061,6 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
struct flow_cls_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
return iavf_configure_clsflower(adapter, cls_flower);
@@ -3087,6 +3084,11 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
+ struct iavf_adapter *adapter = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_CLSFLOWER:
return iavf_setup_tc_cls_flower(cb_priv, type_data);
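The common helper centralizes the chain-0 check that the deleted lines used
to open-code; roughly (an assumption about its internals):

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	/* hw offload must be enabled and only chain 0 is supported */
	if (!tc_can_offload(dev))
		return false;
	if (common->chain_index)
		return false;
	return true;
}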
@@ -3448,7 +3450,7 @@ int iavf_process_config(struct iavf_adapter *adapter)
}
if (num_req_queues &&
- num_req_queues != adapter->vsi_res->num_queue_pairs) {
+ num_req_queues > adapter->vsi_res->num_queue_pairs) {
/* Problem. The PF gave us fewer queues than what we had
* negotiated in our request. Need a reset to see if we can't
* get back to a working state.
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 1ab9cb339acb..d58374c2c33d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -397,33 +397,6 @@ void iavf_map_queues(struct iavf_adapter *adapter)
}
/**
- * iavf_request_queues
- * @adapter: adapter structure
- * @num: number of requested queues
- *
- * We get a default number of queues from the PF. This enables us to request a
- * different number. Returns 0 on success, negative on failure
- **/
-int iavf_request_queues(struct iavf_adapter *adapter, int num)
-{
- struct virtchnl_vf_res_request vfres;
-
- if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
- /* bail because we already have a command pending */
- dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n",
- adapter->current_op);
- return -EBUSY;
- }
-
- vfres.num_queue_pairs = min_t(int, num, num_online_cpus());
-
- adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
- adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
- return iavf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
- (u8 *)&vfres, sizeof(vfres));
-}
-
-/**
* iavf_add_ether_addrs
* @adapter: adapter structure
*
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 59544b0fc086..29c6c6743450 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -19,6 +19,7 @@ ice-y := ice_main.o \
ice_txrx.o \
ice_flex_pipe.o \
ice_flow.o \
+ ice_devlink.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index cb10abb14e11..5c11448bfbb3 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -34,6 +34,7 @@
#include <linux/ctype.h>
#include <linux/bpf.h>
#include <linux/avf/virtchnl.h>
+#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
#include "ice_devids.h"
@@ -60,7 +61,6 @@ extern const char ice_drv_ver[];
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
-#define ICE_MBXRQ_LEN 512
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
@@ -70,7 +70,6 @@ extern const char ice_drv_ver[];
#define ICE_Q_WAIT_RETRY_LIMIT 10
#define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS 256
-#define ICE_MAX_SMALL_RSS_QS 8
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
@@ -212,6 +211,8 @@ enum ice_state {
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
__ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
+ __ICE_MDD_VF_PRINT_PENDING, /* set when an MDD VF print is pending */
+ __ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
__ICE_STATE_NBITS /* must be last */
};
@@ -340,12 +341,18 @@ enum ice_pf_flags {
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX,
+ ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_PF_FLAGS_NBITS /* must be last */
};
struct ice_pf {
struct pci_dev *pdev;
+ /* devlink port data */
+ struct devlink_port devlink_port;
+
+ struct devlink_region *nvm_region;
+
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
struct ice_res_tracker *irq_tracker;
@@ -361,8 +368,10 @@ struct ice_pf {
struct ice_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
u16 num_vfs_supported; /* num VFs supported for this PF */
- u16 num_vf_qps; /* num queue pairs per VF */
- u16 num_vf_msix; /* num vectors per VF */
+ u16 num_qps_per_vf;
+ u16 num_msix_per_vf;
+ /* used to ratelimit the MDD event logging */
+ unsigned long last_printed_mdd_jiffies;
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 6873998cf145..2381b4014ed6 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1232,6 +1232,7 @@ struct ice_aqc_sff_eeprom {
* NVM Update commands (indirect 0x0703)
*/
struct ice_aqc_nvm {
+#define ICE_AQC_NVM_MAX_OFFSET 0xFFFFFF
__le16 offset_low;
u8 offset_high;
u8 cmd_flags;
@@ -1250,6 +1251,8 @@ struct ice_aqc_nvm {
__le32 addr_low;
};
+#define ICE_AQC_NVM_START_POINT 0
+
/* NVM Checksum Command (direct, 0x0706) */
struct ice_aqc_nvm_checksum {
u8 flags;
@@ -1661,6 +1664,13 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[1];
};
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct ice_aqc_event_lan_overflow {
+ __le32 prtdcb_ruptq;
+ __le32 qtx_ctl;
+ u8 reserved[8];
+};
+
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -1730,6 +1740,7 @@ struct ice_aq_desc {
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_event_mask set_event_mask;
struct ice_aqc_get_link_status get_link_status;
+ struct ice_aqc_event_lan_overflow lan_overflow;
} params;
};
@@ -1756,6 +1767,7 @@ enum ice_aq_err {
ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
+ ICE_AQ_RC_EINVAL = 14, /* Invalid argument */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
@@ -1860,6 +1872,9 @@ enum ice_adminq_opc {
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
+ /* Standalone Commands/Events */
+ ice_aqc_opc_event_lan_overflow = 0x1001,
+
/* debug commands */
ice_aqc_opc_fw_logging = 0xFF09,
ice_aqc_opc_fw_logging_info = 0xFF10,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 81885efadc7a..a19cd6f5436b 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -203,8 +203,7 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
*/
static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
{
- WARN_ONCE(ice_ring_is_xdp(ring) && tc,
- "XDP ring can't belong to TC other than 0");
+ WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
/* Idea here for calculation is that we subtract the number of queue
* count from TC that ring belongs to from it's absolute queue index
@@ -247,7 +246,6 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*/
switch (vsi->type) {
case ICE_VSI_LB:
- /* fall through */
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
@@ -387,8 +385,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* Enable Flexible Descriptors in the queue context which
* allows this driver to select a specific receive descriptor format
*/
+ regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
if (vsi->type != ICE_VSI_VF) {
- regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
QRXFLXP_CNTXT_RXDID_IDX_M;
@@ -399,8 +397,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
- wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ } else {
+ regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
+ QRXFLXP_CNTXT_RXDID_PRIO_M |
+ QRXFLXP_CNTXT_TS_M);
}
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -459,17 +461,20 @@ int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
}
/**
- * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
+ * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
* @vsi: the VSI being configured
- * @ena: start or stop the Rx rings
- * @rxq_idx: Rx queue index
+ * @ena: start or stop the Rx ring
+ * @rxq_idx: 0-based Rx queue index for the VSI passed in
+ * @wait: whether to wait for the configuration to finish in hardware
+ *
+ * Return 0 on success and negative on error.
*/
-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
+int
+ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
{
int pf_q = vsi->rxq_map[rxq_idx];
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- int ret = 0;
u32 rx_reg;
rx_reg = rd32(hw, QRX_CTRL(pf_q));
@@ -485,13 +490,30 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
rx_reg &= ~QRX_CTRL_QENA_REQ_M;
wr32(hw, QRX_CTRL(pf_q), rx_reg);
- /* wait for the change to finish */
- ret = ice_pf_rxq_wait(pf, pf_q, ena);
- if (ret)
- dev_err(ice_pf_to_dev(pf), "VSI idx %d Rx ring %d %sable timeout\n",
- vsi->idx, pf_q, (ena ? "en" : "dis"));
+ if (!wait)
+ return 0;
+
+ ice_flush(hw);
+ return ice_pf_rxq_wait(pf, pf_q, ena);
+}
- return ret;
+/**
+ * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
+ * @vsi: the VSI being configured
+ * @ena: true/false to verify Rx ring has been enabled/disabled respectively
+ * @rxq_idx: 0-based Rx queue index for the VSI passed in
+ *
+ * This routine waits for the given Rx queue of the VSI to reach the
+ * enabled or disabled state. Returns -ETIMEDOUT if the requested state is
+ * not reached after multiple retries; otherwise returns 0 on success.
+ */
+int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
+{
+ int pf_q = vsi->rxq_map[rxq_idx];
+ struct ice_pf *pf = vsi->back;
+
+ return ice_pf_rxq_wait(pf, pf_q, ena);
}
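A hypothetical caller pattern the split enables (a sketch; i and err are
illustrative locals): program all rings without blocking, flush once, then
wait for each ring to settle.

	for (i = 0; i < vsi->num_rxq; i++)
		ice_vsi_ctrl_one_rx_ring(vsi, true, i, false);

	ice_flush(&vsi->back->hw);

	for (i = 0; i < vsi->num_rxq; i++) {
		err = ice_vsi_wait_one_rx_ring(vsi, true, i);
		if (err)
			return err;
	}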
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index 407995e8e944..44efdb627043 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -8,7 +8,9 @@
int ice_setup_rx_ctx(struct ice_ring *ring);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
-int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
+int
+ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
+int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 04d5db0a25bf..2c0d8fd3d5cd 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -6,7 +6,7 @@
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
-#define ICE_PF_RESET_WAIT_COUNT 200
+#define ICE_PF_RESET_WAIT_COUNT 300
/**
* ice_set_mac_type - Sets MAC type
@@ -615,29 +615,6 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
/**
- * ice_get_nvm_version - get cached NVM version data
- * @hw: pointer to the hardware structure
- * @oem_ver: 8 bit NVM version
- * @oem_build: 16 bit NVM build number
- * @oem_patch: 8 NVM patch number
- * @ver_hi: high 16 bits of the NVM version
- * @ver_lo: low 16 bits of the NVM version
- */
-void
-ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
- u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
-{
- struct ice_nvm_info *nvm = &hw->nvm;
-
- *oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
- *oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
- *oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
- ICE_OEM_VER_BUILD_SHIFT);
- *ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
- *ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
-}
-
-/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
@@ -958,72 +935,6 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
}
/**
- * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
- * @hw: pointer to hardware structure
- * @module_tlv: pointer to module TLV to return
- * @module_tlv_len: pointer to module TLV length to return
- * @module_type: module type requested
- *
- * Finds the requested sub module TLV type from the Preserved Field
- * Area (PFA) and returns the TLV pointer and length. The caller can
- * use these to read the variable length TLV value.
- */
-enum ice_status
-ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
- u16 module_type)
-{
- enum ice_status status;
- u16 pfa_len, pfa_ptr;
- u16 next_tlv;
-
- status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n");
- return status;
- }
- status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
- return status;
- }
- /* Starting with first TLV after PFA length, iterate through the list
- * of TLVs to find the requested one.
- */
- next_tlv = pfa_ptr + 1;
- while (next_tlv < pfa_ptr + pfa_len) {
- u16 tlv_sub_module_type;
- u16 tlv_len;
-
- /* Read TLV type */
- status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
- break;
- }
- /* Read TLV length */
- status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
- break;
- }
- if (tlv_sub_module_type == module_type) {
- if (tlv_len) {
- *module_tlv = next_tlv;
- *module_tlv_len = tlv_len;
- return 0;
- }
- return ICE_ERR_INVAL_SIZE;
- }
- /* Check next TLV, i.e. current TLV pointer + length + 2 words
- * (for current TLV's type and length)
- */
- next_tlv = next_tlv + tlv_len + 2;
- }
- /* Module does not exist */
- return ICE_ERR_DOES_NOT_EXIST;
-}
-
-/**
* ice_copy_rxq_ctx_to_hw
* @hw: pointer to the hardware structure
* @ice_rxq_ctx: pointer to the rxq context
@@ -1181,7 +1092,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
case ice_aqc_opc_release_res:
if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
break;
- /* fall-through */
+ fallthrough;
default:
mutex_lock(&ice_global_cfg_lock_sw);
lock_acquired = true;
@@ -2703,7 +2614,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
break;
}
- /* fall-through */
+ fallthrough;
default:
status = ICE_ERR_PARAM;
goto ice_aq_get_set_rss_lut_exit;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index f9fc005d35a7..8104f3d64d96 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -15,9 +15,6 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
-enum ice_status
-ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
- u16 module_type);
enum ice_status ice_check_reset(struct ice_hw *hw);
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
@@ -38,9 +35,6 @@ enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
enum ice_status
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
-enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status
-ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
@@ -153,9 +147,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
-void
-ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
- u8 *oem_patch, u8 *ver_hi, u8 *ver_lo);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 7108fb41b604..7bea09363b42 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -63,6 +63,26 @@ u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
}
/**
+ * ice_dcb_get_mode - gets the DCB mode
+ * @port_info: pointer to port info structure
+ * @host: if set it's HOST if not it's MANAGED
+ */
+static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host)
+{
+ u8 mode;
+
+ if (host)
+ mode = DCB_CAP_DCBX_HOST;
+ else
+ mode = DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (port_info->local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
+ return mode | DCB_CAP_DCBX_VER_CEE;
+ else
+ return mode | DCB_CAP_DCBX_VER_IEEE;
+}
+
+/**
* ice_dcb_get_num_tc - Get the number of TCs from DCBX config
* @dcbcfg: config to retrieve number of TCs from
*/
@@ -149,6 +169,43 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
}
/**
+ * ice_dcb_bwchk - check if ETS bandwidth input parameters are correct
+ * @pf: pointer to the PF struct
+ * @dcbcfg: pointer to DCB config structure
+ */
+int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg = &dcbcfg->etscfg;
+ u8 num_tc, total_bw = 0;
+ int i;
+
+ /* returns number of contiguous TCs and 1 TC for non-contiguous TCs,
+ * since at least 1 TC has to be configured
+ */
+ num_tc = ice_dcb_get_num_tc(dcbcfg);
+
+ /* no bandwidth checks required if there's only one TC, so assign
+ * all bandwidth to TC0 and return
+ */
+ if (num_tc == 1) {
+ etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
+ return 0;
+ }
+
+ for (i = 0; i < num_tc; i++)
+ total_bw += etscfg->tcbwtable[i];
+
+ if (!total_bw) {
+ etscfg->tcbwtable[0] = ICE_TC_MAX_BW;
+ } else if (total_bw != ICE_TC_MAX_BW) {
+ dev_err(ice_pf_to_dev(pf), "Invalid config, total bandwidth must equal 100\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
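A quick worked example of the check above (illustrative values):

	u8 ok[3]   = { 50, 30, 20 };	/* sums to 100 -> accepted       */
	u8 bad[3]  = { 60, 20, 10 };	/* sums to 90  -> -EINVAL        */
	u8 zero[3] = { 0, 0, 0 };	/* unconfigured: TC0 is assigned */
					/* ICE_TC_MAX_BW, returns 0      */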
+
+/**
* ice_pf_dcb_cfg - Apply new DCB configuration
* @pf: pointer to the PF struct
* @new_cfg: DCBX config to apply
@@ -182,6 +239,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
return ret;
}
+ if (ice_dcb_bwchk(pf, new_cfg))
+ return -EINVAL;
+
/* Store old config in case FW config fails */
old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL);
if (!old_cfg)
@@ -605,14 +665,14 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
ice_cfg_sw_lldp(pf_vsi, false, true);
- pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+ pf->dcbx_cap = ice_dcb_get_mode(port_info, true);
return 0;
}
set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
- /* DCBX in FW and LLDP enabled in FW */
- pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;
+ /* DCBX/LLDP enabled in FW, set DCBNL mode advertisement */
+ pf->dcbx_cap = ice_dcb_get_mode(port_info, false);
err = ice_dcb_init_cfg(pf, locked);
if (err)
@@ -719,7 +779,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
bool need_reconfig = false;
struct ice_port_info *pi;
struct ice_vsi *pf_vsi;
- u8 type;
+ u8 mib_type;
int ret;
/* Not DCB capable or capability disabled */
@@ -734,16 +794,16 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
pi = pf->hw.port_info;
mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
/* Ignore if event is not for Nearest Bridge */
- type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
- ICE_AQ_LLDP_BRID_TYPE_M);
- dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", type);
- if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
+ mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
+ ICE_AQ_LLDP_BRID_TYPE_M);
+ dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type);
+ if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
return;
/* Check MIB Type and return if event for Remote MIB update */
- type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
- dev_dbg(dev, "LLDP event mib type %s\n", type ? "remote" : "local");
- if (type == ICE_AQ_LLDP_MIB_REMOTE) {
+ mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
+ dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local");
+ if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
@@ -775,6 +835,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
goto out;
}
+ pf->dcbx_cap = ice_dcb_get_mode(pi, false);
+
need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
&pi->local_dcbx_cfg);
ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index f15e5776f287..37680e815b02 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -20,6 +20,7 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
+int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg);
void ice_pf_dcb_recfg(struct ice_pf *pf);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index b61aba428adb..c4c12414083a 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -95,14 +95,12 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
}
- /* max_tc is a 1-8 value count of number of TC's, not a 0-7 value
- * for the TC's index number. Add one to value if not zero, and
- * for zero set it to the FW's default value
- */
- if (max_tc)
- max_tc++;
- else
- max_tc = IEEE_8021QAZ_MAX_TCS;
+ if (ice_dcb_bwchk(pf, new_cfg)) {
+ err = -EINVAL;
+ goto ets_out;
+ }
+
+ max_tc = pf->hw.func_caps.common_cap.maxtc;
new_cfg->etscfg.maxtcs = max_tc;
@@ -119,6 +117,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
if (err == ICE_DCB_NO_HW_CHG)
err = ICE_DCB_HW_CHG_RST;
+ets_out:
mutex_unlock(&pf->tc_mutex);
return err;
}
@@ -535,6 +534,30 @@ ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
}
/**
+ * ice_dcbnl_set_pg_tc_cfg_rx
+ * @netdev: relevant netdev struct
+ * @prio: corresponding user priority
+ * @prio_type: the traffic priority type
+ * @pgid: the PG ID
+ * @bw_pct: BW percentage for corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ *
+ * lldpad requires this function pointer to be non-NULL to complete CEE config.
+ */
+static void
+ice_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev,
+ int __always_unused prio,
+ u8 __always_unused prio_type,
+ u8 __always_unused pgid,
+ u8 __always_unused bw_pct,
+ u8 __always_unused up_map)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ dev_dbg(ice_pf_to_dev(pf), "Rx TC PG Config Not Supported.\n");
+}
+
+/**
* ice_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config
* @netdev: pointer to netdev struct
* @pgid: the corresponding traffic class
@@ -554,6 +577,23 @@ ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
}
/**
+ * ice_dcbnl_set_pg_bwg_cfg_rx
+ * @netdev: the corresponding netdev
+ * @pgid: corresponding TC
+ * @bw_pct: BW percentage for given TC
+ *
+ * lldpad requires this function pointer to be non-NULL to complete CEE config.
+ */
+static void
+ice_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
+ u8 __always_unused bw_pct)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ dev_dbg(ice_pf_to_dev(pf), "Rx BWG PG Config Not Supported.\n");
+}
+
+/**
* ice_dcbnl_get_cap - Get DCBX capabilities of adapter
* @netdev: pointer to netdev struct
* @capid: the capability type
@@ -799,6 +839,8 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = {
.getpermhwaddr = ice_dcbnl_get_perm_hw_addr,
.setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx,
.setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx,
+ .setpgtccfgrx = ice_dcbnl_set_pg_tc_cfg_rx,
+ .setpgbwgcfgrx = ice_dcbnl_set_pg_bwg_cfg_rx,
.getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx,
.getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx,
.getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx,
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index ce63017c56c7..9d8194671f6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -5,12 +5,34 @@
#define _ICE_DEVIDS_H_
/* Device IDs */
+/* Intel(R) Ethernet Connection E823-L for backplane */
+#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
+/* Intel(R) Ethernet Connection E823-L for SFP */
+#define ICE_DEV_ID_E823L_SFP 0x124D
+/* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E823L_10G_BASE_T 0x124E
+/* Intel(R) Ethernet Connection E823-L 1GbE */
+#define ICE_DEV_ID_E823L_1GBE 0x124F
+/* Intel(R) Ethernet Connection E823-L for QSFP */
+#define ICE_DEV_ID_E823L_QSFP 0x151D
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
+/* Intel(R) Ethernet Controller E810-XXV for SFP */
+#define ICE_DEV_ID_E810_XXV_SFP 0x159B
+/* Intel(R) Ethernet Connection E823-C for backplane */
+#define ICE_DEV_ID_E823C_BACKPLANE 0x188A
+/* Intel(R) Ethernet Connection E823-C for QSFP */
+#define ICE_DEV_ID_E823C_QSFP 0x188B
+/* Intel(R) Ethernet Connection E823-C for SFP */
+#define ICE_DEV_ID_E823C_SFP 0x188C
+/* Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E823C_10G_BASE_T 0x188D
+/* Intel(R) Ethernet Connection E823-C 1GbE */
+#define ICE_DEV_ID_E823C_SGMII 0x188E
/* Intel(R) Ethernet Connection E822-C for backplane */
#define ICE_DEV_ID_E822C_BACKPLANE 0x1890
/* Intel(R) Ethernet Connection E822-C for QSFP */
@@ -21,8 +43,8 @@
#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
/* Intel(R) Ethernet Connection E822-C 1GbE */
#define ICE_DEV_ID_E822C_SGMII 0x1894
-/* Intel(R) Ethernet Connection E822-X for backplane */
-#define ICE_DEV_ID_E822X_BACKPLANE 0x1897
+/* Intel(R) Ethernet Connection E822-L for backplane */
+#define ICE_DEV_ID_E822L_BACKPLANE 0x1897
/* Intel(R) Ethernet Connection E822-L for SFP */
#define ICE_DEV_ID_E822L_SFP 0x1898
/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
new file mode 100644
index 000000000000..c6833944b90a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_devlink.h"
+
+static int ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
+{
+ u8 dsn[8];
+
+ /* Copy the DSN into an array in Big Endian format */
+ put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);
+
+ snprintf(buf, len, "%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
+ dsn[0], dsn[1], dsn[2], dsn[3],
+ dsn[4], dsn[5], dsn[6], dsn[7]);
+
+ return 0;
+}
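/*
 * A minimal sketch of the byte order above, using a hypothetical DSN of
 * 0x0011223344556677: put_unaligned_be64() stores the most significant
 * byte first, so dsn[] becomes { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55,
 * 0x66, 0x77 } and the formatted serial number reads
 * "00-11-22-33-44-55-66-77".
 */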
+
+static int ice_info_pba(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+
+ status = ice_read_pba_string(hw, (u8 *)buf, len);
+ if (status)
+ return -EIO;
+
+ return 0;
+}
+
+static int ice_info_fw_mgmt(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ snprintf(buf, len, "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver,
+ hw->fw_patch);
+
+ return 0;
+}
+
+static int ice_info_fw_api(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ snprintf(buf, len, "%u.%u", hw->api_maj_ver, hw->api_min_ver);
+
+ return 0;
+}
+
+static int ice_info_fw_build(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ snprintf(buf, len, "0x%08x", hw->fw_build);
+
+ return 0;
+}
+
+static int ice_info_orom_ver(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_orom_info *orom = &pf->hw.nvm.orom;
+
+ snprintf(buf, len, "%u.%u.%u", orom->major, orom->build, orom->patch);
+
+ return 0;
+}
+
+static int ice_info_nvm_ver(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_nvm_info *nvm = &pf->hw.nvm;
+
+ snprintf(buf, len, "%x.%02x", nvm->major_ver, nvm->minor_ver);
+
+ return 0;
+}
+
+static int ice_info_eetrack(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_nvm_info *nvm = &pf->hw.nvm;
+
+ snprintf(buf, len, "0x%08x", nvm->eetrack);
+
+ return 0;
+}
+
+static int ice_info_ddp_pkg_name(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ snprintf(buf, len, "%s", hw->active_pkg_name);
+
+ return 0;
+}
+
+static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
+{
+ struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;
+
+ snprintf(buf, len, "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update,
+ pkg->draft);
+
+ return 0;
+}
+
+#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter }
+#define running(key, getter) { ICE_VERSION_RUNNING, key, getter }
+
+enum ice_version_type {
+ ICE_VERSION_FIXED,
+ ICE_VERSION_RUNNING,
+ ICE_VERSION_STORED,
+};
+
+static const struct ice_devlink_version {
+ enum ice_version_type type;
+ const char *key;
+ int (*getter)(struct ice_pf *pf, char *buf, size_t len);
+} ice_devlink_versions[] = {
+ fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
+ running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
+ running("fw.mgmt.api", ice_info_fw_api),
+ running("fw.mgmt.build", ice_info_fw_build),
+ running(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver),
+ running("fw.psid.api", ice_info_nvm_ver),
+ running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
+ running("fw.app.name", ice_info_ddp_pkg_name),
+ running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
+};
+
+/**
+ * ice_devlink_info_get - .info_get devlink handler
+ * @devlink: devlink instance structure
+ * @req: the devlink info request
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .info_get operation. Reports information about the
+ * device.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ char buf[100];
+ size_t i;
+ int err;
+
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set driver name");
+ return err;
+ }
+
+ err = ice_info_get_dsn(pf, buf, sizeof(buf));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to obtain serial number");
+ return err;
+ }
+
+ err = devlink_info_serial_number_put(req, buf);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
+ enum ice_version_type type = ice_devlink_versions[i].type;
+ const char *key = ice_devlink_versions[i].key;
+
+ err = ice_devlink_versions[i].getter(pf, buf, sizeof(buf));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
+ return err;
+ }
+
+ switch (type) {
+ case ICE_VERSION_FIXED:
+ err = devlink_info_version_fixed_put(req, key, buf);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
+ return err;
+ }
+ break;
+ case ICE_VERSION_RUNNING:
+ err = devlink_info_version_running_put(req, key, buf);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
+ return err;
+ }
+ break;
+ case ICE_VERSION_STORED:
+ err = devlink_info_version_stored_put(req, key, buf);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
+ return err;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
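/*
 * Note on the loop above: each ice_devlink_versions[] entry surfaces as one
 * fixed/running version row reported to userspace through the devlink info
 * request, so supporting a new version key only takes a getter plus one
 * table entry.
 */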
+
+static const struct devlink_ops ice_devlink_ops = {
+ .info_get = ice_devlink_info_get,
+};
+
+static void ice_devlink_free(void *devlink_ptr)
+{
+ devlink_free((struct devlink *)devlink_ptr);
+}
+
+/**
+ * ice_allocate_pf - Allocate devlink and return PF structure pointer
+ * @dev: the device to allocate for
+ *
+ * Allocate a devlink instance for this device and return the private area as
+ * the PF structure. The devlink memory is tracked through devres: an
+ * action is registered to free it when the driver unwinds.
+ */
+struct ice_pf *ice_allocate_pf(struct device *dev)
+{
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf));
+ if (!devlink)
+ return NULL;
+
+ /* Add an action to teardown the devlink when unwinding the driver */
+ if (devm_add_action(dev, ice_devlink_free, devlink)) {
+ devlink_free(devlink);
+ return NULL;
+ }
+
+ return devlink_priv(devlink);
+}
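/*
 * A sketch of an equivalent form: devm_add_action_or_reset() performs the
 * same "run the action now if registration fails" cleanup that the explicit
 * devlink_free() call above implements by hand:
 *
 *	if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
 *		return NULL;
 */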
+
+/**
+ * ice_devlink_register - Register devlink interface for this PF
+ * @pf: the PF to register the devlink for.
+ *
+ * Register the devlink instance associated with this physical function.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_register(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ err = devlink_register(devlink, dev);
+ if (err) {
+ dev_err(dev, "devlink registration failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_unregister - Unregister devlink resources for this PF.
+ * @pf: the PF structure to cleanup
+ *
+ * Releases resources used by devlink and cleans up associated memory.
+ */
+void ice_devlink_unregister(struct ice_pf *pf)
+{
+ devlink_unregister(priv_to_devlink(pf));
+}
+
+/**
+ * ice_devlink_create_port - Create a devlink port for this PF
+ * @pf: the PF to create a port for
+ *
+ * Create and register a devlink_port for this PF. Note that although each
+ * physical function is connected to a separate devlink instance, the port
+ * will still be numbered according to the physical function id.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_port(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ if (!vsi) {
+ dev_err(dev, "%s: unable to find main VSI\n", __func__);
+ return -EIO;
+ }
+
+ devlink_port_attrs_set(&pf->devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ pf->hw.pf_id, false, 0, NULL, 0);
+ err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id);
+ if (err) {
+ dev_err(dev, "devlink_port_register failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_destroy_port - Destroy the devlink_port for this PF
+ * @pf: the PF to cleanup
+ *
+ * Unregisters the devlink_port structure associated with this PF.
+ */
+void ice_devlink_destroy_port(struct ice_pf *pf)
+{
+ devlink_port_type_clear(&pf->devlink_port);
+ devlink_port_unregister(&pf->devlink_port);
+}
+
+/**
+ * ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents
+ * @devlink: the devlink instance
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
+ * the shadow-ram devlink region. It captures a snapshot of the shadow ram
+ * contents. This snapshot can later be viewed via the devlink-region
+ * interface.
+ *
+ * Return: zero on success (and updates the data pointer), or a non-zero
+ * error code on failure.
+ */
+static int ice_devlink_nvm_snapshot(struct devlink *devlink,
+ struct netlink_ext_ack *extack, u8 **data)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ void *nvm_data;
+ u32 nvm_size;
+
+ nvm_size = hw->nvm.flash_size;
+ nvm_data = vzalloc(nvm_size);
+ if (!nvm_data)
+ return -ENOMEM;
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ vfree(nvm_data);
+ return -EIO;
+ }
+
+ status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
+ if (status) {
+ dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
+ nvm_size, status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+ ice_release_nvm(hw);
+ vfree(nvm_data);
+ return -EIO;
+ }
+
+ ice_release_nvm(hw);
+
+ *data = nvm_data;
+
+ return 0;
+}
+
+static const struct devlink_region_ops ice_nvm_region_ops = {
+ .name = "nvm-flash",
+ .destructor = vfree,
+ .snapshot = ice_devlink_nvm_snapshot,
+};
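/*
 * The .destructor here must match the snapshot allocator: the buffer handed
 * back through *data comes from vzalloc() in ice_devlink_nvm_snapshot(), so
 * devlink releases each snapshot with vfree() when it is deleted.
 */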
+
+/**
+ * ice_devlink_init_regions - Initialize devlink regions
+ * @pf: the PF device structure
+ *
+ * Create devlink regions used to enable access to dump the contents of the
+ * flash memory on the device.
+ */
+void ice_devlink_init_regions(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ u64 nvm_size;
+
+ nvm_size = pf->hw.nvm.flash_size;
+ pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
+ nvm_size);
+ if (IS_ERR(pf->nvm_region)) {
+ dev_err(dev, "failed to create NVM devlink region, err %ld\n",
+ PTR_ERR(pf->nvm_region));
+ pf->nvm_region = NULL;
+ }
+}
+
+/**
+ * ice_devlink_destroy_regions - Destroy devlink regions
+ * @pf: the PF device structure
+ *
+ * Remove previously created regions for this PF.
+ */
+void ice_devlink_destroy_regions(struct ice_pf *pf)
+{
+ if (pf->nvm_region)
+ devlink_region_destroy(pf->nvm_region);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
new file mode 100644
index 000000000000..6e806a08dc23
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_DEVLINK_H_
+#define _ICE_DEVLINK_H_
+
+struct ice_pf *ice_allocate_pf(struct device *dev);
+
+int ice_devlink_register(struct ice_pf *pf);
+void ice_devlink_unregister(struct ice_pf *pf);
+int ice_devlink_create_port(struct ice_pf *pf);
+void ice_devlink_destroy_port(struct ice_pf *pf);
+
+void ice_devlink_init_regions(struct ice_pf *pf);
+void ice_devlink_destroy_regions(struct ice_pf *pf);
+
+#endif /* _ICE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 77c412a7e7a4..593fb37bd59e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -157,6 +157,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
+ ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
@@ -166,25 +167,26 @@ static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- u8 oem_ver, oem_patch, nvm_ver_hi, nvm_ver_lo;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- u16 oem_build;
+ struct ice_orom_info *orom;
+ struct ice_nvm_info *nvm;
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
+ nvm = &hw->nvm;
+ orom = &nvm->orom;
+
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
/* Display NVM version (from which the firmware version can be
* determined) which contains more pertinent information.
*/
- ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch,
- &nvm_ver_hi, &nvm_ver_lo);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%x.%02x 0x%x %d.%d.%d", nvm_ver_hi, nvm_ver_lo,
- hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
+ "%x.%02x 0x%x %d.%d.%d", nvm->major_ver, nvm->minor_ver,
+ nvm->eetrack, orom->major, orom->build, orom->patch);
- strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+ strscpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}
@@ -243,7 +245,7 @@ static int ice_get_eeprom_len(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_pf *pf = np->vsi->back;
- return (int)(pf->hw.nvm.sr_words * sizeof(u16));
+ return (int)pf->hw.nvm.flash_size;
}
static int
@@ -251,39 +253,46 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
u8 *bytes)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- u16 first_word, last_word, nwords;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
struct device *dev;
int ret = 0;
- u16 *buf;
+ u8 *buf;
dev = ice_pf_to_dev(pf);
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+ netdev_dbg(netdev, "GEEPROM cmd 0x%08x, offset 0x%08x, len 0x%08x\n",
+ eeprom->cmd, eeprom->offset, eeprom->len);
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- nwords = last_word - first_word + 1;
-
- buf = devm_kcalloc(dev, nwords, sizeof(u16), GFP_KERNEL);
+ buf = kzalloc(eeprom->len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- status = ice_read_sr_buf(hw, first_word, &nwords, buf);
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status) {
- dev_err(dev, "ice_read_sr_buf failed, err %d aq_err %d\n",
+ dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
- eeprom->len = sizeof(u16) * nwords;
ret = -EIO;
goto out;
}
- memcpy(bytes, (u8 *)buf + (eeprom->offset & 1), eeprom->len);
+ status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
+ false);
+ if (status) {
+ dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ ret = -EIO;
+ goto release;
+ }
+
+ memcpy(bytes, buf, eeprom->len);
+release:
+ ice_release_nvm(hw);
out:
- devm_kfree(dev, buf);
+ kfree(buf);
return ret;
}
@@ -462,7 +471,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
if (status)
goto err_setup_rx_ring;
- status = ice_vsi_start_rx_rings(vsi);
+ status = ice_vsi_start_all_rx_rings(vsi);
if (status)
goto err_start_rx_ring;
@@ -494,7 +503,7 @@ static int ice_lbtest_disable_rings(struct ice_vsi *vsi)
netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, status);
- status = ice_vsi_stop_rx_rings(vsi);
+ status = ice_vsi_stop_all_rx_rings(vsi);
if (status)
netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n",
vsi->vsi_num, status);
@@ -672,7 +681,7 @@ static u64 ice_loopback_test(struct net_device *netdev)
test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
if (!test_vsi) {
- netdev_err(netdev, "Failed to create a VSI for the loopback test");
+ netdev_err(netdev, "Failed to create a VSI for the loopback test\n");
return 1;
}
@@ -731,7 +740,7 @@ lbtest_free_frame:
devm_kfree(dev, tx_frame);
remove_mac_filters:
if (ice_remove_mac(&pf->hw, &tmp_list))
- netdev_err(netdev, "Could not remove MAC filter for the test VSI");
+ netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
free_mac_list:
ice_free_fltr_list(dev, &tmp_list);
lbtest_mac_dis:
@@ -744,7 +753,7 @@ lbtest_rings_dis:
lbtest_vsi_close:
test_vsi->netdev = NULL;
if (ice_vsi_release(test_vsi))
- netdev_err(netdev, "Failed to remove the test VSI");
+ netdev_err(netdev, "Failed to remove the test VSI\n");
return ret;
}
@@ -834,7 +843,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
int status = ice_open(netdev);
if (status) {
- dev_err(dev, "Could not open device %s, err %d",
+ dev_err(dev, "Could not open device %s, err %d\n",
pf->int_name, status);
}
}
@@ -1091,7 +1100,6 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
fecparam->active_fec = ETHTOOL_FEC_BASER;
break;
case ICE_AQ_LINK_25G_RS_528_FEC_EN:
- /* fall through */
case ICE_AQ_LINK_25G_RS_544_FEC_EN:
fecparam->active_fec = ETHTOOL_FEC_RS;
break;
@@ -1132,6 +1140,33 @@ done:
}
/**
+ * ice_nway_reset - restart autonegotiation
+ * @netdev: network interface device structure
+ */
+static int ice_nway_reset(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_port_info *pi;
+ enum ice_status status;
+
+ pi = vsi->port_info;
+ /* If VSI state is up, then restart autoneg with link up */
+ if (!test_bit(__ICE_DOWN, vsi->back->state))
+ status = ice_aq_set_link_restart_an(pi, true, NULL);
+ else
+ status = ice_aq_set_link_restart_an(pi, false, NULL);
+
+ if (status) {
+ netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
+ status, pi->hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
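/*
 * This is the former ice_nway_reset() moved ahead of ice_set_priv_flags()
 * (its old definition is deleted further down) so the fw-lldp-agent flag
 * handler below can restart autonegotiation directly.
 */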
+
+/**
* ice_get_priv_flags - report device private flags
* @netdev: network interface device structure
*
@@ -1264,6 +1299,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
status = ice_cfg_lldp_mib_change(&pf->hw, true);
if (status)
dev_dbg(dev, "Fail to enable MIB change events\n");
+
+ ice_nway_reset(netdev);
}
}
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
@@ -1781,7 +1818,6 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
Asym_Pause);
break;
case ICE_FC_PFC:
- /* fall through */
default:
ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause);
ethtool_link_ksettings_del_link_mode(ks, lp_advertising,
@@ -2776,30 +2812,6 @@ done:
return err;
}
-static int ice_nway_reset(struct net_device *netdev)
-{
- /* restart autonegotiation */
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_port_info *pi;
- enum ice_status status;
-
- pi = vsi->port_info;
- /* If VSI state is up, then restart autoneg with link up */
- if (!test_bit(__ICE_DOWN, vsi->back->state))
- status = ice_aq_set_link_restart_an(pi, true, NULL);
- else
- status = ice_aq_set_link_restart_an(pi, false, NULL);
-
- if (status) {
- netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
- status, pi->hw->adminq.sq_last_status);
- return -EIO;
- }
-
- return 0;
-}
-
/**
* ice_get_pauseparam - Get Flow Control status
* @netdev: network interface device structure
@@ -3453,12 +3465,6 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
case ICE_TX_CONTAINER:
- if (ec->tx_coalesce_usecs_high) {
- netdev_info(vsi->netdev, "setting %s-usecs-high is not supported\n",
- c_type_str);
- return -EINVAL;
- }
-
use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
coalesce_usecs = ec->tx_coalesce_usecs;
@@ -3535,53 +3541,6 @@ ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
}
/**
- * ice_is_coalesce_param_invalid - check for unsupported coalesce parameters
- * @netdev: pointer to the netdev associated with this query
- * @ec: ethtool structure to fill with driver's coalesce settings
- *
- * Print netdev info if driver doesn't support one of the parameters
- * and return error. When any parameters will be implemented, remove only
- * this parameter from param array.
- */
-static int
-ice_is_coalesce_param_invalid(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
- struct ice_ethtool_not_used {
- u32 value;
- const char *name;
- } param[] = {
- {ec->stats_block_coalesce_usecs, "stats-block-usecs"},
- {ec->rate_sample_interval, "sample-interval"},
- {ec->pkt_rate_low, "pkt-rate-low"},
- {ec->pkt_rate_high, "pkt-rate-high"},
- {ec->rx_max_coalesced_frames, "rx-frames"},
- {ec->rx_coalesce_usecs_irq, "rx-usecs-irq"},
- {ec->rx_max_coalesced_frames_irq, "rx-frames-irq"},
- {ec->tx_max_coalesced_frames, "tx-frames"},
- {ec->tx_coalesce_usecs_irq, "tx-usecs-irq"},
- {ec->tx_max_coalesced_frames_irq, "tx-frames-irq"},
- {ec->rx_coalesce_usecs_low, "rx-usecs-low"},
- {ec->rx_max_coalesced_frames_low, "rx-frames-low"},
- {ec->tx_coalesce_usecs_low, "tx-usecs-low"},
- {ec->tx_max_coalesced_frames_low, "tx-frames-low"},
- {ec->rx_max_coalesced_frames_high, "rx-frames-high"},
- {ec->tx_max_coalesced_frames_high, "tx-frames-high"}
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(param); i++) {
- if (param[i].value) {
- netdev_info(netdev, "Setting %s not supported\n",
- param[i].name);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-/**
* ice_print_if_odd_usecs - print message if user tries to set odd [tx|rx]-usecs
* @netdev: netdev used for print
* @itr_setting: previous user setting
@@ -3621,9 +3580,6 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- if (ice_is_coalesce_param_invalid(netdev, ec))
- return -EINVAL;
-
if (q_num < 0) {
struct ice_q_vector *q_vector = vsi->q_vectors[0];
int v_idx;
@@ -3818,6 +3774,9 @@ ice_get_module_eeprom(struct net_device *netdev,
}
static const struct ethtool_ops ice_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_RX_USECS_HIGH,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
@@ -3867,6 +3826,7 @@ static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
.get_regs = ice_get_regs,
.get_msglevel = ice_get_msglevel,
.set_msglevel = ice_set_msglevel,
+ .get_link = ethtool_op_get_link,
.get_eeprom_len = ice_get_eeprom_len,
.get_eeprom = ice_get_eeprom,
.get_strings = ice_get_strings,
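/*
 * With .supported_coalesce_params declared, the ethtool core rejects any
 * coalesce field outside this mask with -EINVAL before the driver callback
 * runs, which is what makes the hand-rolled parameter checks removed above
 * (ice_is_coalesce_param_invalid() and the tx_coalesce_usecs_high test)
 * redundant.
 */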
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 99208946224c..42bac3ec5526 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -3471,6 +3471,24 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
}
/**
+ * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
+ * @hw: pointer to the HW struct
+ * @idx: the index of the TCAM entry to remove
+ * @chg: the list of change structures to search
+ */
+static void
+ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
+{
+ struct ice_chs_chg *pos, *tmp;
+
+ list_for_each_entry_safe(tmp, pos, chg, list_entry)
+ if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
+ list_del(&tmp->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), tmp);
+ }
+}
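/*
 * The _safe iterator variant above is required because the loop body
 * deletes the current entry; the second cursor holds a look-ahead pointer
 * so list_del() does not break the traversal.
 */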
+
+/**
* ice_prof_tcam_ena_dis - add enable or disable TCAM change
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -3489,14 +3507,19 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
enum ice_status status;
struct ice_chs_chg *p;
- /* Default: enable means change the low flag bit to don't care */
- u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
- u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
/* if disabling, free the TCAM */
if (!enable) {
- status = ice_free_tcam_ent(hw, blk, tcam->tcam_idx);
+ status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
+
+ /* if we have already created a change for this TCAM entry, then
+ * we need to remove that entry to prevent writing to a TCAM
+ * entry we will no longer own.
+ */
+ ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
tcam->tcam_idx = 0;
tcam->in_use = 0;
return status;
@@ -3612,11 +3635,12 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @blk: hardware block
* @vsig: the VSIG to which this profile is to be added
* @hdl: the profile handle indicating the profile to add
+ * @rev: true to add entries to the end of the list
* @chg: the change list
*/
static enum ice_status
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
- struct list_head *chg)
+ bool rev, struct list_head *chg)
{
/* Masks that ignore flags */
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -3625,7 +3649,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
- u16 i;
+ u16 vsig_idx, i;
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
@@ -3687,8 +3711,13 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
}
/* add profile to VSIG */
- list_add(&t->list,
- &hw->blk[blk].xlt2.vsig_tbl[(vsig & ICE_VSIG_IDX_M)].prop_lst);
+ vsig_idx = vsig & ICE_VSIG_IDX_M;
+ if (rev)
+ list_add_tail(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
+ else
+ list_add(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
return 0;
@@ -3728,7 +3757,7 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
if (status)
goto err_ice_create_prof_id_vsig;
- status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, chg);
+ status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
if (status)
goto err_ice_create_prof_id_vsig;
@@ -3753,11 +3782,13 @@ err_ice_create_prof_id_vsig:
* @blk: hardware block
* @vsi: the initial VSI that will be in VSIG
* @lst: the list of profile that will be added to the VSIG
+ * @new_vsig: return of new VSIG
* @chg: the change list
*/
static enum ice_status
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
- struct list_head *lst, struct list_head *chg)
+ struct list_head *lst, u16 *new_vsig,
+ struct list_head *chg)
{
struct ice_vsig_prof *t;
enum ice_status status;
@@ -3772,12 +3803,15 @@ ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
return status;
list_for_each_entry(t, lst, list) {
+ /* Reverse the order here since we are copying the list */
status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
- chg);
+ true, chg);
if (status)
return status;
}
+ *new_vsig = vsig;
+
return 0;
}
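/*
 * A minimal sketch of why the copy path passes rev=true: list_add()
 * inserts at the head, so copying [A, B, C] with repeated list_add()
 * produces [C, B, A], while list_add_tail() appends and preserves the
 * original order:
 *
 *	list_for_each_entry(t, src, list)        // visits A, B, C
 *		list_add(&dup->list, dst);       // dst becomes C, B, A
 *
 *	list_for_each_entry(t, src, list)
 *		list_add_tail(&dup->list, dst);  // dst stays A, B, C
 */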
@@ -3899,7 +3933,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* not sharing entries and we can simply add the new
* profile to the VSIG.
*/
- status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, &chg);
+ status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
+ &chg);
if (status)
goto err_ice_add_prof_id_flow;
@@ -3910,7 +3945,8 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
} else {
/* No match, so we need a new VSIG */
status = ice_create_vsig_from_lst(hw, blk, vsi,
- &union_lst, &chg);
+ &union_lst, &vsig,
+ &chg);
if (status)
goto err_ice_add_prof_id_flow;
@@ -4076,7 +4112,8 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* new VSIG and TCAM entries
*/
status = ice_create_vsig_from_lst(hw, blk, vsi,
- &copy, &chg);
+ &copy, &vsig,
+ &chg);
if (status)
goto err_ice_rem_prof_id_flow;
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index a05ceb59863b..3de862a3c789 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -694,7 +694,7 @@ out:
* ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
* @seg: packet segment the field being set belongs to
* @fld: field to be set
- * @type: type of the field
+ * @field_type: type of the field
* @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
* entry's input buffer
* @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
@@ -715,16 +715,16 @@ out:
*/
static void
ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
- enum ice_flow_fld_match_type type, u16 val_loc,
+ enum ice_flow_fld_match_type field_type, u16 val_loc,
u16 mask_loc, u16 last_loc)
{
u64 bit = BIT_ULL(fld);
seg->match |= bit;
- if (type == ICE_FLOW_FLD_TYPE_RANGE)
+ if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
seg->range |= bit;
- seg->fields[fld].type = type;
+ seg->fields[fld].type = field_type;
seg->fields[fld].src.val = val_loc;
seg->fields[fld].src.mask = mask_loc;
seg->fields[fld].src.last = last_loc;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 6db3d0494127..1d37a9f02c1c 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -85,6 +85,7 @@
#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, 0)
#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8)
+#define QRXFLXP_CNTXT_TS_M BIT(11)
#define GLGEN_RSTAT 0x000B8188
#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0)
#define GLGEN_RSTCTL 0x000B8180
@@ -217,6 +218,8 @@
#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
+#define GL_MDCK_TX_TDPU 0x00049348
+#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
#define GL_MDET_RX 0x00294C00
#define GL_MDET_RX_QNUM_S 0
#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0)
@@ -286,6 +289,8 @@
#define GL_PWR_MODE_CTL 0x000B820C
#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
+#define GLDCB_RTCTQ_RXQNUM_S 0
+#define GLDCB_RTCTQ_RXQNUM_M ICE_M(0x7FF, 0)
#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d974e2fa3e63..2f256bf45efc 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -9,11 +9,11 @@
/**
* ice_vsi_type_str - maps VSI type enum to string equivalents
- * @type: VSI type enum
+ * @vsi_type: VSI type enum
*/
-const char *ice_vsi_type_str(enum ice_vsi_type type)
+const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
{
- switch (type) {
+ switch (vsi_type) {
case ICE_VSI_PF:
return "ICE_VSI_PF";
case ICE_VSI_VF:
@@ -26,16 +26,26 @@ const char *ice_vsi_type_str(enum ice_vsi_type type)
}
/**
- * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
+ * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
* @vsi: the VSI being configured
* @ena: start or stop the Rx rings
+ *
+ * First enable/disable all of the Rx rings and flush any remaining writes,
+ * then verify that every ring reached the requested state. Issuing all of
+ * the register writes up front lets them complete before the per-ring wait
+ * for the hardware change begins.
*/
-static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
int i, ret = 0;
+ for (i = 0; i < vsi->num_rxq; i++)
+ ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
+
+ ice_flush(&vsi->back->hw);
+
for (i = 0; i < vsi->num_rxq; i++) {
- ret = ice_vsi_ctrl_rx_ring(vsi, ena, i);
+ ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
if (ret)
break;
}
@@ -111,7 +121,6 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
- /* fall through */
case ICE_VSI_LB:
vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
@@ -169,12 +178,12 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vf = &pf->vf[vsi->vf_id];
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
- /* pf->num_vf_msix includes (VF miscellaneous vector +
+ /* pf->num_msix_per_vf includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is the number
* of queue vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
* original vector count
*/
- vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
+ vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
break;
case ICE_VSI_LB:
vsi->alloc_txq = 1;
@@ -341,13 +350,13 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
/**
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
- * @type: type of VSI
+ * @vsi_type: type of VSI
* @vf_id: ID of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
static struct ice_vsi *
-ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
+ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
@@ -368,13 +377,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
if (!vsi)
goto unlock_pf;
- vsi->type = type;
+ vsi->type = vsi_type;
vsi->back = pf;
set_bit(__ICE_DOWN, vsi->state);
vsi->idx = pf->next_vsi;
- if (type == ICE_VSI_VF)
+ if (vsi_type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
else
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
@@ -433,7 +442,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
.scatter_count = ICE_MAX_SCATTER_TXQS,
.vsi_map = vsi->txq_map,
.vsi_map_offset = 0,
- .mapping_mode = vsi->tx_mapping_mode
+ .mapping_mode = ICE_VSI_MAP_CONTIG
};
struct ice_qs_cfg rx_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
@@ -443,18 +452,21 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
.scatter_count = ICE_MAX_SCATTER_RXQS,
.vsi_map = vsi->rxq_map,
.vsi_map_offset = 0,
- .mapping_mode = vsi->rx_mapping_mode
+ .mapping_mode = ICE_VSI_MAP_CONTIG
};
- int ret = 0;
-
- vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
- vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
+ int ret;
ret = __ice_vsi_get_qs(&tx_qs_cfg);
- if (!ret)
- ret = __ice_vsi_get_qs(&rx_qs_cfg);
+ if (ret)
+ return ret;
+ vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;
- return ret;
+ ret = __ice_vsi_get_qs(&rx_qs_cfg);
+ if (ret)
+ return ret;
+ vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;
+
+ return 0;
}
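/*
 * The mapping mode is now read back from the cfg structs because
 * __ice_vsi_get_qs() may fall back from the requested contiguous layout to
 * scatter mode; the VSI must record the mode that was actually granted.
 */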
/**
@@ -559,12 +571,11 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
break;
case ICE_VSI_VF:
- /* VF VSI will gets a small RSS table
- * For VSI_LUT, LUT size should be set to 64 bytes
+ /* VF VSI will get a small RSS table.
+ * For VSI_LUT, LUT size should be set to 64 bytes.
*/
vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
- vsi->rss_size = min_t(int, num_online_cpus(),
- BIT(cap->rss_table_entry_width));
+ vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
break;
case ICE_VSI_LB:
@@ -672,7 +683,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
if (vsi->type == ICE_VSI_PF)
max_rss = ICE_MAX_LG_RSS_QS;
else
- max_rss = ICE_MAX_SMALL_RSS_QS;
+ max_rss = ICE_MAX_RSS_QS_PER_VF;
qcount_rx = min_t(int, rx_numq_tc, max_rss);
if (!vsi->req_rxq)
qcount_rx = min_t(int, qcount_rx,
@@ -804,7 +815,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
ctxt->info = vsi->info;
switch (vsi->type) {
case ICE_VSI_LB:
- /* fall through */
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
@@ -897,6 +907,109 @@ out:
}
/**
+ * ice_free_res - free a block of resources
+ * @res: pointer to the resource
+ * @index: starting index previously returned by ice_get_res
+ * @id: identifier to track owner
+ *
+ * Returns number of resources freed
+ */
+int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
+{
+ int count = 0;
+ int i;
+
+ if (!res || index >= res->end)
+ return -EINVAL;
+
+ id |= ICE_RES_VALID_BIT;
+ for (i = index; i < res->end && res->list[i] == id; i++) {
+ res->list[i] = 0;
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * ice_search_res - Search the tracker for a block of resources
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
+ */
+static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ int start = 0, end = 0;
+
+ if (needed > res->end)
+ return -ENOMEM;
+
+ id |= ICE_RES_VALID_BIT;
+
+ do {
+ /* skip already allocated entries */
+ if (res->list[end++] & ICE_RES_VALID_BIT) {
+ start = end;
+ if ((start + needed) > res->end)
+ break;
+ }
+
+ if (end == (start + needed)) {
+ int i = start;
+
+ /* there was enough, so assign it to the requestor */
+ while (i != end)
+ res->list[i++] = id;
+
+ return start;
+ }
+ } while (end < res->end);
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_get_free_res_count - Get free count from a resource tracker
+ * @res: Resource tracker instance
+ */
+static u16 ice_get_free_res_count(struct ice_res_tracker *res)
+{
+ u16 i, count = 0;
+
+ for (i = 0; i < res->end; i++)
+ if (!(res->list[i] & ICE_RES_VALID_BIT))
+ count++;
+
+ return count;
+}
+
+/**
+ * ice_get_res - get a block of resources
+ * @pf: board private structure
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or negative for error
+ */
+int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ if (!res || !pf)
+ return -EINVAL;
+
+ if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
+ dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
+ needed, res->num_entries, id);
+ return -EINVAL;
+ }
+
+ return ice_search_res(res, needed, id);
+}
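/*
 * A worked example of the tracker, assuming res->list = [id|V, id|V, 0, 0]
 * (V = ICE_RES_VALID_BIT): ice_get_res(pf, res, 2, id) skips the two
 * allocated slots, stamps indices 2 and 3 with (id | V) and returns 2;
 * a later ice_free_res(res, 2, id) clears exactly that run and returns
 * the count 2.
 */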
+
+/**
* ice_vsi_setup_vector_base - Set up the base vector for the given VSI
* @vsi: ptr to the VSI
*
@@ -928,8 +1041,9 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
if (vsi->base_vector < 0) {
- dev_err(dev, "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
- num_q_vectors, vsi->vsi_num, vsi->base_vector);
+ dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
+ ice_get_free_res_count(pf->irq_tracker),
+ ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
return -ENOENT;
}
pf->num_avail_sw_msix -= num_q_vectors;
@@ -1348,7 +1462,9 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
list_add(&tmp->list_entry, &tmp_add_list);
status = ice_add_vlan(&pf->hw, &tmp_add_list);
- if (status) {
+ if (!status) {
+ vsi->num_vlan++;
+ } else {
err = -ENODEV;
dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
vsi->vsi_num);
@@ -1390,10 +1506,12 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
list_add(&list->list_entry, &tmp_add_list);
status = ice_remove_vlan(&pf->hw, &tmp_add_list);
- if (status == ICE_ERR_DOES_NOT_EXIST) {
+ if (!status) {
+ vsi->num_vlan--;
+ } else if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
vid, vsi->vsi_num, status);
- } else if (status) {
+ } else {
dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
vid, vsi->vsi_num, status);
err = -EIO;
@@ -1678,25 +1796,25 @@ out:
}
/**
- * ice_vsi_start_rx_rings - start VSI's Rx rings
- * @vsi: the VSI whose rings are to be started
+ * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
+ * @vsi: the VSI whose rings are to be enabled
*
* Returns 0 on success and a negative value on error
*/
-int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
+int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
{
- return ice_vsi_ctrl_rx_rings(vsi, true);
+ return ice_vsi_ctrl_all_rx_rings(vsi, true);
}
/**
- * ice_vsi_stop_rx_rings - stop VSI's Rx rings
- * @vsi: the VSI
+ * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
+ * @vsi: the VSI whose rings are to be disabled
*
* Returns 0 on success and a negative value on error
*/
-int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
+int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
{
- return ice_vsi_ctrl_rx_rings(vsi, false);
+ return ice_vsi_ctrl_all_rx_rings(vsi, false);
}
/**
@@ -1756,6 +1874,20 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled
+ * @vsi: VSI to check
+ *
+ * Returns true if Rx VLAN pruning is enabled on the VSI, false otherwise.
+ */
+bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
+{
+ if (!vsi)
+ return false;
+
+ return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
+}
+
+/**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
@@ -1952,7 +2084,7 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @pi: pointer to the port_info instance
- * @type: VSI type
+ * @vsi_type: VSI type
* @vf_id: defines VF ID to which this VSI connects. This field is meant to be
* used only for ICE_VSI_VF VSI type. For other VSI types, should
* fill-in ICE_INVAL_VFID as input.
@@ -1964,7 +2096,7 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
*/
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type type, u16 vf_id)
+ enum ice_vsi_type vsi_type, u16 vf_id)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
@@ -1972,10 +2104,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
- if (type == ICE_VSI_VF)
- vsi = ice_vsi_alloc(pf, type, vf_id);
+ if (vsi_type == ICE_VSI_VF)
+ vsi = ice_vsi_alloc(pf, vsi_type, vf_id);
else
- vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);
+ vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID);
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
@@ -2025,6 +2157,17 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_vector_base;
+ /* Always add VLAN ID 0 switch rule by default. This is needed
+ * in order to allow all untagged and 0 tagged priority traffic
+ * if Rx VLAN pruning is enabled. Also there are cases where we
+ * don't get the call to add VLAN 0 via ice_vlan_rx_add_vid()
+ * so this handles those cases (i.e. adding the PF to a bridge
+ * without the 8021q module loaded).
+ */
+ ret = ice_vsi_add_vlan(vsi, 0);
+ if (ret)
+ goto unroll_clear_rings;
+
ice_vsi_map_rings_to_vectors(vsi);
/* Do not exit if configuring RSS had an issue, at least
@@ -2104,6 +2247,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
return vsi;
+unroll_clear_rings:
+ ice_vsi_clear_rings(vsi);
unroll_vector_base:
/* reclaim SW interrupts back to the common pool */
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
@@ -2299,94 +2444,6 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
}
/**
- * ice_free_res - free a block of resources
- * @res: pointer to the resource
- * @index: starting index previously returned by ice_get_res
- * @id: identifier to track owner
- *
- * Returns number of resources freed
- */
-int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
-{
- int count = 0;
- int i;
-
- if (!res || index >= res->end)
- return -EINVAL;
-
- id |= ICE_RES_VALID_BIT;
- for (i = index; i < res->end && res->list[i] == id; i++) {
- res->list[i] = 0;
- count++;
- }
-
- return count;
-}
-
-/**
- * ice_search_res - Search the tracker for a block of resources
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- *
- * Returns the base item index of the block, or -ENOMEM for error
- */
-static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
-{
- int start = 0, end = 0;
-
- if (needed > res->end)
- return -ENOMEM;
-
- id |= ICE_RES_VALID_BIT;
-
- do {
- /* skip already allocated entries */
- if (res->list[end++] & ICE_RES_VALID_BIT) {
- start = end;
- if ((start + needed) > res->end)
- break;
- }
-
- if (end == (start + needed)) {
- int i = start;
-
- /* there was enough, so assign it to the requestor */
- while (i != end)
- res->list[i++] = id;
-
- return start;
- }
- } while (end < res->end);
-
- return -ENOMEM;
-}
-
-/**
- * ice_get_res - get a block of resources
- * @pf: board private structure
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- *
- * Returns the base item index of the block, or negative for error
- */
-int
-ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
-{
- if (!res || !pf)
- return -EINVAL;
-
- if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
- dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
- needed, res->num_entries, id);
- return -EINVAL;
- }
-
- return ice_search_res(res, needed, id);
-}
-
-/**
* ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
* @vsi: the VSI being un-configured
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index e2c0dadce920..04ca00799364 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -6,7 +6,7 @@
#include "ice.h"
-const char *ice_vsi_type_str(enum ice_vsi_type type);
+const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
int
ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
@@ -30,9 +30,9 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi);
int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena);
-int ice_vsi_start_rx_rings(struct ice_vsi *vsi);
+int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
-int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
+int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi);
int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
@@ -42,6 +42,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
+bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi);
+
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
@@ -56,7 +58,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type type, u16 vf_id);
+ enum ice_vsi_type vsi_type, u16 vf_id);
void ice_napi_del(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5ef28052c0f8..306a4e5b2320 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -10,6 +10,7 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
+#include "ice_devlink.h"
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 8
@@ -706,7 +707,6 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
/* Get FEC mode based on negotiated link info */
switch (vsi->port_info->phy.link_info.fec_info) {
case ICE_AQ_LINK_25G_RS_528_FEC_EN:
- /* fall through */
case ICE_AQ_LINK_25G_RS_544_FEC_EN:
fec = "RS-FEC";
break;
@@ -1029,6 +1029,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
if (ice_handle_link_event(pf, &event))
dev_err(dev, "Could not handle link event\n");
break;
+ case ice_aqc_opc_event_lan_overflow:
+ ice_vf_lan_overflow_event(pf, &event);
+ break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
break;
@@ -1185,20 +1188,28 @@ static void ice_service_timer(struct timer_list *t)
* ice_handle_mdd_event - handle malicious driver detect event
* @pf: pointer to the PF structure
*
- * Called from service task. OICR interrupt handler indicates MDD event
+ * Called from service task. OICR interrupt handler indicates MDD event.
+ * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
+ * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
+ * disable the queue, the PF can be configured to reset the VF using ethtool
+ * private flag mdd-auto-reset-vf.
*/
static void ice_handle_mdd_event(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- bool mdd_detected = false;
u32 reg;
int i;
- if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
+ if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
+ /* Since the VF MDD event logging is rate limited, check if
+ * there are pending MDD events.
+ */
+ ice_print_vfs_mdd_events(pf);
return;
+ }
- /* find what triggered the MDD event */
+ /* find what triggered an MDD event */
reg = rd32(hw, GL_MDET_TX_PQM);
if (reg & GL_MDET_TX_PQM_VALID_M) {
u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
@@ -1214,7 +1225,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
- mdd_detected = true;
}
reg = rd32(hw, GL_MDET_TX_TCLAN);
@@ -1232,7 +1242,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
- mdd_detected = true;
}
reg = rd32(hw, GL_MDET_RX);
@@ -1250,85 +1259,85 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_RX, 0xffffffff);
- mdd_detected = true;
}
- if (mdd_detected) {
- bool pf_mdd_detected = false;
-
- reg = rd32(hw, PF_MDET_TX_PQM);
- if (reg & PF_MDET_TX_PQM_VALID_M) {
- wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
- dev_info(dev, "TX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
+ /* check to see if this PF caused an MDD event */
+ reg = rd32(hw, PF_MDET_TX_PQM);
+ if (reg & PF_MDET_TX_PQM_VALID_M) {
+ wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
+ }
- reg = rd32(hw, PF_MDET_TX_TCLAN);
- if (reg & PF_MDET_TX_TCLAN_VALID_M) {
- wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
- dev_info(dev, "TX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
+ reg = rd32(hw, PF_MDET_TX_TCLAN);
+ if (reg & PF_MDET_TX_TCLAN_VALID_M) {
+ wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
+ }
- reg = rd32(hw, PF_MDET_RX);
- if (reg & PF_MDET_RX_VALID_M) {
- wr32(hw, PF_MDET_RX, 0xFFFF);
- dev_info(dev, "RX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
- /* Queue belongs to the PF initiate a reset */
- if (pf_mdd_detected) {
- set_bit(__ICE_NEEDS_RESTART, pf->state);
- ice_service_task_schedule(pf);
- }
+ reg = rd32(hw, PF_MDET_RX);
+ if (reg & PF_MDET_RX_VALID_M) {
+ wr32(hw, PF_MDET_RX, 0xFFFF);
+ if (netif_msg_rx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
}
- /* check to see if one of the VFs caused the MDD */
+ /* Check to see if one of the VFs caused an MDD event, and then
+ * increment counters and set print pending
+ */
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
- bool vf_mdd_detected = false;
-
reg = rd32(hw, VP_MDET_TX_PQM(i));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "TX driver issue detected on VF %d\n",
- i);
+ vf->mdd_tx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
+ i);
}
reg = rd32(hw, VP_MDET_TX_TCLAN(i));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "TX driver issue detected on VF %d\n",
- i);
+ vf->mdd_tx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
+ i);
}
reg = rd32(hw, VP_MDET_TX_TDPU(i));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "TX driver issue detected on VF %d\n",
- i);
+ vf->mdd_tx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_tx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
+ i);
}
reg = rd32(hw, VP_MDET_RX(i));
if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF);
- vf_mdd_detected = true;
- dev_info(dev, "RX driver issue detected on VF %d\n",
- i);
- }
-
- if (vf_mdd_detected) {
- vf->num_mdd_events++;
- if (vf->num_mdd_events &&
- vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
- dev_info(dev, "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
- i, vf->num_mdd_events);
+ vf->mdd_rx_events.count++;
+ set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
+ if (netif_msg_rx_err(pf))
+ dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
+ i);
+
+ /* Since the queue is disabled on VF Rx MDD events, the
+ * PF can be configured to reset the VF through ethtool
+ * private flag mdd-auto-reset-vf.
+ */
+ if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ ice_reset_vf(&pf->vf[i], false);
}
}
+
+ ice_print_vfs_mdd_events(pf);
}
/**
@@ -1510,7 +1519,7 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
- hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN;
+ hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
@@ -1916,8 +1925,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
ret = ice_down(vsi);
if (ret) {
- NL_SET_ERR_MSG_MOD(extack,
- "Preparing device for XDP attach failed");
+ NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
return ret;
}
}
@@ -1926,13 +1934,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
vsi->num_xdp_txq = vsi->alloc_txq;
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack,
- "Setting up XDP Tx resources failed");
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack,
- "Freeing XDP Tx resources failed");
+ NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
} else {
ice_vsi_assign_bpf_prog(vsi, prog);
}
@@ -1965,8 +1971,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct ice_vsi *vsi = np->vsi;
if (vsi->type != ICE_VSI_PF) {
- NL_SET_ERR_MSG_MOD(xdp->extack,
- "XDP can be loaded only on PF VSI");
+ NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
return -EINVAL;
}
@@ -1993,6 +1998,14 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
u32 val;
+ /* Disable anti-spoof detection interrupt to prevent spurious event
+ * interrupts during a function reset. Anti-spoof functionality is
+ * still supported.
+ */
+ val = rd32(hw, GL_MDCK_TX_TDPU);
+ val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
+ wr32(hw, GL_MDCK_TX_TDPU, val);
+
/* clear things first */
wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
rd32(hw, PFINT_OICR); /* read to clear */
@@ -2042,8 +2055,16 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
}
if (oicr & PFINT_OICR_VFLR_M) {
- ena_mask &= ~PFINT_OICR_VFLR_M;
- set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ /* disable any further VFLR event notifications */
+ if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
+ u32 reg = rd32(hw, PFINT_OICR_ENA);
+
+ reg &= ~PFINT_OICR_VFLR_M;
+ wr32(hw, PFINT_OICR_ENA, reg);
+ } else {
+ ena_mask &= ~PFINT_OICR_VFLR_M;
+ set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
+ }
}
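/* The two paths above differ in persistence: clearing the bit directly
 * in PFINT_OICR_ENA disables VFLR notifications in hardware until they
 * are explicitly re-enabled, which is what the removal path wants while
 * it frees the VFs. The normal path clears the bit only in the local
 * ena_mask (written back to PFINT_OICR_ENA on exit from this handler)
 * and sets __ICE_VFLR_EVENT_PENDING so the service task does the work.
 */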
if (oicr & PFINT_OICR_GRST_M) {
@@ -2351,10 +2372,16 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
u8 mac_addr[ETH_ALEN];
int err;
+ err = ice_devlink_create_port(pf);
+ if (err)
+ return err;
+
netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
vsi->alloc_rxq);
- if (!netdev)
- return -ENOMEM;
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_destroy_devlink_port;
+ }
vsi->netdev = netdev;
np = netdev_priv(netdev);
@@ -2384,7 +2411,9 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
err = register_netdev(vsi->netdev);
if (err)
- return err;
+ goto err_destroy_devlink_port;
+
+ devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
netif_carrier_off(vsi->netdev);
@@ -2392,6 +2421,11 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
netif_tx_stop_all_queues(vsi->netdev);
return 0;
+
+err_destroy_devlink_port:
+ ice_devlink_destroy_port(pf);
+
+ return err;
}
/**
@@ -2461,16 +2495,19 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
if (vsi->info.pvid)
return -EINVAL;
- /* Enable VLAN pruning when VLAN 0 is added */
- if (unlikely(!vid)) {
+ /* VLAN 0 is added by default during load/reset */
+ if (!vid)
+ return 0;
+
+ /* Enable VLAN pruning when a VLAN other than 0 is added */
+ if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
ret = ice_cfg_vlan_pruning(vsi, true, false);
if (ret)
return ret;
}
- /* Add all VLAN IDs including 0 to the switch filter. VLAN ID 0 is
- * needed to continue allowing all untagged packets since VLAN prune
- * list is applied to all packets by the switch
+ /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
+ * packets aren't pruned by the device's internal switch on Rx
*/
ret = ice_vsi_add_vlan(vsi, vid);
if (!ret) {
@@ -2500,6 +2537,10 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
if (vsi->info.pvid)
return -EINVAL;
+ /* don't allow removal of VLAN 0 */
+ if (!vid)
+ return 0;
+
/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
* information
*/
@@ -2507,8 +2548,8 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
if (ret)
return ret;
- /* Disable VLAN pruning when VLAN 0 is removed */
- if (unlikely(!vid))
+ /* Disable pruning when VLAN 0 is the only VLAN rule */
+ if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
ret = ice_cfg_vlan_pruning(vsi, false, false);
vsi->vlan_ena = false;
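/* Lifecycle implied by the two hooks above: VLAN 0 is installed at
 * load/reset and can never be removed, pruning is enabled when the first
 * non-zero VLAN is added, and it is disabled again once VLAN 0 is the
 * only remaining rule (num_vlan == 1).
 */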
@@ -2945,7 +2986,6 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
}
break;
case ICE_ERR_BUF_TOO_SHORT:
- /* fall-through */
case ICE_ERR_CFG:
dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
break;
@@ -2977,7 +3017,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
default:
break;
}
- /* fall-through */
+ fallthrough;
default:
dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
*status);
@@ -3069,30 +3109,22 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
* followed by an EUI-64 identifier (PCIe Device Serial Number)
*/
struct pci_dev *pdev = pf->pdev;
- char *opt_fw_filename = NULL;
- u32 dword;
- u8 dsn[8];
- int pos;
+ char *opt_fw_filename;
+ u64 dsn;
/* Determine the name of the optional file using the DSN (two
* dwords following the start of the DSN Capability).
*/
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- if (pos) {
- opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
- if (!opt_fw_filename)
- return NULL;
-
- pci_read_config_dword(pdev, pos + 4, &dword);
- put_unaligned_le32(dword, &dsn[0]);
- pci_read_config_dword(pdev, pos + 8, &dword);
- put_unaligned_le32(dword, &dsn[4]);
- snprintf(opt_fw_filename, NAME_MAX,
- "%sice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg",
- ICE_DDP_PKG_PATH,
- dsn[7], dsn[6], dsn[5], dsn[4],
- dsn[3], dsn[2], dsn[1], dsn[0]);
- }
+ dsn = pci_get_dsn(pdev);
+ if (!dsn)
+ return NULL;
+
+ opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
+ if (!opt_fw_filename)
+ return NULL;
+
+ snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llX.pkg",
+ ICE_DDP_PKG_PATH, dsn);
return opt_fw_filename;
}
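/* A minimal sketch of what pci_get_dsn() does for the code above; the
 * helper name is illustrative only. It mirrors the two config-space
 * reads the removed code performed by hand, and returns 0 when the DSN
 * capability is absent, which the !dsn check above relies on.
 */
static u64 example_read_dsn(struct pci_dev *pdev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/* The two dwords after the capability header form the 64-bit DSN,
	 * low dword first.
	 */
	pci_read_config_dword(pdev, pos + 4, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(pdev, pos + 8, &dword);
	dsn |= (u64)dword << 32;

	return dsn;
}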
@@ -3166,7 +3198,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
return err;
}
- pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL);
+ pf = ice_allocate_pf(dev);
if (!pf)
return -ENOMEM;
@@ -3204,6 +3236,12 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
+ err = ice_devlink_register(pf);
+ if (err) {
+ dev_err(dev, "ice_devlink_register failed: %d\n", err);
+ goto err_exit_unroll;
+ }
+
#ifndef CONFIG_DYNAMIC_DEBUG
if (debug < -1)
hw->debug_mask = debug;
@@ -3238,6 +3276,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_init_pf_unroll;
}
+ ice_devlink_init_regions(pf);
+
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
@@ -3336,6 +3376,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
return 0;
err_alloc_sw_unroll:
+ ice_devlink_destroy_port(pf);
set_bit(__ICE_SERVICE_DIS, pf->state);
set_bit(__ICE_DOWN, pf->state);
devm_kfree(dev, pf->first_sw);
@@ -3346,8 +3387,10 @@ err_init_interrupt_unroll:
devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
+ ice_devlink_destroy_regions(pf);
ice_deinit_hw(hw);
err_exit_unroll:
+ ice_devlink_unregister(pf);
pci_disable_pcie_error_reporting(pdev);
return err;
}
@@ -3370,11 +3413,15 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
+ if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
+ set_bit(__ICE_VF_RESETS_DISABLED, pf->state);
+ ice_free_vfs(pf);
+ }
+
set_bit(__ICE_DOWN, pf->state);
ice_service_task_stop(pf);
- if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
- ice_free_vfs(pf);
+ ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
@@ -3383,7 +3430,10 @@ static void ice_remove(struct pci_dev *pdev)
ice_vsi_free_q_vectors(pf->vsi[i]);
}
ice_deinit_pf(pf);
+ ice_devlink_destroy_regions(pf);
ice_deinit_hw(&pf->hw);
+ ice_devlink_unregister(pf);
+
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
* and the service task is already stopped.
@@ -3534,15 +3584,26 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822X_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
/* required last entry */
{ 0, }
};
@@ -3961,7 +4022,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
* Tx queue group list was configured and the context bits were
* programmed using ice_vsi_cfg_txqs
*/
- err = ice_vsi_start_rx_rings(vsi);
+ err = ice_vsi_start_all_rx_rings(vsi);
if (err)
return err;
@@ -4340,7 +4401,7 @@ int ice_down(struct ice_vsi *vsi)
vsi->vsi_num, tx_err);
}
- rx_err = ice_vsi_stop_rx_rings(vsi);
+ rx_err = ice_vsi_stop_all_rx_rings(vsi);
if (rx_err)
netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
vsi->vsi_num, rx_err);
@@ -5027,6 +5088,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
/**
* ice_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: Tx queue
*/
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -5064,13 +5126,13 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
/* Read interrupt register */
val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
- netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
+ netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, txqueue, tx_ring->next_to_clean,
head, tx_ring->next_to_use, val);
}
pf->tx_timeout_last_recovery = jiffies;
- netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
+ netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
pf->tx_timeout_recovery_level, txqueue);
switch (pf->tx_timeout_recovery_level) {
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 7525ac50742e..8beb675d676b 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -11,25 +11,29 @@
* @length: length of the section to be read (in bytes from the offset)
* @data: command buffer (size [bytes] = length)
* @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tell if this is a shadow RAM read
* @cd: pointer to command details structure or NULL
*
* Read the NVM using the admin queue commands (0x0701)
*/
static enum ice_status
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
- void *data, bool last_command, struct ice_sq_cd *cd)
+ void *data, bool last_command, bool read_shadow_ram,
+ struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;
cmd = &desc.params.nvm;
- /* In offset the highest byte must be zeroed. */
- if (offset & 0xFF000000)
+ if (offset > ICE_AQC_NVM_MAX_OFFSET)
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
+ if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT)
+ cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY;
+
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
@@ -42,65 +46,64 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
}
/**
- * ice_check_sr_access_params - verify params for Shadow RAM R/W operations.
- * @hw: pointer to the HW structure
- * @offset: offset in words from module start
- * @words: number of words to access
+ * ice_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors and ensures that no single
+ * read request exceeds the maximum 4KB read for a single AdminQ command.
+ *
+ * Returns a status code on failure. Note that the data pointer may be
+ * partially updated if some reads succeed before a failure.
*/
-static enum ice_status
-ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
+enum ice_status
+ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
+ bool read_shadow_ram)
{
- if ((offset + words) > hw->nvm.sr_words) {
- ice_debug(hw, ICE_DBG_NVM,
- "NVM error: offset beyond SR lmt.\n");
- return ICE_ERR_PARAM;
- }
+ enum ice_status status;
+ u32 inlen = *length;
+ u32 bytes_read = 0;
+ bool last_cmd;
- if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) {
- /* We can access only up to 4KB (one sector), in one AQ write */
- ice_debug(hw, ICE_DBG_NVM,
- "NVM error: tried to access %d words, limit is %d.\n",
- words, ICE_SR_SECTOR_SIZE_IN_WORDS);
- return ICE_ERR_PARAM;
- }
+ *length = 0;
- if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) !=
- (offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) {
- /* A single access cannot spread over two sectors */
+ /* Verify the length of the read if this is for the Shadow RAM */
+ if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
ice_debug(hw, ICE_DBG_NVM,
- "NVM error: cannot spread over two sectors.\n");
+ "NVM error: requested offset is beyond Shadow RAM limit\n");
return ICE_ERR_PARAM;
}
- return 0;
-}
+ do {
+ u32 read_size, sector_offset;
-/**
- * ice_read_sr_aq - Read Shadow RAM.
- * @hw: pointer to the HW structure
- * @offset: offset in words from module start
- * @words: number of words to read
- * @data: buffer for words reads from Shadow RAM
- * @last_command: tells the AdminQ that this is the last command
- *
- * Reads 16-bit word buffers from the Shadow RAM using the admin command.
- */
-static enum ice_status
-ice_read_sr_aq(struct ice_hw *hw, u32 offset, u16 words, u16 *data,
- bool last_command)
-{
- enum ice_status status;
+	/* ice_aq_read_nvm cannot read more than 4KB at a time.
+	 * Additionally, a read from the Shadow RAM may not cross over
+	 * a sector boundary. Conveniently, the sector size is also
+	 * 4KB.
+ */
+ sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
+ read_size = min_t(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
+ inlen - bytes_read);
- status = ice_check_sr_access_params(hw, offset, words);
+ last_cmd = !(bytes_read + read_size < inlen);
- /* values in "offset" and "words" parameters are sized as words
- * (16 bits) but ice_aq_read_nvm expects these values in bytes.
- * So do this conversion while calling ice_aq_read_nvm.
- */
- if (!status)
- status = ice_aq_read_nvm(hw, 0, 2 * offset, 2 * words, data,
- last_command, NULL);
+ status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
+ offset, read_size,
+ data + bytes_read, last_cmd,
+ read_shadow_ram, NULL);
+ if (status)
+ break;
+ bytes_read += read_size;
+ offset += read_size;
+ } while (!last_cmd);
+
+ *length = bytes_read;
return status;
}
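/* A worked trace of the sector split above, assuming ICE_AQ_MAX_BUF_LEN
 * is 4096: reading 512 bytes starting at flat offset 3968 crosses a 4KB
 * boundary and takes two passes:
 *   pass 1: sector_offset = 3968 % 4096 = 3968,
 *           read_size = min(4096 - 3968, 512 - 0)  = 128
 *   pass 2: sector_offset = 4096 % 4096 = 0,
 *           read_size = min(4096 - 0, 512 - 128)   = 384, last_cmd = true
 */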
@@ -110,75 +113,25 @@ ice_read_sr_aq(struct ice_hw *hw, u32 offset, u16 words, u16 *data,
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
- * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_aq method.
+ * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
*/
static enum ice_status
ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
+ u32 bytes = sizeof(u16);
enum ice_status status;
+ __le16 data_local;
- status = ice_read_sr_aq(hw, offset, 1, data, true);
- if (!status)
- *data = le16_to_cpu(*(__force __le16 *)data);
-
- return status;
-}
-
-/**
- * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ
- * @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
- * @words: (in) number of words to read; (out) number of words actually read
- * @data: words read from the Shadow RAM
- *
- * Reads 16 bit words (data buf) from the SR using the ice_read_sr_aq
- * method. Ownership of the NVM is taken before reading the buffer and later
- * released.
- */
-static enum ice_status
-ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
-{
- enum ice_status status;
- bool last_cmd = false;
- u16 words_read = 0;
- u16 i = 0;
-
- do {
- u16 read_size, off_w;
-
- /* Calculate number of bytes we should read in this step.
- * It's not allowed to read more than one page at a time or
- * to cross page boundaries.
- */
- off_w = offset % ICE_SR_SECTOR_SIZE_IN_WORDS;
- read_size = off_w ?
- min_t(u16, *words,
- (ICE_SR_SECTOR_SIZE_IN_WORDS - off_w)) :
- min_t(u16, (*words - words_read),
- ICE_SR_SECTOR_SIZE_IN_WORDS);
-
- /* Check if this is last command, if so set proper flag */
- if ((words_read + read_size) >= *words)
- last_cmd = true;
-
- status = ice_read_sr_aq(hw, offset, read_size,
- data + words_read, last_cmd);
- if (status)
- goto read_nvm_buf_aq_exit;
-
- /* Increment counter for words already read and move offset to
- * new read location
- */
- words_read += read_size;
- offset += read_size;
- } while (words_read < *words);
-
- for (i = 0; i < *words; i++)
- data[i] = le16_to_cpu(((__force __le16 *)data)[i]);
+ /* Note that ice_read_flat_nvm takes into account the 4Kb AdminQ and
+ * Shadow RAM sector restrictions necessary when reading from the NVM.
+ */
+ status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+ (u8 *)&data_local, true);
+ if (status)
+ return status;
-read_nvm_buf_aq_exit:
- *words = words_read;
- return status;
+ *data = le16_to_cpu(data_local);
+ return 0;
}
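/* Example: reading Shadow RAM word 0x18 (ICE_SR_NVM_DEV_STARTER_VER)
 * becomes a 2-byte flat read at byte offset 0x30; words are stored
 * little-endian on flash, hence the le16_to_cpu() above.
 */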
/**
@@ -188,7 +141,7 @@ read_nvm_buf_aq_exit:
*
* This function will request NVM ownership.
*/
-static enum ice_status
+enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
if (hw->nvm.blank_nvm_mode)
@@ -203,7 +156,7 @@ ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
*
* This function will release NVM ownership.
*/
-static void ice_release_nvm(struct ice_hw *hw)
+void ice_release_nvm(struct ice_hw *hw)
{
if (hw->nvm.blank_nvm_mode)
return;
@@ -233,6 +186,239 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
}
/**
+ * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Finds the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and returns the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ */
+enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type)
+{
+ enum ice_status status;
+ u16 pfa_len, pfa_ptr;
+ u16 next_tlv;
+
+ status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n");
+ return status;
+ }
+ status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
+ return status;
+ }
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ while (next_tlv < pfa_ptr + pfa_len) {
+ u16 tlv_sub_module_type;
+ u16 tlv_len;
+
+ /* Read TLV type */
+ status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
+ break;
+ }
+ /* Read TLV length */
+ status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
+ break;
+ }
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return 0;
+ }
+ return ICE_ERR_INVAL_SIZE;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length)
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
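/* The walk above assumes this simple word-indexed TLV layout in the PFA
 * (offsets are Shadow RAM word offsets):
 *   pfa_ptr + 0 : PFA length (words)
 *   pfa_ptr + 1 : TLV #1 type
 *   pfa_ptr + 2 : TLV #1 length (words)
 *   pfa_ptr + 3 : TLV #1 value ...
 *   next TLV    : previous TLV + its length + 2 (type and length words)
 */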
+/**
+ * ice_read_pba_string - Reads part number string from NVM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the NVM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the NVM.
+ */
+enum ice_status
+ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+ u16 pba_tlv, pba_tlv_len;
+ enum ice_status status;
+ u16 pba_word, pba_size;
+ u16 i;
+
+ status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
+ ICE_SR_PBA_BLOCK_PTR);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
+ return status;
+ }
+
+ /* pba_size is the next word */
+ status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
+ return status;
+ }
+
+ if (pba_tlv_len < pba_size) {
+ ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
+ return ICE_ERR_INVAL_SIZE;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
+ */
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+ ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
+ return ICE_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
+ return status;
+ }
+
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
+
+ return status;
+}
+
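/* Example of the unpacking above: a PBA section of two data words
 * { 0x4131, 0x3233 } unpacks high byte first into "A123" plus the NUL
 * terminator, which is why the caller's buffer must hold at least
 * pba_size * 2 + 1 bytes.
 */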
+/**
+ * ice_get_orom_ver_info - Read Option ROM version information
+ * @hw: pointer to the HW struct
+ *
+ * Read the Combo Image version data from the Boot Configuration TLV and fill
+ * in the option ROM version data.
+ */
+static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
+{
+ u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
+ struct ice_orom_info *orom = &hw->nvm.orom;
+ enum ice_status status;
+ u32 combo_ver;
+
+ status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
+ ICE_SR_BOOT_CFG_PTR);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read Boot Configuration Block TLV.\n");
+ return status;
+ }
+
+ /* Boot Configuration Block must have length at least 2 words
+ * (Combo Image Version High and Combo Image Version Low)
+ */
+ if (boot_cfg_tlv_len < 2) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Invalid Boot Configuration Block TLV size.\n");
+ return ICE_ERR_INVAL_SIZE;
+ }
+
+ status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF),
+ &combo_hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n");
+ return status;
+ }
+
+ status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1),
+ &combo_lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n");
+ return status;
+ }
+
+ combo_ver = ((u32)combo_hi << 16) | combo_lo;
+
+ orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >>
+ ICE_OROM_VER_SHIFT);
+ orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
+ orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
+ ICE_OROM_VER_BUILD_SHIFT);
+
+ return 0;
+}
+
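/* Example decode using the ICE_OROM_VER_* masks defined in ice_type.h
 * later in this patch:
 *   combo_ver = 0x01234567
 *   major = (combo_ver & 0xff000000) >> 24 = 0x01
 *   build = (combo_ver & 0x00ffff00) >> 8  = 0x2345
 *   patch =  combo_ver & 0x000000ff        = 0x67
 */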
+/**
+ * ice_discover_flash_size - Discover the available flash size.
+ * @hw: pointer to the HW struct
+ *
+ * The device flash could be up to 16MB in size. However, it is possible that
+ * the actual size is smaller. Use bisection to determine the accessible size
+ * of flash memory.
+ */
+static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
+{
+ u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
+ enum ice_status status;
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status)
+ return status;
+
+ while ((max_size - min_size) > 1) {
+ u32 offset = (max_size + min_size) / 2;
+ u32 len = 1;
+ u8 data;
+
+ status = ice_read_flat_nvm(hw, offset, &len, &data, false);
+ if (status == ICE_ERR_AQ_ERROR &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
+ ice_debug(hw, ICE_DBG_NVM,
+ "%s: New upper bound of %u bytes\n",
+ __func__, offset);
+ status = 0;
+ max_size = offset;
+ } else if (!status) {
+ ice_debug(hw, ICE_DBG_NVM,
+ "%s: New lower bound of %u bytes\n",
+ __func__, offset);
+ min_size = offset;
+ } else {
+ /* an unexpected error occurred */
+ goto err_read_flat_nvm;
+ }
+ }
+
+ ice_debug(hw, ICE_DBG_NVM,
+ "Predicted flash size is %u bytes\n", max_size);
+
+ hw->nvm.flash_size = max_size;
+
+err_read_flat_nvm:
+ ice_release_nvm(hw);
+
+ return status;
+}
+
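/* Convergence sketch for the loop above, assuming an 8MB part and the
 * 16MB (ICE_AQC_NVM_MAX_OFFSET + 1) search ceiling: the first probe at
 * offset 8M fails with EINVAL (new upper bound), the next at 4M succeeds
 * (new lower bound), and the window halves each pass, so roughly
 * log2(16M) ~= 24 one-byte AdminQ reads settle on max_size == 8M, the
 * first inaccessible offset.
 */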
+/**
* ice_init_nvm - initializes NVM setting
* @hw: pointer to the HW struct
*
@@ -241,9 +427,8 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
*/
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
- u16 oem_hi, oem_lo, boot_cfg_tlv, boot_cfg_tlv_len;
struct ice_nvm_info *nvm = &hw->nvm;
- u16 eetrack_lo, eetrack_hi;
+ u16 eetrack_lo, eetrack_hi, ver;
enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
@@ -269,12 +454,14 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
return ICE_ERR_NVM_BLANK_MODE;
}
- status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &nvm->ver);
+ status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
if (status) {
ice_debug(hw, ICE_DBG_INIT,
"Failed to read DEV starter version.\n");
return status;
}
+ nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
+ nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
if (status) {
@@ -289,80 +476,49 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
- /* the following devices do not have boot_cfg_tlv yet */
- if (hw->device_id == ICE_DEV_ID_E822C_BACKPLANE ||
- hw->device_id == ICE_DEV_ID_E822C_QSFP ||
- hw->device_id == ICE_DEV_ID_E822C_10G_BASE_T ||
- hw->device_id == ICE_DEV_ID_E822C_SGMII ||
- hw->device_id == ICE_DEV_ID_E822C_SFP ||
- hw->device_id == ICE_DEV_ID_E822X_BACKPLANE ||
- hw->device_id == ICE_DEV_ID_E822L_SFP ||
- hw->device_id == ICE_DEV_ID_E822L_10G_BASE_T ||
- hw->device_id == ICE_DEV_ID_E822L_SGMII)
- return status;
-
- status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
- ICE_SR_BOOT_CFG_PTR);
+ status = ice_discover_flash_size(hw);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read Boot Configuration Block TLV.\n");
+ ice_debug(hw, ICE_DBG_NVM,
+ "NVM init error: failed to discover flash size.\n");
return status;
}
- /* Boot Configuration Block must have length at least 2 words
- * (Combo Image Version High and Combo Image Version Low)
- */
- if (boot_cfg_tlv_len < 2) {
- ice_debug(hw, ICE_DBG_INIT,
- "Invalid Boot Configuration Block TLV size.\n");
- return ICE_ERR_INVAL_SIZE;
- }
-
- status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF),
- &oem_hi);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER hi.\n");
+ switch (hw->device_id) {
+ /* the following devices do not have boot_cfg_tlv yet */
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_SGMII:
+ case ICE_DEV_ID_E822C_BACKPLANE:
+ case ICE_DEV_ID_E822C_QSFP:
+ case ICE_DEV_ID_E822C_10G_BASE_T:
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822C_SFP:
+ case ICE_DEV_ID_E822L_BACKPLANE:
+ case ICE_DEV_ID_E822L_SFP:
+ case ICE_DEV_ID_E822L_10G_BASE_T:
+ case ICE_DEV_ID_E822L_SGMII:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_QSFP:
return status;
+ default:
+ break;
}
- status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OEM_VER_OFF + 1),
- &oem_lo);
+ status = ice_get_orom_ver_info(hw);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read OEM_VER lo.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
return status;
}
- nvm->oem_ver = ((u32)oem_hi << 16) | oem_lo;
-
return 0;
}
/**
- * ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary
- * @hw: pointer to the HW structure
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
- * @words: (in) number of words to read; (out) number of words actually read
- * @data: words read from the Shadow RAM
- *
- * Reads 16 bit words (data buf) from the SR using the ice_read_nvm_buf_aq
- * method. The buf read is preceded by the NVM ownership take
- * and followed by the release.
- */
-enum ice_status
-ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
-{
- enum ice_status status;
-
- status = ice_acquire_nvm(hw, ICE_RES_READ);
- if (!status) {
- status = ice_read_sr_buf_aq(hw, offset, words, data);
- ice_release_nvm(hw);
- }
-
- return status;
-}
-
-/**
* ice_nvm_validate_checksum
* @hw: pointer to the HW struct
*
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index a9fa011c22c6..999f273ba6ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -4,5 +4,17 @@
#ifndef _ICE_NVM_H_
#define _ICE_NVM_H_
+enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
+void ice_release_nvm(struct ice_hw *hw);
+enum ice_status
+ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
+ bool read_shadow_ram);
+enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type);
+enum ice_status
+ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
+enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
#endif /* _ICE_NVM_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index d2db0d04e117..554f567476f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -121,9 +121,7 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
break;
case ICE_AQ_LINK_SPEED_40GB:
- /* fall through */
case ICE_AQ_LINK_SPEED_50GB:
- /* fall through */
case ICE_AQ_LINK_SPEED_100GB:
speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 431266081a80..51825a203e35 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -578,7 +578,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
struct ice_aqc_get_sw_cfg_resp_elem *ele;
u16 pf_vf_num, swid, vsi_port_num;
bool is_vf = false;
- u8 type;
+ u8 res_type;
ele = rbuf[i].elements;
vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
@@ -593,16 +593,16 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
ICE_AQC_GET_SW_CONF_RESP_IS_VF)
is_vf = true;
- type = le16_to_cpu(ele->vsi_port_num) >>
+ res_type = le16_to_cpu(ele->vsi_port_num) >>
ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
- if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
+ if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
/* FW VSI is not needed. Just continue. */
continue;
}
ice_init_port_info(hw->port_info, vsi_port_num,
- type, swid, pf_vf_num, is_vf);
+ res_type, swid, pf_vf_num, is_vf);
}
} while (req_desc && !status);
@@ -760,7 +760,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_ETHERTYPE_MAC:
daddr = f_info->l_data.ethertype_mac.mac_addr;
- /* fall-through */
+ fallthrough;
case ICE_SW_LKUP_ETHERTYPE:
off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
*off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
@@ -771,7 +771,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_PROMISC_VLAN:
vlan_id = f_info->l_data.mac_vlan.vlan_id;
- /* fall-through */
+ fallthrough;
case ICE_SW_LKUP_PROMISC:
daddr = f_info->l_data.mac_vlan.mac_addr;
break;
@@ -958,7 +958,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
struct ice_aqc_sw_rules_elem *s_rule;
enum ice_status status;
u16 s_rule_size;
- u16 type;
+ u16 rule_type;
int i;
if (!num_vsi)
@@ -970,11 +970,11 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
- type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
- ICE_AQC_SW_RULES_T_VSI_LIST_SET;
+ rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
+ ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
- type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
- ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
+ rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
+ ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
else
return ICE_ERR_PARAM;
@@ -992,7 +992,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
}
- s_rule->type = cpu_to_le16(type);
+ s_rule->type = cpu_to_le16(rule_type);
s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 4de61dbedd36..f67e8362958c 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -453,10 +453,10 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fallthrough -- not supported action */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping frame */
+ fallthrough;
case XDP_DROP:
result = ICE_XDP_CONSUMED;
break;
@@ -1188,7 +1188,6 @@ ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
avg_pkt_size + 640);
break;
case ICE_AQ_LINK_SPEED_10GB:
- /* fall through */
default:
itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
avg_pkt_size + 640);
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index db0ef6ba907f..4ce5f92fca4a 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -239,12 +239,21 @@ struct ice_fc_info {
enum ice_fc_mode req_mode; /* FC mode requested by caller */
};
+/* Option ROM version information */
+struct ice_orom_info {
+ u8 major; /* Major version of OROM */
+ u8 patch; /* Patch version of OROM */
+ u16 build; /* Build version of OROM */
+};
+
/* NVM Information */
struct ice_nvm_info {
- u32 eetrack; /* NVM data version */
- u32 oem_ver; /* OEM version info */
- u16 sr_words; /* Shadow RAM size in words */
- u16 ver; /* NVM package version */
+ struct ice_orom_info orom; /* Option ROM version info */
+ u32 eetrack; /* NVM data version */
+ u16 sr_words; /* Shadow RAM size in words */
+ u32 flash_size; /* Size of available flash in bytes */
+ u8 major_ver; /* major version of NVM package */
+ u8 minor_ver; /* minor version of dev starter */
u8 blank_nvm_mode; /* is NVM empty (no FW present) */
};
@@ -626,7 +635,8 @@ struct ice_hw_port_stats {
/* Checksum and Shadow RAM pointers */
#define ICE_SR_BOOT_CFG_PTR 0x132
-#define ICE_NVM_OEM_VER_OFF 0x02
+#define ICE_NVM_OROM_VER_OFF 0x02
+#define ICE_SR_PBA_BLOCK_PTR 0x16
#define ICE_SR_NVM_DEV_STARTER_VER 0x18
#define ICE_SR_NVM_EETRACK_LO 0x2D
#define ICE_SR_NVM_EETRACK_HI 0x2E
@@ -634,12 +644,12 @@ struct ice_hw_port_stats {
#define ICE_NVM_VER_LO_MASK (0xff << ICE_NVM_VER_LO_SHIFT)
#define ICE_NVM_VER_HI_SHIFT 12
#define ICE_NVM_VER_HI_MASK (0xf << ICE_NVM_VER_HI_SHIFT)
-#define ICE_OEM_VER_PATCH_SHIFT 0
-#define ICE_OEM_VER_PATCH_MASK (0xff << ICE_OEM_VER_PATCH_SHIFT)
-#define ICE_OEM_VER_BUILD_SHIFT 8
-#define ICE_OEM_VER_BUILD_MASK (0xffff << ICE_OEM_VER_BUILD_SHIFT)
-#define ICE_OEM_VER_SHIFT 24
-#define ICE_OEM_VER_MASK (0xff << ICE_OEM_VER_SHIFT)
+#define ICE_OROM_VER_PATCH_SHIFT 0
+#define ICE_OROM_VER_PATCH_MASK (0xff << ICE_OROM_VER_PATCH_SHIFT)
+#define ICE_OROM_VER_BUILD_SHIFT 8
+#define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT)
+#define ICE_OROM_VER_SHIFT 24
+#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
#define ICE_SR_WORDS_IN_1KB 512
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 75c70d432c72..15191a325918 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -91,6 +91,39 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
}
/**
+ * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
+ * @vf: the VF to check
+ *
+ * Returns true if the VF has no Rx and no Tx queues enabled; false
+ * otherwise
+ */
+static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
+{
+ return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
+ !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
+}
+
+/**
+ * ice_is_vf_link_up - check if the VF's link is up
+ * @vf: VF to check if link is up
+ */
+static bool ice_is_vf_link_up(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (ice_check_vf_init(pf, vf))
+ return false;
+
+ if (ice_vf_has_no_qs_ena(vf))
+ return false;
+ else if (vf->link_forced)
+ return vf->link_up;
+ else
+ return pf->hw.port_info->phy.link_info.link_info &
+ ICE_AQ_LINK_UP;
+}
+
+/**
* ice_vc_notify_vf_link_state - Inform a VF of link status
* @vf: pointer to the VF structure
*
@@ -99,28 +132,16 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
struct virtchnl_pf_event pfe = { 0 };
- struct ice_link_status *ls;
- struct ice_pf *pf = vf->pf;
- struct ice_hw *hw;
-
- hw = &pf->hw;
- ls = &hw->port_info->phy.link_info;
+ struct ice_hw *hw = &vf->pf->hw;
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
- /* Always report link is down if the VF queues aren't enabled */
- if (!vf->num_qs_ena) {
+ if (ice_is_vf_link_up(vf))
+ ice_set_pfe_link(vf, &pfe,
+ hw->port_info->phy.link_info.link_speed, true);
+ else
ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
- } else if (vf->link_forced) {
- u16 link_speed = vf->link_up ?
- ls->link_speed : ICE_AQ_LINK_SPEED_UNKNOWN;
-
- ice_set_pfe_link(vf, &pfe, link_speed, vf->link_up);
- } else {
- ice_set_pfe_link(vf, &pfe, ls->link_speed,
- ls->link_info & ICE_AQ_LINK_UP);
- }
ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
@@ -149,7 +170,12 @@ static void ice_free_vf_res(struct ice_vf *vf)
vf->num_mac = 0;
}
- last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
+ last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
+
+ /* clear VF MDD event information */
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+
/* Disable interrupts so that VF starts in a known state */
for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
@@ -180,7 +206,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
first = vf->first_vector_idx;
- last = first + pf->num_vf_msix - 1;
+ last = first + pf->num_msix_per_vf - 1;
for (v = first; v <= last; v++) {
u32 reg;
@@ -206,11 +232,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
* ice_sriov_free_msix_res - Reset/free any used MSIX resources
* @pf: pointer to the PF structure
*
- * If MSIX entries from the pf->irq_tracker were needed then we need to
- * reset the irq_tracker->end and give back the entries we needed to
- * num_avail_sw_msix.
- *
- * If no MSIX entries were taken from the pf->irq_tracker then just clear
+ * Since no MSIX entries are taken from the pf->irq_tracker, just clear
* the pf->sriov_base_vector.
*
* Returns 0 on success, and -EINVAL on error.
@@ -227,11 +249,7 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf)
return -EINVAL;
/* give back irq_tracker resources used */
- if (pf->sriov_base_vector < res->num_entries) {
- res->end = res->num_entries;
- pf->num_avail_sw_msix +=
- res->num_entries - pf->sriov_base_vector;
- }
+ WARN_ON(pf->sriov_base_vector < res->num_entries);
pf->sriov_base_vector = 0;
@@ -245,9 +263,8 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf)
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
/* Clear Rx/Tx enabled queues flag */
- bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF);
- bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
- vf->num_qs_ena = 0;
+ bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}
@@ -263,7 +280,7 @@ static void ice_dis_vf_qs(struct ice_vf *vf)
vsi = pf->vsi[vf->lan_vsi_idx];
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
- ice_vsi_stop_rx_rings(vsi);
+ ice_vsi_stop_all_rx_rings(vsi);
ice_set_vf_state_qs_dis(vf);
}
@@ -283,11 +300,6 @@ void ice_free_vfs(struct ice_pf *pf)
while (test_and_set_bit(__ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
- /* Avoid wait time by stopping all VFs at the same time */
- ice_for_each_vf(pf, i)
- if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
- ice_dis_vf_qs(&pf->vf[i]);
-
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
* the carpet out from underneath their feet.
@@ -297,8 +309,13 @@ void ice_free_vfs(struct ice_pf *pf)
else
dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
+ /* Avoid wait time by stopping all VFs at the same time */
+ ice_for_each_vf(pf, i)
+ if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
+ ice_dis_vf_qs(&pf->vf[i]);
+
tmp = pf->num_alloc_vfs;
- pf->num_vf_qps = 0;
+ pf->num_qps_per_vf = 0;
pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) {
if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
@@ -407,43 +424,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
}
/**
- * ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add PVID
- * @ctxt: the VSI ctxt to fill
- * @vid: the VLAN ID to set as a PVID
- */
-static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
-{
- ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
- ICE_AQ_VSI_PVLAN_INSERT_PVID |
- ICE_AQ_VSI_VLAN_EMOD_STR);
- ctxt->info.pvid = cpu_to_le16(vid);
- ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
-}
-
-/**
- * ice_vsi_kill_pvid_fill_ctxt - Set VSI ctx for remove PVID
- * @ctxt: the VSI ctxt to fill
- */
-static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
-{
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
- ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
- ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
-}
-
-/**
* ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
* @vsi: the VSI to update
- * @vid: the VLAN ID to set as a PVID
+ * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
* @enable: true for enable PVID false for disable
*/
-static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
+static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
{
struct ice_hw *hw = &vsi->back->hw;
+ struct ice_aqc_vsi_props *info;
struct ice_vsi_ctx *ctxt;
enum ice_status status;
int ret = 0;
@@ -453,20 +442,33 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
return -ENOMEM;
ctxt->info = vsi->info;
- if (enable)
- ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
- else
- ice_vsi_kill_pvid_fill_ctxt(ctxt);
+ info = &ctxt->info;
+ if (enable) {
+ info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
+ ICE_AQ_VSI_PVLAN_INSERT_PVID |
+ ICE_AQ_VSI_VLAN_EMOD_STR;
+ info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ } else {
+ info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
+ ICE_AQ_VSI_VLAN_MODE_ALL;
+ info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ }
+
+ info->pvid = cpu_to_le16(pvid_info);
+ info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(ice_pf_to_dev(vsi->back), "update VSI for port VLAN failed, err %d aq_err %d\n",
+ dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
}
- vsi->info = ctxt->info;
+ vsi->info.vlan_flags = info->vlan_flags;
+ vsi->info.sw_flags2 = info->sw_flags2;
+ vsi->info.pvid = info->pvid;
out:
kfree(ctxt);
return ret;
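/* Example: pvid_info follows the 802.1Q TCI layout, so a port VLAN with
 * ID 100 and priority 3 arrives as (3 << VLAN_PRIO_SHIFT) | 100 = 0x6064;
 * callers below use VLAN_VID_MASK (0x0fff) to recover the bare ID for the
 * switch filter.
 */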
@@ -501,7 +503,7 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
*/
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
- return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
+ return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
}
/**
@@ -533,9 +535,20 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
vf->lan_vsi_num = vsi->vsi_num;
/* Check if port VLAN exist before, and restore it accordingly */
- if (vf->port_vlan_id) {
- ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
- ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
+ if (vf->port_vlan_info) {
+ ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
+ if (ice_vsi_add_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK))
+ dev_warn(ice_pf_to_dev(pf), "Failed to add Port VLAN %d filter for VF %d\n",
+ vf->port_vlan_info & VLAN_VID_MASK, vf->vf_id);
+ } else {
+ /* set VLAN 0 filter by default when no port VLAN is
+ * enabled. If a port VLAN is enabled we don't want
+ * untagged broadcast/multicast traffic seen on the VF
+ * interface.
+ */
+ if (ice_vsi_add_vlan(vsi, 0))
+ dev_warn(ice_pf_to_dev(pf), "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest\n",
+ vf->vf_id);
}
eth_broadcast_addr(broadcast);
@@ -583,7 +596,7 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
*/
tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
ice_get_avail_rxq_count(pf));
- tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
+ tx_rx_queue_left += pf->num_qps_per_vf;
if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
vf->num_req_qs != vf->num_vf_qs)
vf->num_vf_qs = vf->num_req_qs;
@@ -629,9 +642,9 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
first = vf->first_vector_idx;
- last = (first + pf->num_vf_msix) - 1;
+ last = (first + pf->num_msix_per_vf) - 1;
abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
- abs_last = (abs_first + pf->num_vf_msix) - 1;
+ abs_last = (abs_first + pf->num_msix_per_vf) - 1;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
/* VF Vector allocation */
@@ -749,7 +762,7 @@ int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
pf = vf->pf;
/* always add one to account for the OICR being the first MSIX */
- return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id +
+ return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
q_vector->v_idx + 1;
}
@@ -782,127 +795,112 @@ static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
* @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
*
* This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so in many cases the irq_tracker will not
- * be needed. In these cases we just set the pf->sriov_base_vector and return
- * success.
+ * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
+ * just set the pf->sriov_base_vector and return success.
*
- * If SR-IOV needs to use any pf->irq_tracker entries it updates the
- * irq_tracker->end based on the first entry needed for SR-IOV. This makes it
- * so any calls to ice_get_res() using the irq_tracker will not try to use
- * resources at or beyond the newly set value.
+ * If there are not enough resources available, return an error. This should
+ * always be caught by ice_set_per_vf_res().
*
* Return 0 on success, and -EINVAL when there are not enough MSIX vectors
* in the PF's space available for SR-IOV.
*/
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
- int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
- u16 pf_total_msix_vectors =
- pf->hw.func_caps.common_cap.num_msix_vectors;
- struct ice_res_tracker *res = pf->irq_tracker;
+ u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
+ int vectors_used = pf->irq_tracker->num_entries;
int sriov_base_vector;
- if (max_valid_res_idx < 0)
- return max_valid_res_idx;
-
- sriov_base_vector = pf_total_msix_vectors - num_msix_needed;
+ sriov_base_vector = total_vectors - num_msix_needed;
/* make sure we only grab irq_tracker entries from the list end and
* that we have enough available MSIX vectors
*/
- if (sriov_base_vector <= max_valid_res_idx)
+ if (sriov_base_vector < vectors_used)
return -EINVAL;
pf->sriov_base_vector = sriov_base_vector;
- /* dip into irq_tracker entries and update used resources */
- if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) {
- pf->num_avail_sw_msix -=
- res->num_entries - pf->sriov_base_vector;
- res->end = pf->sriov_base_vector;
- }
-
return 0;
}
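/* Hypothetical sizing for the check above: with 1024 function MSI-X
 * vectors, 96 entries in the irq_tracker, and 8 VFs at 17 vectors each
 * (136 needed):
 *   sriov_base_vector = 1024 - 136 = 888
 * 888 >= 96, so the VF block fits at the top of the space without
 * touching tracker entries; anything below 96 returns -EINVAL.
 */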
/**
- * ice_check_avail_res - check if vectors and queues are available
+ * ice_set_per_vf_res - check if vectors and queues are available
* @pf: pointer to the PF structure
*
- * This function is where we calculate actual number of resources for VF VSIs,
- * we don't reserve ahead of time during probe. Returns success if vectors and
- * queues resources are available, otherwise returns error code
+ * First, determine HW interrupts from the common pool. If we allocate fewer
+ * VFs, we
+ * get more vectors and can enable more queues per VF. Note that this does not
+ * grab any vectors from the SW pool already allocated. Also note, that all
+ * vector counts include one for each VF's miscellaneous interrupt vector
+ * (i.e. OICR).
+ *
+ * Minimum VFs - 2 vectors, 1 queue pair
+ * Small VFs - 5 vectors, 4 queue pairs
+ * Medium VFs - 17 vectors, 16 queue pairs
+ *
+ * Second, determine the number of queue pairs per VF by starting with a
+ * pre-defined
+ * maximum each VF supports. If this is not possible, then we adjust based on
+ * queue pairs available on the device.
+ *
+ * Lastly, set queue and MSI-X VF variables tracked by the PF so they can be
+ * used by each VF during VF initialization and reset.
*/
-static int ice_check_avail_res(struct ice_pf *pf)
+static int ice_set_per_vf_res(struct ice_pf *pf)
{
int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
- u16 num_msix, num_txq, num_rxq, num_avail_msix;
+ int msix_avail_per_vf, msix_avail_for_sriov;
struct device *dev = ice_pf_to_dev(pf);
+ u16 num_msix_per_vf, num_txq, num_rxq;
if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
return -EINVAL;
- /* add 1 to max_valid_res_idx to account for it being 0-based */
- num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors -
- (max_valid_res_idx + 1);
-
- /* Grab from HW interrupts common pool
- * Note: By the time the user decides it needs more vectors in a VF
- * its already too late since one must decide this prior to creating the
- * VF interface. So the best we can do is take a guess as to what the
- * user might want.
- *
- * We have two policies for vector allocation:
- * 1. if num_alloc_vfs is from 1 to 16, then we consider this as small
- * number of NFV VFs used for NFV appliances, since this is a special
- * case, we try to assign maximum vectors per VF (65) as much as
- * possible, based on determine_resources algorithm.
- * 2. if num_alloc_vfs is from 17 to 256, then its large number of
- * regular VFs which are not used for any special purpose. Hence try to
- * grab default interrupt vectors (5 as supported by AVF driver).
- */
- if (pf->num_alloc_vfs <= 16) {
- num_msix = ice_determine_res(pf, num_avail_msix,
- ICE_MAX_INTR_PER_VF,
- ICE_MIN_INTR_PER_VF);
- } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
- num_msix = ice_determine_res(pf, num_avail_msix,
- ICE_DFLT_INTR_PER_VF,
- ICE_MIN_INTR_PER_VF);
+ /* determine MSI-X resources per VF */
+ msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
+ pf->irq_tracker->num_entries;
+ msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
+ if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
+ } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
+ num_msix_per_vf = ICE_MIN_INTR_PER_VF;
} else {
- dev_err(dev, "Number of VFs %d exceeds max VF count %d\n",
- pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
+ dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
+ msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
+ pf->num_alloc_vfs);
return -EIO;
}
- if (!num_msix)
- return -EIO;
-
- /* Grab from the common pool
- * start by requesting Default queues (4 as supported by AVF driver),
- * Note that, the main difference between queues and vectors is, latter
- * can only be reserved at init time but queues can be requested by VF
- * at runtime through Virtchnl, that is the reason we start by reserving
- * few queues.
- */
+ /* determine queue resources per VF */
num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
- ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
+ min_t(u16,
+ num_msix_per_vf - ICE_NONQ_VECS_VF,
+ ICE_MAX_RSS_QS_PER_VF),
+ ICE_MIN_QS_PER_VF);
num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
- ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
-
- if (!num_txq || !num_rxq)
+ min_t(u16,
+ num_msix_per_vf - ICE_NONQ_VECS_VF,
+ ICE_MAX_RSS_QS_PER_VF),
+ ICE_MIN_QS_PER_VF);
+
+ if (!num_txq || !num_rxq) {
+ dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
+ ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
return -EIO;
+ }
- if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs))
+ if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
+ dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
+ pf->num_alloc_vfs);
return -EINVAL;
+ }
- /* since AVF driver works with only queue pairs which means, it expects
- * to have equal number of Rx and Tx queues, so take the minimum of
- * available Tx or Rx queues
- */
- pf->num_vf_qps = min_t(int, num_txq, num_rxq);
- pf->num_vf_msix = num_msix;
+ /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
+ pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
+ pf->num_msix_per_vf = num_msix_per_vf;
+ dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
+ pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
return 0;
}
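The vector policy above is effectively a ladder over the per-VF share of the SR-IOV vector pool: take the largest supported tier that fits. A minimal sketch of that selection, assuming only the tier constants introduced later in this patch (17 medium, 5 small, 2 minimum); pick_msix_tier() is a hypothetical helper, not part of the change:

	static int pick_msix_tier(int msix_avail_for_sriov, int num_vfs)
	{
		int per_vf = msix_avail_for_sriov / num_vfs;

		if (per_vf >= 17)	/* ICE_NUM_VF_MSIX_MED */
			return 17;
		if (per_vf >= 5)	/* ICE_NUM_VF_MSIX_SMALL */
			return 5;
		if (per_vf >= 2)	/* ICE_MIN_INTR_PER_VF */
			return 2;
		return -1;		/* pool too small for this many VFs */
	}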
@@ -943,17 +941,9 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
/* reallocate VF resources to finish resetting the VSI state */
if (!ice_alloc_vf_res(vf)) {
- struct ice_vsi *vsi;
-
ice_ena_vf_mappings(vf);
set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
-
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (ice_vsi_add_vlan(vsi, 0))
- dev_warn(ice_pf_to_dev(pf),
- "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest",
- vf->vf_id);
}
/* Tell the VF driver the reset is done. This needs to be done only
@@ -985,13 +975,13 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
if (vsi->num_vlan) {
status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
rm_promisc);
- } else if (vf->port_vlan_id) {
+ } else if (vf->port_vlan_info) {
if (rm_promisc)
status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_id);
+ vf->port_vlan_info);
else
status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_id);
+ vf->port_vlan_info);
} else {
if (rm_promisc)
status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
@@ -1019,7 +1009,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
int v;
- if (ice_check_avail_res(pf)) {
+ if (ice_set_per_vf_res(pf)) {
dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
return false;
}
@@ -1032,7 +1022,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, v) {
struct ice_vf *vf = &pf->vf[v];
- vf->num_vf_qs = pf->num_vf_qps;
+ vf->num_vf_qs = pf->num_qps_per_vf;
dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
vf->num_vf_qs);
ice_cleanup_and_realloc_vf(vf);
@@ -1165,9 +1155,10 @@ static bool ice_is_vf_disabled(struct ice_vf *vf)
* @vf: pointer to the VF structure
* @is_vflr: true if VFLR was issued, false if not
*
- * Returns true if the VF is reset, false otherwise.
+ * Returns true if the VF is currently in reset, was reset successfully, or if
+ * VF resets are disabled; false otherwise.
*/
-static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
+bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
@@ -1180,6 +1171,12 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
dev = ice_pf_to_dev(pf);
+ if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
+ dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
+ vf->vf_id);
+ return true;
+ }
+
if (ice_is_vf_disabled(vf)) {
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
vf->vf_id);
@@ -1231,7 +1228,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
*/
if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
- if (vf->port_vlan_id || vsi->num_vlan)
+ if (vf->port_vlan_info || vsi->num_vlan)
promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_UCAST_PROMISC_BITS;
@@ -1432,7 +1429,7 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
if (num_vfs > pf->num_vfs_supported) {
dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
num_vfs, pf->num_vfs_supported);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
dev_info(dev, "Allocating %d VFs\n", num_vfs);
@@ -1518,6 +1515,72 @@ static void ice_vc_reset_vf(struct ice_vf *vf)
}
/**
+ * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
+ * @pf: PF used to index all VFs
+ * @pfq: queue index relative to the PF's function space
+ *
+ * Return NULL if no VF owns the pfq, otherwise return a pointer to the VF
+ * that owns it.
+ */
+static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
+{
+ int vf_id;
+
+ ice_for_each_vf(pf, vf_id) {
+ struct ice_vf *vf = &pf->vf[vf_id];
+ struct ice_vsi *vsi;
+ u16 rxq_idx;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ ice_for_each_rxq(vsi, rxq_idx)
+ if (vsi->rxq_map[rxq_idx] == pfq)
+ return vf;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_globalq_to_pfq - convert from global queue index to PF space queue index
+ * @pf: PF used for conversion
+ * @globalq: global queue index used to convert to PF space queue index
+ */
+static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
+{
+ return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
+}
+
+/**
+ * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
+ * @pf: PF that the LAN overflow event happened on
+ * @event: structure holding the event information for the LAN overflow event
+ *
+ * Determine whether the LAN overflow event was caused by a VF queue. If it
+ * was not caused by a VF, do nothing. If it was, trigger a reset on the
+ * offending VF.
+ */
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ u32 gldcb_rtctq, queue;
+ struct ice_vf *vf;
+
+ gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
+ dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
+
+ /* event returns device global Rx queue number */
+ queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
+ GLDCB_RTCTQ_RXQNUM_S;
+
+ vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
+ if (!vf)
+ return;
+
+ ice_vc_reset_vf(vf);
+}
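The lookup path above is a three-step translation: mask and shift the queue number out of the event register, rebase it from device-global to PF space, then search each VF VSI's rxq_map[] for the owner. A hedged illustration with made-up numbers (the real mask, shift, and rxq_first_id come from the hardware headers and capabilities):

	/* illustrative only: if the event register reports global Rx
	 * queue 70 and the PF's rxq_first_id is 64, the PF-space queue
	 * is 6; whichever VF VSI has rxq_map[i] == 6 is the offender.
	 */
	u32 pfq = 70 - 64;	/* == 6 */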
+
+/**
* ice_vc_send_msg_to_vf - Send message to VF
* @vf: pointer to the VF info
* @v_opcode: virtual channel opcode
@@ -1675,7 +1738,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
- vfres->max_vectors = pf->num_vf_msix;
+ vfres->max_vectors = pf->num_msix_per_vf;
vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
@@ -1981,7 +2044,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
if (status) {
- dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d",
+ dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
ret = -EIO;
goto out;
@@ -2039,6 +2102,22 @@ error_param:
}
/**
+ * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Return true on successful validation, else false
+ */
+static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
+ vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
+ return false;
+
+ return true;
+}
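The comparison against BIT(ICE_MAX_RSS_QS_PER_VF) works because any bitmap with a bit set at position 16 or higher is numerically greater than or equal to BIT(16), so one unsigned compare rejects every out-of-range queue. A small sketch, assuming ICE_MAX_RSS_QS_PER_VF == 16 as defined later in this patch:

	u32 req_ok  = BIT(15);		/* queue 15: < BIT(16), accepted */
	u32 req_bad = BIT(16);		/* queue 16: >= BIT(16), rejected */
	u32 req_mix = BIT(3) | BIT(20);	/* any high bit poisons the map */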
+
+/**
* ice_vc_ena_qs_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2065,13 +2144,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (!vqs->rx_queues && !vqs->tx_queues) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
- vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2087,7 +2160,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
* programmed using ice_vsi_cfg_txqs
*/
q_map = vqs->rx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2097,7 +2170,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
if (test_bit(vf_q_id, vf->rxq_ena))
continue;
- if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
+ if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2105,12 +2178,11 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
}
set_bit(vf_q_id, vf->rxq_ena);
- vf->num_qs_ena++;
}
vsi = pf->vsi[vf->lan_vsi_idx];
q_map = vqs->tx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2121,7 +2193,6 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
continue;
set_bit(vf_q_id, vf->txq_ena);
- vf->num_qs_ena++;
}
/* Set flag to indicate that queues are enabled */
@@ -2163,13 +2234,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (!vqs->rx_queues && !vqs->tx_queues) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
- vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2183,7 +2248,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
if (vqs->tx_queues) {
q_map = vqs->tx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
struct ice_ring *ring = vsi->tx_rings[vf_q_id];
struct ice_txq_meta txq_meta = { 0 };
@@ -2208,14 +2273,23 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
/* Clear enabled queues flag */
clear_bit(vf_q_id, vf->txq_ena);
- vf->num_qs_ena--;
}
}
- if (vqs->rx_queues) {
- q_map = vqs->rx_queues;
+ q_map = vqs->rx_queues;
+ /* speed up Rx queue disable by batching them if possible */
+ if (q_map &&
+ bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
+ if (ice_vsi_stop_all_rx_rings(vsi)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
+ vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ } else if (q_map) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2225,7 +2299,8 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
if (!test_bit(vf_q_id, vf->rxq_ena))
continue;
- if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
+ true)) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2234,12 +2309,11 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
/* Clear enabled queues flag */
clear_bit(vf_q_id, vf->rxq_ena);
- vf->num_qs_ena--;
}
}
/* Clear enabled queues flag */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena)
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
error_param:
@@ -2249,6 +2323,57 @@ error_param:
}
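One note on the batched Rx disable above: the fast path fires only when the requested map is exactly the set of currently-enabled queues, so partial disables still walk queue by queue. A hedged sketch of that predicate (16 mirrors ICE_MAX_RSS_QS_PER_VF; can_batch() is a hypothetical name):

	static bool can_batch(unsigned long q_map, const unsigned long *rxq_ena)
	{
		/* all-or-nothing: batch only when the VF asked to disable
		 * precisely the queues that are enabled right now
		 */
		return q_map && bitmap_equal(&q_map, rxq_ena, 16);
	}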
/**
+ * ice_cfg_interrupt
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @vector_id: vector ID
+ * @map: vector map for mapping vectors to queues
+ * @q_vector: structure for interrupt vector
+ *
+ * Configure the IRQ to queue map.
+ */
+static int
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
+ struct virtchnl_vector_map *map,
+ struct ice_q_vector *q_vector)
+{
+ u16 vsi_q_id, vsi_q_id_idx;
+ unsigned long qmap;
+
+ q_vector->num_ring_rx = 0;
+ q_vector->num_ring_tx = 0;
+
+ qmap = map->rxq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_rx++;
+ q_vector->rx.itr_idx = map->rxitr_idx;
+ vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
+ q_vector->rx.itr_idx);
+ }
+
+ qmap = map->txq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_tx++;
+ q_vector->tx.itr_idx = map->txitr_idx;
+ vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
+ q_vector->tx.itr_idx);
+ }
+
+ return VIRTCHNL_STATUS_SUCCESS;
+}
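The factored-out helper above walks each vector's Rx and Tx queue maps with for_each_set_bit(), configuring one queue per set bit. A minimal sketch of that iteration pattern (pr_info() stands in for the real per-queue configuration):

	unsigned long qmap = BIT(0) | BIT(2);	/* queues 0 and 2 */
	u16 q;

	for_each_set_bit(q, &qmap, 16)
		pr_info("queue %u serviced by this vector\n", q);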
+
+/**
* ice_vc_cfg_irq_map_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2258,13 +2383,11 @@ error_param:
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u16 num_q_vectors_mapped, vsi_id, vector_id;
struct virtchnl_irq_map_info *irqmap_info;
- u16 vsi_id, vsi_q_id, vector_id;
struct virtchnl_vector_map *map;
struct ice_pf *pf = vf->pf;
- u16 num_q_vectors_mapped;
struct ice_vsi *vsi;
- unsigned long qmap;
int i;
irqmap_info = (struct virtchnl_irq_map_info *)msg;
@@ -2275,8 +2398,8 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
* there is actually at least a single VF queue vector mapped
*/
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- pf->num_vf_msix < num_q_vectors_mapped ||
- !irqmap_info->num_vectors) {
+ pf->num_msix_per_vf < num_q_vectors_mapped ||
+ !num_q_vectors_mapped) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2297,7 +2420,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* vector_id is always 0-based for each VF, and can never be
* larger than or equal to the max allowed interrupts per VF
*/
- if (!(vector_id < ICE_MAX_INTR_PER_VF) ||
+ if (!(vector_id < pf->num_msix_per_vf) ||
!ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2318,33 +2441,10 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
}
	/* look out for the invalid queue index */
- qmap = map->rxq_map;
- q_vector->num_ring_rx = 0;
- for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- q_vector->num_ring_rx++;
- q_vector->rx.itr_idx = map->rxitr_idx;
- vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
- q_vector->rx.itr_idx);
- }
-
- qmap = map->txq_map;
- q_vector->num_ring_tx = 0;
- for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- q_vector->num_ring_tx++;
- q_vector->tx.itr_idx = map->txitr_idx;
- vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
- q_vector->tx.itr_idx);
- }
+ v_ret = (enum virtchnl_status_code)
+ ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
+ if (v_ret)
+ goto error_param;
}
error_param:
@@ -2387,7 +2487,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
+ if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
@@ -2694,16 +2794,16 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
if (!req_queues) {
dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
vf->vf_id);
- } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
+ } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
dev_err(dev, "VF %d tried to request more than %d queues.\n",
- vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
- vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
+ vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
} else if (req_queues > cur_queues &&
req_queues - cur_queues > tx_rx_queue_left) {
dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
- ICE_MAX_BASE_QS_PER_VF);
+ ICE_MAX_RSS_QS_PER_VF);
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
@@ -2733,19 +2833,20 @@ int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto)
{
- u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_vsi *vsi;
struct device *dev;
struct ice_vf *vf;
+ u16 vlanprio;
int ret;
dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- if (vlan_id > ICE_MAX_VLANID || qos > 7) {
- dev_err(dev, "Invalid VF Parameters\n");
+ if (vlan_id >= VLAN_N_VID || qos > 7) {
+ dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
+ vf_id, vlan_id, qos);
return -EINVAL;
}
@@ -2761,40 +2862,52 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
if (ret)
return ret;
- if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
+ vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
+
+ if (vf->port_vlan_info == vlanprio) {
/* duplicate request, so just return success */
dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
return 0;
}
- /* If PVID, then remove all filters on the old VLAN */
- if (vsi->info.pvid)
- ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
- VLAN_VID_MASK));
-
if (vlan_id || qos) {
+ /* remove VLAN 0 filter set by default when transitioning from
+ * no port VLAN to a port VLAN. No change to old port VLAN on
+ * failure.
+ */
+ ret = ice_vsi_kill_vlan(vsi, 0);
+ if (ret)
+ return ret;
ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
if (ret)
return ret;
} else {
- ice_vsi_manage_pvid(vsi, 0, false);
- vsi->info.pvid = 0;
+ /* add VLAN 0 filter back when transitioning from port VLAN to
+ * no port VLAN. No change to old port VLAN on failure.
+ */
+ ret = ice_vsi_add_vlan(vsi, 0);
+ if (ret)
+ return ret;
+ ret = ice_vsi_manage_pvid(vsi, 0, false);
+ if (ret)
+ return ret;
}
if (vlan_id) {
dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
vlan_id, qos, vf_id);
- /* add new VLAN filter for each MAC */
+ /* add VLAN filter for the port VLAN */
ret = ice_vsi_add_vlan(vsi, vlan_id);
if (ret)
return ret;
}
+ /* remove old port VLAN filter with valid VLAN ID or QoS fields */
+ if (vf->port_vlan_info)
+ ice_vsi_kill_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK);
- /* The Port VLAN needs to be saved across resets the same as the
- * default LAN MAC address.
- */
- vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+ /* keep port VLAN information persistent on resets */
+ vf->port_vlan_info = le16_to_cpu(vsi->info.pvid);
return 0;
}
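port_vlan_info packs the 802.1Q TCI fields the same way the VLAN core does: the 12-bit VLAN ID in bits 11:0 and the 3-bit priority in bits 15:13 (VLAN_PRIO_SHIFT == 13 in <linux/if_vlan.h>). A worked example of the round trip:

	/* VLAN ID 100 (0x064) with QoS/priority 5 */
	u16 vlanprio = 100 | (5 << VLAN_PRIO_SHIFT);		/* == 0xa064 */
	u16 vid = vlanprio & VLAN_VID_MASK;			/* == 100 */
	u8 qos = (vlanprio & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; /* == 5 */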
@@ -2849,7 +2962,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
}
for (i = 0; i < vfl->num_elements; i++) {
- if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
+ if (vfl->vlan_id[i] >= VLAN_N_VID) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "invalid VF VLAN id %d\n",
vfl->vlan_id[i]);
@@ -2911,9 +3024,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- vsi->num_vlan++;
- /* Enable VLAN pruning when VLAN is added */
- if (!vlan_promisc) {
+ /* Enable VLAN pruning when non-zero VLAN is added */
+ if (!vlan_promisc && vid &&
+ !ice_vsi_is_vlan_pruning_ena(vsi)) {
status = ice_cfg_vlan_pruning(vsi, true, false);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2921,7 +3034,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
vid, status);
goto error_param;
}
- } else {
+ } else if (vlan_promisc) {
/* Enable Ucast/Mcast VLAN promiscuous mode */
promisc_m = ICE_PROMISC_VLAN_TX |
ICE_PROMISC_VLAN_RX;
@@ -2965,9 +3078,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
- vsi->num_vlan--;
- /* Disable VLAN pruning when the last VLAN is removed */
- if (!vsi->num_vlan)
+ /* Disable VLAN pruning when only VLAN 0 is left */
+ if (vsi->num_vlan == 1 &&
+ ice_vsi_is_vlan_pruning_ena(vsi))
ice_cfg_vlan_pruning(vsi, false, false);
/* Disable Unicast/Multicast VLAN promiscuous mode */
@@ -3246,14 +3359,12 @@ int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vsi *vsi;
struct ice_vf *vf;
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
vf = &pf->vf[vf_id];
- vsi = pf->vsi[vf->lan_vsi_idx];
if (ice_check_vf_init(pf, vf))
return -EBUSY;
@@ -3262,9 +3373,8 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
/* VF configuration for VLAN and applicable QoS */
- ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
- ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
- ICE_VLAN_PRIORITY_S;
+ ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
+ ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
ivi->trusted = vf->trusted;
ivi->spoofchk = vf->spoofchk;
@@ -3442,3 +3552,52 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id,
return 0;
}
+
+/**
+ * ice_print_vfs_mdd_events - print VFs malicious driver detection events
+ * @pf: pointer to the PF structure
+ *
+ * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
+ */
+void ice_print_vfs_mdd_events(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int i;
+
+ /* check that there are pending MDD events to print */
+ if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
+ return;
+
+ /* VF MDD event logs are rate limited to one second intervals */
+ if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
+ return;
+
+ pf->last_printed_mdd_jiffies = jiffies;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ /* only print Rx MDD event message if there are new events */
+ if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+ vf->mdd_rx_events.last_printed =
+ vf->mdd_rx_events.count;
+
+ dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+ vf->mdd_rx_events.count, hw->pf_id, i,
+ vf->dflt_lan_addr.addr,
+ test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+ ? "on" : "off");
+ }
+
+ /* only print Tx MDD event message if there are new events */
+ if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+ vf->mdd_tx_events.last_printed =
+ vf->mdd_tx_events.count;
+
+ dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
+ vf->mdd_tx_events.count, hw->pf_id, i,
+ vf->dflt_lan_addr.addr);
+ }
+ }
+}
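The rate limiting above is the standard jiffies idiom: time_is_after_jiffies(x) is true while x is still in the future, so the early return fires whenever less than one second (HZ jiffies) has elapsed since the last print. The pattern in isolation:

	static unsigned long last_print;	/* jiffies of last print */

	if (time_is_after_jiffies(last_print + HZ))
		return;			/* < 1s since last print, skip */
	last_print = jiffies;
	/* ... emit the messages ... */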
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index ac67982751df..3f9464269bd2 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -5,11 +5,6 @@
#define _ICE_VIRTCHNL_PF_H_
#include "ice.h"
-#define ICE_MAX_VLANID 4095
-#define ICE_VLAN_PRIORITY_S 12
-#define ICE_VLAN_M 0xFFF
-#define ICE_PRIORITY_M 0x7000
-
/* Restrict the number of MAC addresses and VLANs that a non-trusted VF can program */
#define ICE_MAX_VLAN_PER_VF 8
#define ICE_MAX_MACADDR_PER_VF 12
@@ -26,18 +21,15 @@
#define ICE_PCI_CIAD_WAIT_COUNT 100
#define ICE_PCI_CIAD_WAIT_DELAY_US 1
-/* VF resources default values and limitation */
+/* VF resource constraints */
#define ICE_MAX_VF_COUNT 256
-#define ICE_MAX_QS_PER_VF 256
#define ICE_MIN_QS_PER_VF 1
-#define ICE_DFLT_QS_PER_VF 4
#define ICE_NONQ_VECS_VF 1
#define ICE_MAX_SCATTER_QS_PER_VF 16
-#define ICE_MAX_BASE_QS_PER_VF 16
-#define ICE_MAX_INTR_PER_VF 65
-#define ICE_MAX_POLICY_INTR_PER_VF 33
+#define ICE_MAX_RSS_QS_PER_VF 16
+#define ICE_NUM_VF_MSIX_MED 17
+#define ICE_NUM_VF_MSIX_SMALL 5
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
-#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_VF_RESET_TRIES 40
#define ICE_MAX_VF_RESET_SLEEP_MS 20
@@ -61,6 +53,13 @@ enum ice_virtchnl_cap {
ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
};
+/* VF MDD events print structure */
+struct ice_mdd_vf_events {
+ u16 count; /* total count of Rx|Tx events */
+ /* count number of the last printed event */
+ u16 last_printed;
+};
+
/* VF information structure */
struct ice_vf {
struct ice_pf *pf;
@@ -73,9 +72,9 @@ struct ice_vf {
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
struct virtchnl_ether_addr dflt_lan_addr;
- DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF);
- DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF);
- u16 port_vlan_id;
+ DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ u16 port_vlan_info; /* Port VLAN ID and QoS */
u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
u8 trusted:1;
u8 spoofchk:1;
@@ -89,14 +88,14 @@ struct ice_vf {
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
- u64 num_mdd_events; /* number of MDD events detected */
u64 num_inval_msgs; /* number of continuous invalid msgs */
u64 num_valid_msgs; /* number of valid msgs detected */
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
u16 num_vf_qs; /* num of queue configured per VF */
- u16 num_qs_ena; /* total num of Tx/Rx queue enabled */
+ struct ice_mdd_vf_events mdd_rx_events;
+ struct ice_mdd_vf_events mdd_tx_events;
};
#ifdef CONFIG_PCI_IOV
@@ -111,6 +110,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
+bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -128,6 +128,9 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf);
int
ice_get_vf_stats(struct net_device *netdev, int vf_id,
struct ifla_vf_stats *vf_stats);
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_print_vfs_mdd_events(struct ice_pf *pf);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
@@ -135,6 +138,8 @@ ice_get_vf_stats(struct net_device *netdev, int vf_id,
#define ice_vc_notify_link_state(pf) do {} while (0)
#define ice_vc_notify_reset(pf) do {} while (0)
#define ice_set_vf_state_qs_dis(vf) do {} while (0)
+#define ice_vf_lan_overflow_event(pf, event) do {} while (0)
+#define ice_print_vfs_mdd_events(pf) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
@@ -143,6 +148,12 @@ ice_reset_all_vfs(struct ice_pf __always_unused *pf,
return true;
}
+static inline bool
+ice_reset_vf(struct ice_vf __always_unused *vf, bool __always_unused is_vflr)
+{
+ return true;
+}
+
static inline int
ice_sriov_configure(struct pci_dev __always_unused *pdev,
int __always_unused num_vfs)
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 4d3407bbd4c4..8279db15e870 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -183,7 +183,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
}
- err = ice_vsi_ctrl_rx_ring(vsi, false, q_idx);
+ err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
@@ -243,7 +243,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
ice_qvec_cfg_msix(vsi, q_vector);
- err = ice_vsi_ctrl_rx_ring(vsi, true, q_idx);
+ err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
if (err)
goto free_buf;
@@ -457,7 +457,7 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
if (if_running) {
ret = ice_qp_dis(vsi, qid);
if (ret) {
- netdev_err(vsi->netdev, "ice_qp_dis error = %d", ret);
+ netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
goto xsk_umem_if_up;
}
}
@@ -471,11 +471,11 @@ xsk_umem_if_up:
if (!ret && umem_present)
napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
else if (ret)
- netdev_err(vsi->netdev, "ice_qp_ena error = %d", ret);
+ netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
if (umem_failure) {
- netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d",
+ netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d\n",
umem_present ? "en" : "dis", umem_failure);
return umem_failure;
}
@@ -609,7 +609,7 @@ ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
*/
static bool
ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
- bool alloc(struct ice_ring *, struct ice_rx_buf *))
+ bool (*alloc)(struct ice_ring *, struct ice_rx_buf *))
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
@@ -816,10 +816,10 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fallthrough -- not supported action */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- /* fallthrough -- handle aborts by dropping frame */
+ fallthrough;
case XDP_DROP:
result = ICE_XDP_CONSUMED;
break;
@@ -841,8 +841,8 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
unsigned int xdp_xmit = 0;
+ bool failure = false;
struct xdp_buff xdp;
- bool failure = 0;
xdp.rxq = &rx_ring->xdp_rxq;
@@ -937,6 +937,15 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
ice_finalize_xdp_rx(rx_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
+ if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ else
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+
+ return (int)total_rx_packets;
+ }
+
return failure ? budget : (int)total_rx_packets;
}
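The need_wakeup flags added here implement the AF_XDP handshake: the driver sets the flag when it has run out of work (or failed to refill a ring), telling the application it must kick the kernel explicitly instead of assuming the driver keeps polling. On the userspace side (libbpf's xsk.h) that typically looks like the following sketch, where xsk is a hypothetical application handle:

	/* only pay for the syscall when the kernel asked for a kick */
	if (xsk_ring_prod__needs_wakeup(&xsk->tx))
		sendto(xsk_socket__fd(xsk->socket), NULL, 0, MSG_DONTWAIT,
		       NULL, 0);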
@@ -988,6 +997,8 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
+ xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return budget > 0 && work_done;
@@ -1063,6 +1074,13 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
if (xsk_frames)
xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+ if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) {
+ if (xdp_ring->next_to_clean == xdp_ring->next_to_use)
+ xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
+ else
+ xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
+ }
+
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 3479e1de98fe..8a4ba7c6d549 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -24,7 +24,7 @@ ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
struct xdp_umem __always_unused *umem,
u16 __always_unused qid)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void
@@ -63,7 +63,7 @@ static inline int
ice_xsk_wakeup(struct net_device __always_unused *netdev,
u32 __always_unused queue_id, u32 __always_unused flags)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 49b5fa9d4783..0c9282e2aaec 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -306,7 +306,7 @@ struct igb_q_vector {
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
- struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
+ struct igb_ring ring[] ____cacheline_internodealigned_in_smp;
};
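The ring[0] to ring[] conversions in this patch move from the old GNU zero-length-array idiom to a C99 flexible array member, which lets the compiler (and fortified string/memory helpers) know the array's bounds come from the allocation. Allocation sites typically pair it with struct_size() from <linux/overflow.h>; a sketch with an example count:

	unsigned int ring_count = 4;	/* example count */
	struct igb_q_vector *v;

	/* one allocation covering the struct plus ring_count rings,
	 * with overflow-checked size arithmetic
	 */
	v = kzalloc(struct_size(v, ring, ring_count), GFP_KERNEL);
	if (!v)
		return -ENOMEM;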
enum e1000_ring_flags_t {
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index f96ffa83efbe..39d3b76a6f5d 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2183,27 +2183,6 @@ static int igb_set_coalesce(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
- if (ec->rx_max_coalesced_frames ||
- ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_max_coalesced_frames ||
- ec->tx_coalesce_usecs_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_rx_coalesce ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval)
- return -ENOTSUPP;
-
if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 3) &&
(ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
@@ -3477,6 +3456,7 @@ static int igb_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops igb_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = igb_get_drvinfo,
.get_regs_len = igb_get_regs_len,
.get_regs = igb_get_regs,
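The deleted field-by-field checks are subsumed by the new supported_coalesce_params member: when it is set, the ethtool core itself rejects any coalesce field outside the declared mask with -EOPNOTSUPP before the driver callback runs, so drivers no longer open-code the same validation (igbvf and igc below get the same treatment). Declaring it is the entire driver-side change; a sketch with a hypothetical ops name:

	static const struct ethtool_ops example_ethtool_ops = {
		/* core now rejects everything except {rx,tx}_coalesce_usecs */
		.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
		/* ... remaining ops ... */
	};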
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 3b83747b2700..21a29a0ca7f4 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -198,11 +198,11 @@ int igb_sysfs_init(struct igb_adapter *adapter)
}
/* init i2c_client */
- client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
- if (client == NULL) {
+ client = i2c_new_client_device(&adapter->i2c_adap, &i350_sensor_info);
+ if (IS_ERR(client)) {
dev_info(&adapter->pdev->dev,
"Failed to create new i2c device.\n");
- rc = -ENODEV;
+ rc = PTR_ERR(client);
goto exit;
}
adapter->i2c_client = client;
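i2c_new_client_device() differs from the removed i2c_new_device() in its error convention: it returns an ERR_PTR-encoded errno instead of NULL, so failures are detected with IS_ERR() and propagated with PTR_ERR() rather than flattened to -ENODEV. The general pattern (adap and info stand for the adapter and board info already in scope):

	client = i2c_new_client_device(adap, info);
	if (IS_ERR(client))
		return PTR_ERR(client);	/* preserves the real errno */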
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 3ae358b35227..9217d150e286 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -424,6 +424,7 @@ static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
}
static const struct ethtool_ops igbvf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = igbvf_get_drvinfo,
.get_regs_len = igbvf_get_regs_len,
.get_regs = igbvf_get_regs,
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 49fb1e1965cd..e3c164c12e10 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -8,4 +8,4 @@
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_ethtool.o igc_ptp.o
+igc_ethtool.o igc_ptp.o igc_dump.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 52066bdbbad0..a1f845a2aa80 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -42,6 +42,10 @@ int igc_del_mac_steering_filter(struct igc_adapter *adapter,
const u8 *addr, u8 queue, u8 flags);
void igc_update_stats(struct igc_adapter *adapter);
+/* igc_dump declarations */
+void igc_rings_dump(struct igc_adapter *adapter);
+void igc_regs_dump(struct igc_adapter *adapter);
+
extern char igc_driver_name[];
extern char igc_driver_version[];
@@ -53,10 +57,13 @@ extern char igc_driver_version[];
/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
+
+/* Flags definitions */
#define IGC_FLAG_HAS_MSI BIT(0)
#define IGC_FLAG_QUEUE_PAIRS BIT(3)
#define IGC_FLAG_DMAC BIT(4)
#define IGC_FLAG_PTP BIT(8)
+#define IGC_FLAG_WOL_SUPPORTED	BIT(11)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGC_FLAG_MEDIA_RESET BIT(10)
#define IGC_FLAG_MAS_ENABLE BIT(12)
@@ -108,7 +115,7 @@ extern char igc_driver_version[];
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
/* Transmit and receive latency (for PTP timestamps) */
-/* FIXME: These values were estimated using the ones that i210 has as
+/* FIXME: These values were estimated using the ones that i225 has as
* basis, they seem to provide good numbers with ptp4l/phc2sys, but we
* need to confirm them.
*/
@@ -319,7 +326,7 @@ struct igc_q_vector {
struct net_device poll_dev;
/* for dynamic allocation of rings associated with this q_vector */
- struct igc_ring ring[0] ____cacheline_internodealigned_in_smp;
+ struct igc_ring ring[] ____cacheline_internodealigned_in_smp;
};
#define MAX_ETYPE_FILTER (4 - 1)
@@ -552,6 +559,7 @@ int igc_erase_filter(struct igc_adapter *adapter,
void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
+void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector, struct sk_buff *skb);
void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 58efa7a02c68..4ddccccf42cc 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -16,7 +16,10 @@
/* Wake Up Filter Control */
#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
#define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
@@ -259,6 +262,9 @@
#define IGC_GPIE_EIAME 0x40000000
#define IGC_GPIE_PBA 0x80000000
+/* Receive Descriptor bit definitions */
+#define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */
+
/* Transmit Descriptor bit definitions */
#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */
#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
diff --git a/drivers/net/ethernet/intel/igc/igc_dump.c b/drivers/net/ethernet/intel/igc/igc_dump.c
new file mode 100644
index 000000000000..657ab50ae296
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_dump.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Intel Corporation */
+
+#include "igc.h"
+
+struct igc_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+static const struct igc_reg_info igc_reg_info_tbl[] = {
+ /* General Registers */
+ {IGC_CTRL, "CTRL"},
+ {IGC_STATUS, "STATUS"},
+ {IGC_CTRL_EXT, "CTRL_EXT"},
+ {IGC_MDIC, "MDIC"},
+
+ /* Interrupt Registers */
+ {IGC_ICR, "ICR"},
+
+ /* RX Registers */
+ {IGC_RCTL, "RCTL"},
+ {IGC_RDLEN(0), "RDLEN"},
+ {IGC_RDH(0), "RDH"},
+ {IGC_RDT(0), "RDT"},
+ {IGC_RXDCTL(0), "RXDCTL"},
+ {IGC_RDBAL(0), "RDBAL"},
+ {IGC_RDBAH(0), "RDBAH"},
+
+ /* TX Registers */
+ {IGC_TCTL, "TCTL"},
+ {IGC_TDBAL(0), "TDBAL"},
+ {IGC_TDBAH(0), "TDBAH"},
+ {IGC_TDLEN(0), "TDLEN"},
+ {IGC_TDH(0), "TDH"},
+ {IGC_TDT(0), "TDT"},
+ {IGC_TXDCTL(0), "TXDCTL"},
+ {IGC_TDFH, "TDFH"},
+ {IGC_TDFT, "TDFT"},
+ {IGC_TDFHS, "TDFHS"},
+ {IGC_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/* igc_regdump - register printout routine */
+static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case IGC_RDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDLEN(n));
+ break;
+ case IGC_RDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDH(n));
+ break;
+ case IGC_RDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDT(n));
+ break;
+ case IGC_RXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RXDCTL(n));
+ break;
+ case IGC_RDBAL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDBAL(n));
+ break;
+ case IGC_RDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_RDBAH(n));
+ break;
+ case IGC_TDBAL(0):
+ for (n = 0; n < 4; n++)
+			regs[n] = rd32(IGC_TDBAL(n));
+ break;
+ case IGC_TDBAH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TDBAH(n));
+ break;
+ case IGC_TDLEN(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TDLEN(n));
+ break;
+ case IGC_TDH(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TDH(n));
+ break;
+ case IGC_TDT(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TDT(n));
+ break;
+ case IGC_TXDCTL(0):
+ for (n = 0; n < 4; n++)
+ regs[n] = rd32(IGC_TXDCTL(n));
+ break;
+ default:
+ pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+ pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+ regs[2], regs[3]);
+}
+
+/* igc_rings_dump - dump the Tx and Rx rings */
+void igc_rings_dump(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ union igc_adv_tx_desc *tx_desc;
+ union igc_adv_rx_desc *rx_desc;
+ struct igc_ring *tx_ring;
+ struct igc_ring *rx_ring;
+ u32 staterr;
+ u16 i, n;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ pr_info("Device Name state trans_start\n");
+ pr_info("%-15s %016lX %016lX\n", netdev->name,
+ netdev->state, dev_trans_start(netdev));
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ struct igc_tx_buffer *buffer_info;
+
+ tx_ring = adapter->tx_ring[n];
+ buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+
+ pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)dma_unmap_addr(buffer_info, dma),
+ dma_unmap_len(buffer_info, len),
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
+ }
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats
+ *
+ * Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 38 36 35 32 31 24 15 0
+ */
+
+ for (n = 0; n < adapter->num_tx_queues; n++) {
+ tx_ring = adapter->tx_ring[n];
+ pr_info("------------------------------------\n");
+ pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
+
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ const char *next_desc;
+ struct igc_tx_buffer *buffer_info;
+
+ tx_desc = IGC_TX_DESC(tx_ring, i);
+ buffer_info = &tx_ring->tx_buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ next_desc = " NTC/U";
+ else if (i == tx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == tx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
+ pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
+ i, le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)dma_unmap_addr(buffer_info, dma),
+ dma_unmap_len(buffer_info, len),
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp,
+ buffer_info->skb, next_desc);
+
+ if (netif_msg_pktdata(adapter) && buffer_info->skb)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, buffer_info->skb->data,
+ dma_unmap_len(buffer_info, len),
+ true);
+ }
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ pr_info("Queue [NTU] [NTC]\n");
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ pr_info(" %5d %5X %5X\n",
+ n, rx_ring->next_to_use, rx_ring->next_to_clean);
+ }
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+ /* Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 17 16 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
+ * | Checksum Ident | | | | Type | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ pr_info("------------------------------------\n");
+ pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
+
+ for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
+ struct igc_rx_buffer *buffer_info;
+
+ buffer_info = &rx_ring->rx_buffer_info[i];
+ rx_desc = IGC_RX_DESC(rx_ring, i);
+ u0 = (struct my_u0 *)rx_desc;
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
+ if (staterr & IGC_RXD_STAT_DD) {
+ /* Descriptor Done */
+ pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
+ "RWB", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ next_desc);
+ } else {
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
+ "R ", i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)buffer_info->dma,
+ next_desc);
+
+ if (netif_msg_pktdata(adapter) &&
+ buffer_info->dma && buffer_info->page) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ page_address
+ (buffer_info->page) +
+ buffer_info->page_offset,
+ igc_rx_bufsz(rx_ring),
+ true);
+ }
+ }
+ }
+ }
+
+exit:
+ return;
+}
+
+/* igc_regs_dump - registers dump */
+void igc_regs_dump(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ struct igc_reg_info *reginfo;
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ pr_info(" Register Name Value\n");
+ for (reginfo = (struct igc_reg_info *)igc_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ igc_regdump(hw, reginfo);
+ }
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index ee07011e13e9..f530fc29b074 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -308,6 +308,65 @@ static void igc_get_regs(struct net_device *netdev,
regs_buff[168 + i] = rd32(IGC_TXDCTL(i));
}
+static void igc_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ wol->wolopts = 0;
+
+ if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED))
+ return;
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY;
+
+ /* apply any specific unsupported masks here */
+ switch (adapter->hw.device_id) {
+ default:
+ break;
+ }
+
+ if (adapter->wol & IGC_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & IGC_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & IGC_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & IGC_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & IGC_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int igc_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER))
+ return -EOPNOTSUPP;
+
+ if (!(adapter->flags & IGC_FLAG_WOL_SUPPORTED))
+ return wol->wolopts ? -EOPNOTSUPP : 0;
+
+ /* these settings will always override what we currently have */
+ adapter->wol = 0;
+
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= IGC_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= IGC_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= IGC_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= IGC_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= IGC_WUFC_LNKC;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
static u32 igc_get_msglevel(struct net_device *netdev)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -802,27 +861,6 @@ static int igc_set_coalesce(struct net_device *netdev,
struct igc_adapter *adapter = netdev_priv(netdev);
int i;
- if (ec->rx_max_coalesced_frames ||
- ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_max_coalesced_frames ||
- ec->tx_coalesce_usecs_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_rx_coalesce ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval)
- return -ENOTSUPP;
-
if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS ||
(ec->rx_coalesce_usecs > 3 &&
ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) ||
@@ -1856,9 +1894,12 @@ static int igc_set_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops igc_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = igc_get_drvinfo,
.get_regs_len = igc_get_regs_len,
.get_regs = igc_get_regs,
+ .get_wol = igc_get_wol,
+ .set_wol = igc_set_wol,
.get_msglevel = igc_get_msglevel,
.set_msglevel = igc_set_msglevel,
.nway_reset = igc_nway_reset,
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index d9d5425fe8d9..69fa1ce1f927 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3546,6 +3546,8 @@ static void igc_reset_task(struct work_struct *work)
adapter = container_of(work, struct igc_adapter, reset_task);
+ igc_rings_dump(adapter);
+ igc_regs_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igc_reinit_locked(adapter);
}
@@ -4029,6 +4031,9 @@ static void igc_watchdog_task(struct work_struct *work)
}
}
if (link) {
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
@@ -4114,6 +4119,8 @@ no_wait:
return;
}
}
+ pm_schedule_suspend(netdev->dev.parent,
+ MSEC_PER_SEC * 5);
/* also check for alternate media here */
} else if (!netif_carrier_ok(netdev) &&
@@ -4337,6 +4344,7 @@ request_done:
static int __igc_open(struct net_device *netdev, bool resuming)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
int err = 0;
int i = 0;
@@ -4348,6 +4356,9 @@ static int __igc_open(struct net_device *netdev, bool resuming)
return -EBUSY;
}
+ if (!resuming)
+ pm_runtime_get_sync(&pdev->dev);
+
netif_carrier_off(netdev);
/* allocate transmit descriptors */
@@ -4386,6 +4397,9 @@ static int __igc_open(struct net_device *netdev, bool resuming)
rd32(IGC_ICR);
igc_irq_enable(adapter);
+ if (!resuming)
+ pm_runtime_put(&pdev->dev);
+
netif_tx_start_all_queues(netdev);
/* start the watchdog. */
@@ -4404,6 +4418,8 @@ err_setup_rx:
igc_free_all_tx_resources(adapter);
err_setup_tx:
igc_reset(adapter);
+ if (!resuming)
+ pm_runtime_put(&pdev->dev);
return err;
}
@@ -4428,9 +4444,13 @@ static int igc_open(struct net_device *netdev)
static int __igc_close(struct net_device *netdev, bool suspending)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
+ if (!suspending)
+ pm_runtime_get_sync(&pdev->dev);
+
igc_down(adapter);
igc_release_hw_control(adapter);
@@ -4440,6 +4460,9 @@ static int __igc_close(struct net_device *netdev, bool suspending)
igc_free_all_tx_resources(adapter);
igc_free_all_rx_resources(adapter);
+ if (!suspending)
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
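The pm_runtime_get_sync()/pm_runtime_put() pairs added to __igc_open()/__igc_close() are plain usage-count brackets: get resumes the device synchronously and pins it active, put drops the reference so the pm_schedule_suspend() issued from the watchdog can take effect once the count reaches zero. The resuming/suspending guards skip the bracket when the PM core is already holding the device awake. The idiom in isolation:

	pm_runtime_get_sync(&pdev->dev);	/* device resumed, count++ */
	/* ... safe to touch hardware here ... */
	pm_runtime_put(&pdev->dev);		/* count--, may autosuspend */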
@@ -4766,6 +4789,16 @@ static int igc_probe(struct pci_dev *pdev,
hw->fc.requested_mode = igc_fc_default;
hw->fc.current_mode = igc_fc_default;
+ /* By default, support wake on port A */
+ adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
+
+ /* initialize the wol settings based on the eeprom settings */
+ if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
+ adapter->wol |= IGC_WUFC_MAG;
+
+ device_set_wakeup_enable(&adapter->pdev->dev,
+ adapter->flags & IGC_FLAG_WOL_SUPPORTED);
+
/* reset the hardware with the new settings */
igc_reset(adapter);
@@ -4792,6 +4825,10 @@ static int igc_probe(struct pci_dev *pdev,
pcie_print_link_status(pdev);
netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+
+ pm_runtime_put_noidle(&pdev->dev);
+
return 0;
err_register:
@@ -4826,6 +4863,8 @@ static void igc_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
+ pm_runtime_get_noresume(&pdev->dev);
+
igc_ptp_stop(adapter);
set_bit(__IGC_DOWN, &adapter->state);
@@ -4870,6 +4909,8 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
if (netif_running(netdev))
__igc_close(netdev, true);
+ igc_ptp_suspend(adapter);
+
igc_clear_interrupt_scheme(adapter);
rtnl_unlock();
@@ -5045,6 +5086,108 @@ static void igc_shutdown(struct pci_dev *pdev)
}
}
+/**
+ * igc_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current PCI connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ **/
+static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ igc_down(adapter);
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * igc_io_slot_reset - called after the PCI bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold boot. Implementation
+ * resembles the first half of the igc_resume routine.
+ **/
+static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ pci_ers_result_t result;
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Could not re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ /* In case of a PCI error, the adapter loses its HW address,
+ * so we re-assign it here.
+ */
+ hw->hw_addr = adapter->io_addr;
+
+ igc_reset(adapter);
+ wr32(IGC_WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ return result;
+}
+
+/**
+ * igc_io_resume - called when traffic can start to flow again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. The implementation resembles the
+ * second half of the igc_resume routine.
+ */
+static void igc_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ if (igc_open(netdev)) {
+ dev_err(&pdev->dev, "igc_open failed after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver.
+ */
+ igc_get_hw_control(adapter);
+ rtnl_unlock();
+}
+
+static const struct pci_error_handlers igc_err_handler = {
+ .error_detected = igc_io_error_detected,
+ .slot_reset = igc_io_slot_reset,
+ .resume = igc_io_resume,
+};
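
The AER core invokes these callbacks in a fixed order: error_detected() first (quiesce and return a recovery verdict), slot_reset() after the link has been reset, then resume() once traffic may flow again. Hooking them up only requires pointing the pci_driver at the table, as done below; a skeleton with hypothetical demo_* callbacks:

    #include <linux/pci.h>

    /* sketch; the demo_io_* callbacks are illustrative */
    static const struct pci_error_handlers demo_err_handler = {
            .error_detected = demo_io_error_detected, /* quiesce, pick verdict */
            .slot_reset     = demo_io_slot_reset,     /* re-init after reset */
            .resume         = demo_io_resume,         /* restart traffic */
    };

    static struct pci_driver demo_driver = {
            /* ... probe/remove/id_table ... */
            .err_handler = &demo_err_handler,
    };
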
+
#ifdef CONFIG_PM
static const struct dev_pm_ops igc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
@@ -5062,6 +5205,7 @@ static struct pci_driver igc_driver = {
.driver.pm = &igc_pm_ops,
#endif
.shutdown = igc_shutdown,
+ .err_handler = &igc_err_handler,
};
/**
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 693506587198..f99c514ad0f4 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -509,7 +509,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
* This work function polls the TSYNCTXCTL valid bit to determine when a
* timestamp has been taken for the current stored skb.
*/
-void igc_ptp_tx_work(struct work_struct *work)
+static void igc_ptp_tx_work(struct work_struct *work)
{
struct igc_adapter *adapter = container_of(work, struct igc_adapter,
ptp_tx_work);
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index c9029b549b90..d4af53a80f11 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -17,6 +17,11 @@
/* Internal Packet Buffer Size Registers */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+#define IGC_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define IGC_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define IGC_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define IGC_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define IGC_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
/* NVM Register Descriptions */
#define IGC_EERD 0x12014 /* EEprom mode read - RW */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 39e73ad60352..2833e4f041ce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -462,7 +462,7 @@ struct ixgbe_q_vector {
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
- struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
+ struct ixgbe_ring ring[] ____cacheline_internodealigned_in_smp;
};
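
The ring[0] to ring[] change replaces a GNU zero-length array with a C99 flexible array member, which lets the compiler and fortify checks flag out-of-bounds accesses; the allocation side is unchanged. A self-contained sketch of the usual struct_size() idiom (demo_* names are illustrative):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo_vector {
            int num_rings;
            struct demo_ring {
                    u32 head, tail;
            } ring[];               /* flexible array member */
    };

    static struct demo_vector *demo_alloc(int n)
    {
            /* struct_size() = sizeof(*v) + n * sizeof(v->ring[0]),
             * with overflow checking
             */
            struct demo_vector *v = kzalloc(struct_size(v, ring, n),
                                            GFP_KERNEL);

            if (v)
                    v->num_rings = n;
            return v;
    }
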
#ifdef CONFIG_IXGBE_HWMON
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 7c52ae8ac005..c6bf0a50ee63 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3444,6 +3444,7 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = ixgbe_get_drvinfo,
.get_regs_len = ixgbe_get_regs_len,
.get_regs = ixgbe_get_regs,
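
supported_coalesce_params is new core infrastructure: the ethtool layer now rejects a coalescing request up front with -EOPNOTSUPP if it touches any field the driver did not declare, so per-driver parameter checks can go away. Declaring support is a single line in the ops table; a sketch with hypothetical demo_* callbacks:

    #include <linux/ethtool.h>

    /* sketch; demo_{get,set}_coalesce are illustrative callbacks */
    static const struct ethtool_ops demo_ethtool_ops = {
            .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
            .get_coalesce = demo_get_coalesce,
            .set_coalesce = demo_set_coalesce,
    };
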
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index ccd852ad62a4..ec7a11d13fdc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -968,8 +968,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- int i, pos;
- u8 buf[8];
+ u64 dsn;
if (!info)
return -EINVAL;
@@ -985,17 +984,11 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
/* Serial Number */
/* Get the PCI-e Device Serial Number Capability */
- pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
- if (pos) {
- pos += 4;
- for (i = 0; i < 8; i++)
- pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);
-
+ dsn = pci_get_dsn(adapter->pdev);
+ if (dsn)
snprintf(info->serial_number, sizeof(info->serial_number),
- "%02X%02X%02X%02X%02X%02X%02X%02X",
- buf[7], buf[6], buf[5], buf[4],
- buf[3], buf[2], buf[1], buf[0]);
- } else
+ "%016llX", dsn);
+ else
snprintf(info->serial_number, sizeof(info->serial_number),
"Unknown");
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index f7f309c96fa8..988fa49fa99a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -968,6 +968,7 @@ static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static const struct ethtool_ops ixgbevf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len,
.get_regs = ixgbevf_get_regs,
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 2e4975572e9f..c97c74164c73 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2077,12 +2077,7 @@ jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
IPPROTO_TCP,
0);
} else {
- struct ipv6hdr *ip6h = ipv6_hdr(skb);
-
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
- &ip6h->daddr, 0,
- IPPROTO_TCP,
- 0);
+ tcp_v6_gso_csum_prep(skb);
}
return 0;
@@ -2844,6 +2839,9 @@ jme_set_eeprom(struct net_device *netdev,
}
static const struct ethtool_ops jme_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = jme_get_drvinfo,
.get_regs_len = jme_get_regs_len,
.get_regs = jme_get_regs,
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 3c8125cbc84d..81d24481b22c 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1737,6 +1737,7 @@ static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
}
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = mv643xx_eth_get_drvinfo,
.nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 11babc79dc6c..5be61f73b6ab 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -341,6 +341,13 @@ enum {
ETHTOOL_STAT_EEE_WAKEUP,
ETHTOOL_STAT_SKB_ALLOC_ERR,
ETHTOOL_STAT_REFILL_ERR,
+ ETHTOOL_XDP_REDIRECT,
+ ETHTOOL_XDP_PASS,
+ ETHTOOL_XDP_DROP,
+ ETHTOOL_XDP_TX,
+ ETHTOOL_XDP_TX_ERR,
+ ETHTOOL_XDP_XMIT,
+ ETHTOOL_XDP_XMIT_ERR,
ETHTOOL_MAX_STATS,
};
@@ -354,10 +361,10 @@ struct mvneta_statistic {
#define T_REG_64 64
#define T_SW 1
-#define MVNETA_XDP_PASS BIT(0)
-#define MVNETA_XDP_DROPPED BIT(1)
-#define MVNETA_XDP_TX BIT(2)
-#define MVNETA_XDP_REDIR BIT(3)
+#define MVNETA_XDP_PASS 0
+#define MVNETA_XDP_DROPPED BIT(0)
+#define MVNETA_XDP_TX BIT(1)
+#define MVNETA_XDP_REDIR BIT(2)
static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x3000, T_REG_64, "good_octets_received", },
@@ -395,16 +402,42 @@ static const struct mvneta_statistic mvneta_statistics[] = {
{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
+ { ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
+ { ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
+ { ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
+ { ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
+ { ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
+ { ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
+ { ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
};
-struct mvneta_pcpu_stats {
- struct u64_stats_sync syncp;
+struct mvneta_stats {
u64 rx_packets;
u64 rx_bytes;
- u64 rx_dropped;
- u64 rx_errors;
u64 tx_packets;
u64 tx_bytes;
+ /* xdp */
+ u64 xdp_redirect;
+ u64 xdp_pass;
+ u64 xdp_drop;
+ u64 xdp_xmit;
+ u64 xdp_xmit_err;
+ u64 xdp_tx;
+ u64 xdp_tx_err;
+};
+
+struct mvneta_ethtool_stats {
+ struct mvneta_stats ps;
+ u64 skb_alloc_error;
+ u64 refill_error;
+};
+
+struct mvneta_pcpu_stats {
+ struct u64_stats_sync syncp;
+
+ struct mvneta_ethtool_stats es;
+ u64 rx_dropped;
+ u64 rx_errors;
};
struct mvneta_pcpu_port {
@@ -660,10 +693,6 @@ struct mvneta_rx_queue {
/* pointer to uncomplete skb buffer */
struct sk_buff *skb;
int left_size;
-
- /* error counters */
- u32 skb_alloc_err;
- u32 refill_err;
};
static enum cpuhp_state online_hpstate;
@@ -748,12 +777,12 @@ mvneta_get_stats64(struct net_device *dev,
cpu_stats = per_cpu_ptr(pp->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
- rx_packets = cpu_stats->rx_packets;
- rx_bytes = cpu_stats->rx_bytes;
+ rx_packets = cpu_stats->es.ps.rx_packets;
+ rx_bytes = cpu_stats->es.ps.rx_bytes;
rx_dropped = cpu_stats->rx_dropped;
rx_errors = cpu_stats->rx_errors;
- tx_packets = cpu_stats->tx_packets;
- tx_bytes = cpu_stats->tx_bytes;
+ tx_packets = cpu_stats->es.ps.tx_packets;
+ tx_bytes = cpu_stats->es.ps.tx_bytes;
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
stats->rx_packets += rx_packets;
@@ -1933,7 +1962,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
if (!data || !(rx_desc->buf_phys_addr))
continue;
- page_pool_put_page(rxq->page_pool, data, false);
+ page_pool_put_full_page(rxq->page_pool, data, false);
}
if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
xdp_rxq_info_unreg(&rxq->xdp_rxq);
@@ -1942,19 +1971,18 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
}
static void
-mvneta_update_stats(struct mvneta_port *pp, u32 pkts,
- u32 len, bool tx)
+mvneta_update_stats(struct mvneta_port *pp,
+ struct mvneta_stats *ps)
{
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
u64_stats_update_begin(&stats->syncp);
- if (tx) {
- stats->tx_packets += pkts;
- stats->tx_bytes += len;
- } else {
- stats->rx_packets += pkts;
- stats->rx_bytes += len;
- }
+ stats->es.ps.rx_packets += ps->rx_packets;
+ stats->es.ps.rx_bytes += ps->rx_bytes;
+ /* xdp */
+ stats->es.ps.xdp_redirect += ps->xdp_redirect;
+ stats->es.ps.xdp_pass += ps->xdp_pass;
+ stats->es.ps.xdp_drop += ps->xdp_drop;
u64_stats_update_end(&stats->syncp);
}
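
The rework above funnels all counters through the pre-existing u64_stats_sync so that 64-bit counters read consistently on 32-bit hosts: each writer brackets updates to its own per-cpu copy with u64_stats_update_begin/end, and readers retry until the sequence count is stable. A self-contained sketch of both halves (demo_* names are illustrative):

    #include <linux/percpu.h>
    #include <linux/u64_stats_sync.h>

    struct demo_pcpu_stats {                /* hypothetical stats block */
            struct u64_stats_sync syncp;
            u64 rx_packets;
    };

    /* writer: runs per-cpu (e.g. NAPI context), no locks needed */
    static void demo_count_rx(struct demo_pcpu_stats __percpu *stats)
    {
            struct demo_pcpu_stats *s = this_cpu_ptr(stats);

            u64_stats_update_begin(&s->syncp);
            s->rx_packets++;
            u64_stats_update_end(&s->syncp);
    }

    /* reader: retry each cpu until a consistent snapshot is seen */
    static u64 demo_read_rx(struct demo_pcpu_stats __percpu *stats)
    {
            u64 total = 0;
            int cpu;

            for_each_possible_cpu(cpu) {
                    struct demo_pcpu_stats *s = per_cpu_ptr(stats, cpu);
                    unsigned int start;
                    u64 val;

                    do {
                            start = u64_stats_fetch_begin_irq(&s->syncp);
                            val = s->rx_packets;
                    } while (u64_stats_fetch_retry_irq(&s->syncp, start));
                    total += val;
            }
            return total;
    }
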
@@ -1969,9 +1997,15 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
rx_desc = rxq->descs + curr_desc;
if (!(rx_desc->buf_phys_addr)) {
if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
+ struct mvneta_pcpu_stats *stats;
+
pr_err("Can't refill queue %d. Done %d from %d\n",
rxq->id, i, rxq->refill_num);
- rxq->refill_err++;
+
+ stats = this_cpu_ptr(pp->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.refill_error++;
+ u64_stats_update_end(&stats->syncp);
break;
}
}
@@ -2021,7 +2055,6 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
tx_desc->buf_phys_addr = dma_addr;
tx_desc->data_size = xdpf->len;
- mvneta_update_stats(pp, 1, xdpf->len, true);
mvneta_txq_inc_put(txq);
txq->pending++;
txq->count++;
@@ -2032,6 +2065,7 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
static int
mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
{
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
struct xdp_frame *xdpf;
@@ -2048,8 +2082,19 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
__netif_tx_lock(nq, cpu);
ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
- if (ret == MVNETA_XDP_TX)
+ if (ret == MVNETA_XDP_TX) {
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.tx_bytes += xdpf->len;
+ stats->es.ps.tx_packets++;
+ stats->es.ps.xdp_tx++;
+ u64_stats_update_end(&stats->syncp);
+
mvneta_txq_pend_desc_add(pp, txq, 0);
+ } else {
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.xdp_tx_err++;
+ u64_stats_update_end(&stats->syncp);
+ }
__netif_tx_unlock(nq);
return ret;
@@ -2060,10 +2105,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
struct xdp_frame **frames, u32 flags)
{
struct mvneta_port *pp = netdev_priv(dev);
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ int i, nxmit_byte = 0, nxmit = num_frame;
int cpu = smp_processor_id();
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
- int i, drops = 0;
u32 ret;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -2075,9 +2121,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
__netif_tx_lock(nq, cpu);
for (i = 0; i < num_frame; i++) {
ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
- if (ret != MVNETA_XDP_TX) {
+ if (ret == MVNETA_XDP_TX) {
+ nxmit_byte += frames[i]->len;
+ } else {
xdp_return_frame_rx_napi(frames[i]);
- drops++;
+ nxmit--;
}
}
@@ -2085,12 +2133,20 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
mvneta_txq_pend_desc_add(pp, txq, 0);
__netif_tx_unlock(nq);
- return num_frame - drops;
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.tx_bytes += nxmit_byte;
+ stats->es.ps.tx_packets += nxmit;
+ stats->es.ps.xdp_xmit += nxmit;
+ stats->es.ps.xdp_xmit_err += num_frame - nxmit;
+ u64_stats_update_end(&stats->syncp);
+
+ return nxmit;
}
static int
mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
- struct bpf_prog *prog, struct xdp_buff *xdp)
+ struct bpf_prog *prog, struct xdp_buff *xdp,
+ struct mvneta_stats *stats)
{
unsigned int len;
u32 ret, act;
@@ -2100,28 +2156,29 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
switch (act) {
case XDP_PASS:
- ret = MVNETA_XDP_PASS;
- break;
+ stats->xdp_pass++;
+ return MVNETA_XDP_PASS;
case XDP_REDIRECT: {
int err;
err = xdp_do_redirect(pp->dev, xdp, prog);
- if (err) {
+ if (unlikely(err)) {
ret = MVNETA_XDP_DROPPED;
- __page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data), len,
+ true);
} else {
ret = MVNETA_XDP_REDIR;
+ stats->xdp_redirect++;
}
break;
}
case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX)
- __page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data), len,
+ true);
break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -2130,13 +2187,16 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
trace_xdp_exception(pp->dev, prog, act);
/* fall through */
case XDP_DROP:
- __page_pool_put_page(rxq->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(rxq->page_pool,
+ virt_to_head_page(xdp->data), len, true);
ret = MVNETA_XDP_DROPPED;
+ stats->xdp_drop++;
break;
}
+ stats->rx_bytes += xdp->data_end - xdp->data;
+ stats->rx_packets++;
+
return ret;
}
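
mvneta_run_xdp() now has the canonical verdict-handling shape: every action except XDP_PASS consumes the buffer one way or another, and the per-verdict counters land in the caller-provided stats block. A condensed sketch of that shape (demo_* and the recycling comments are illustrative):

    #include <linux/bpf_trace.h>
    #include <linux/filter.h>

    static u32 demo_run_xdp(struct net_device *dev, struct bpf_prog *prog,
                            struct xdp_buff *xdp)
    {
            u32 act = bpf_prog_run_xdp(prog, xdp);

            switch (act) {
            case XDP_PASS:
                    break;                  /* continue and build an skb */
            case XDP_REDIRECT:
                    if (unlikely(xdp_do_redirect(dev, xdp, prog)))
                            act = XDP_DROP; /* recycle the buffer */
                    break;
            case XDP_TX:
                    /* hand the frame to the driver's own tx path */
                    break;
            default:
                    bpf_warn_invalid_xdp_action(act);
                    /* fall through */
            case XDP_ABORTED:
                    trace_xdp_exception(dev, prog, act);
                    /* fall through */
            case XDP_DROP:
                    act = XDP_DROP;         /* recycle the buffer */
                    break;
            }
            return act;
    }
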
@@ -2146,12 +2206,14 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog,
- struct page *page, u32 *xdp_ret)
+ struct page *page,
+ struct mvneta_stats *stats)
{
unsigned char *data = page_address(page);
int data_len = -MVNETA_MH_SIZE, len;
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;
+ int ret = 0;
if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE;
@@ -2175,17 +2237,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
xdp_set_data_meta_invalid(xdp);
if (xdp_prog) {
- u32 ret;
-
- ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp);
- if (ret != MVNETA_XDP_PASS) {
- mvneta_update_stats(pp, 1,
- xdp->data_end - xdp->data,
- false);
- rx_desc->buf_phys_addr = 0;
- *xdp_ret |= ret;
- return ret;
- }
+ ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp, stats);
+ if (ret)
+ goto out;
}
rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
@@ -2193,9 +2247,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
- rxq->skb_alloc_err++;
u64_stats_update_begin(&stats->syncp);
+ stats->es.skb_alloc_error++;
stats->rx_dropped++;
u64_stats_update_end(&stats->syncp);
@@ -2209,9 +2263,11 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
rxq->left_size = rx_desc->data_size - len;
+
+out:
rx_desc->buf_phys_addr = 0;
- return 0;
+ return ret;
}
static void
@@ -2252,12 +2308,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
struct mvneta_port *pp, int budget,
struct mvneta_rx_queue *rxq)
{
- int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0;
+ int rx_proc = 0, rx_todo, refill;
struct net_device *dev = pp->dev;
+ struct mvneta_stats ps = {};
struct bpf_prog *xdp_prog;
struct xdp_buff xdp_buf;
- int rx_todo, refill;
- u32 xdp_ret = 0;
/* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -2290,7 +2345,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
}
err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
- xdp_prog, page, &xdp_ret);
+ xdp_prog, page, &ps);
if (err)
continue;
} else {
@@ -2314,8 +2369,9 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
rxq->skb = NULL;
continue;
}
- rcvd_pkts++;
- rcvd_bytes += rxq->skb->len;
+
+ ps.rx_bytes += rxq->skb->len;
+ ps.rx_packets++;
/* Linux processing */
rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
@@ -2327,11 +2383,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
}
rcu_read_unlock();
- if (xdp_ret & MVNETA_XDP_REDIR)
+ if (ps.xdp_redirect)
xdp_do_flush_map();
- if (rcvd_pkts)
- mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
+ if (ps.rx_packets)
+ mvneta_update_stats(pp, &ps);
/* return some buffers to hardware queue, one at a time is too slow */
refill = mvneta_rx_refill_queue(pp, rxq);
@@ -2339,7 +2395,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* Update rxq management counters */
mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
- return rcvd_pkts;
+ return ps.rx_packets;
}
/* Main rx processing when using hardware buffer management */
@@ -2423,8 +2479,15 @@ err_drop_frame:
/* Refill processing */
err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
if (err) {
+ struct mvneta_pcpu_stats *stats;
+
netdev_err(dev, "Linux processing - Can't refill\n");
- rxq->refill_err++;
+
+ stats = this_cpu_ptr(pp->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.refill_error++;
+ u64_stats_update_end(&stats->syncp);
+
goto err_drop_frame_ret_pool;
}
@@ -2454,8 +2517,14 @@ err_drop_frame:
napi_gro_receive(napi, skb);
}
- if (rcvd_pkts)
- mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
+ if (rcvd_pkts) {
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.rx_packets += rcvd_pkts;
+ stats->es.ps.rx_bytes += rcvd_bytes;
+ u64_stats_update_end(&stats->syncp);
+ }
/* Update rxq management counters */
mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
@@ -2711,6 +2780,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
out:
if (frags > 0) {
struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
netdev_tx_sent_queue(nq, len);
@@ -2724,7 +2794,10 @@ out:
else
txq->pending += frags;
- mvneta_update_stats(pp, 1, len, true);
+ u64_stats_update_begin(&stats->syncp);
+ stats->es.ps.tx_bytes += len;
+ stats->es.ps.tx_packets++;
+ u64_stats_update_end(&stats->syncp);
} else {
dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
@@ -3766,13 +3839,9 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
MVNETA_GMAC_INBAND_RESTART_AN |
- MVNETA_GMAC_CONFIG_MII_SPEED |
- MVNETA_GMAC_CONFIG_GMII_SPEED |
MVNETA_GMAC_AN_SPEED_EN |
MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
- MVNETA_GMAC_CONFIG_FLOW_CTRL |
MVNETA_GMAC_AN_FLOW_CTRL_EN |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX |
MVNETA_GMAC_AN_DUPLEX_EN);
/* Even though it might look weird, when we're configured in
@@ -3787,24 +3856,20 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
if (phylink_test(state->advertising, Pause))
new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
- if (state->pause & MLO_PAUSE_TXRX_MASK)
- new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
if (!phylink_autoneg_inband(mode)) {
- /* Phy or fixed speed */
- if (state->duplex)
- new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
-
- if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
- new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
- else if (state->speed == SPEED_100)
- new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
+ /* Phy or fixed speed - nothing to do, leave the
+ * configured speed, duplex and flow control as-is.
+ */
} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
/* SGMII mode receives the state from the PHY */
new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
- MVNETA_GMAC_FORCE_LINK_PASS)) |
+ MVNETA_GMAC_FORCE_LINK_PASS |
+ MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX)) |
MVNETA_GMAC_INBAND_AN_ENABLE |
MVNETA_GMAC_AN_SPEED_EN |
MVNETA_GMAC_AN_DUPLEX_EN;
@@ -3813,7 +3878,8 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
- MVNETA_GMAC_FORCE_LINK_PASS)) |
+ MVNETA_GMAC_FORCE_LINK_PASS |
+ MVNETA_GMAC_CONFIG_MII_SPEED)) |
MVNETA_GMAC_INBAND_AN_ENABLE |
MVNETA_GMAC_CONFIG_GMII_SPEED |
/* The MAC only supports FD mode */
@@ -3901,9 +3967,11 @@ static void mvneta_mac_link_down(struct phylink_config *config,
mvneta_set_eee(pp, false);
}
-static void mvneta_mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phy)
+static void mvneta_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct net_device *ndev = to_net_dev(config->dev);
struct mvneta_port *pp = netdev_priv(ndev);
@@ -3911,8 +3979,36 @@ static void mvneta_mac_link_up(struct phylink_config *config, unsigned int mode,
if (!phylink_autoneg_inband(mode)) {
val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
- val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+ val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+ MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FLOW_CTRL |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX);
val |= MVNETA_GMAC_FORCE_LINK_PASS;
+
+ if (speed == SPEED_1000 || speed == SPEED_2500)
+ val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+ else if (speed == SPEED_100)
+ val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+
+ if (duplex == DUPLEX_FULL)
+ val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ if (tx_pause || rx_pause)
+ val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ } else {
+ /* When in-band AN doesn't cover flow control, or flow control
+ * is disabled, we need to configure it manually. This bit only
+ * has effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset.
+ */
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
+ if (tx_pause || rx_pause)
+ val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
}
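
This hunk (and the matching mvpp2 one further down) tracks a phylink API change: mac_link_up() now receives the resolved speed, duplex and pause state, so forcing link parameters moves out of mac_config() and into the link-up handler. The skeleton a MAC driver implements looks roughly like this (demo_* names are illustrative, bodies elided):

    #include <linux/phylink.h>

    static void demo_mac_link_up(struct phylink_config *config,
                                 struct phy_device *phy,
                                 unsigned int mode, phy_interface_t interface,
                                 int speed, int duplex,
                                 bool tx_pause, bool rx_pause)
    {
            if (!phylink_autoneg_inband(mode)) {
                    /* program forced speed/duplex from the arguments */
            }
            /* program pause enables from tx_pause/rx_pause, then
             * force the link up / enable the port
             */
    }

    static const struct phylink_mac_ops demo_phylink_ops = {
            /* .validate, .mac_config, .mac_link_down, ... */
            .mac_link_up = demo_mac_link_up,
    };
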
@@ -4419,45 +4515,112 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
}
}
+static void
+mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
+ struct mvneta_ethtool_stats *es)
+{
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct mvneta_pcpu_stats *stats;
+ u64 skb_alloc_error;
+ u64 refill_error;
+ u64 xdp_redirect;
+ u64 xdp_xmit_err;
+ u64 xdp_tx_err;
+ u64 xdp_pass;
+ u64 xdp_drop;
+ u64 xdp_xmit;
+ u64 xdp_tx;
+
+ stats = per_cpu_ptr(pp->stats, cpu);
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ skb_alloc_error = stats->es.skb_alloc_error;
+ refill_error = stats->es.refill_error;
+ xdp_redirect = stats->es.ps.xdp_redirect;
+ xdp_pass = stats->es.ps.xdp_pass;
+ xdp_drop = stats->es.ps.xdp_drop;
+ xdp_xmit = stats->es.ps.xdp_xmit;
+ xdp_xmit_err = stats->es.ps.xdp_xmit_err;
+ xdp_tx = stats->es.ps.xdp_tx;
+ xdp_tx_err = stats->es.ps.xdp_tx_err;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ es->skb_alloc_error += skb_alloc_error;
+ es->refill_error += refill_error;
+ es->ps.xdp_redirect += xdp_redirect;
+ es->ps.xdp_pass += xdp_pass;
+ es->ps.xdp_drop += xdp_drop;
+ es->ps.xdp_xmit += xdp_xmit;
+ es->ps.xdp_xmit_err += xdp_xmit_err;
+ es->ps.xdp_tx += xdp_tx;
+ es->ps.xdp_tx_err += xdp_tx_err;
+ }
+}
+
static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
+ struct mvneta_ethtool_stats stats = {};
const struct mvneta_statistic *s;
void __iomem *base = pp->base;
u32 high, low;
u64 val;
int i;
+ mvneta_ethtool_update_pcpu_stats(pp, &stats);
for (i = 0, s = mvneta_statistics;
s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
s++, i++) {
- val = 0;
-
switch (s->type) {
case T_REG_32:
val = readl_relaxed(base + s->offset);
+ pp->ethtool_stats[i] += val;
break;
case T_REG_64:
/* Docs say to read low 32-bit then high */
low = readl_relaxed(base + s->offset);
high = readl_relaxed(base + s->offset + 4);
val = (u64)high << 32 | low;
+ pp->ethtool_stats[i] += val;
break;
case T_SW:
switch (s->offset) {
case ETHTOOL_STAT_EEE_WAKEUP:
val = phylink_get_eee_err(pp->phylink);
+ pp->ethtool_stats[i] += val;
break;
case ETHTOOL_STAT_SKB_ALLOC_ERR:
- val = pp->rxqs[0].skb_alloc_err;
+ pp->ethtool_stats[i] = stats.skb_alloc_error;
break;
case ETHTOOL_STAT_REFILL_ERR:
- val = pp->rxqs[0].refill_err;
+ pp->ethtool_stats[i] = stats.refill_error;
+ break;
+ case ETHTOOL_XDP_REDIRECT:
+ pp->ethtool_stats[i] = stats.ps.xdp_redirect;
+ break;
+ case ETHTOOL_XDP_PASS:
+ pp->ethtool_stats[i] = stats.ps.xdp_pass;
+ break;
+ case ETHTOOL_XDP_DROP:
+ pp->ethtool_stats[i] = stats.ps.xdp_drop;
+ break;
+ case ETHTOOL_XDP_TX:
+ pp->ethtool_stats[i] = stats.ps.xdp_tx;
+ break;
+ case ETHTOOL_XDP_TX_ERR:
+ pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
+ break;
+ case ETHTOOL_XDP_XMIT:
+ pp->ethtool_stats[i] = stats.ps.xdp_xmit;
+ break;
+ case ETHTOOL_XDP_XMIT_ERR:
+ pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
break;
}
break;
}
-
- pp->ethtool_stats[i] += val;
}
}
@@ -4674,6 +4837,8 @@ static const struct net_device_ops mvneta_netdev_ops = {
};
static const struct ethtool_ops mvneta_eth_tool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvneta_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.set_coalesce = mvneta_ethtool_set_coalesce,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 35478cba2aa5..8972cdd559e8 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1082,6 +1082,9 @@ static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
u8 qh, ql, pmap;
int index, ctx;
+ if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
+ return -EOPNOTSUPP;
+
memset(&c2, 0, sizeof(c2));
index = mvpp2_cls_c2_port_flow_index(port, rule->loc);
@@ -1305,6 +1308,9 @@ static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
struct flow_rule *flow = rule->flow;
struct flow_action_entry *act;
+ if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
+ return -EOPNOTSUPP;
+
act = &flow->action.entries[0];
if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 72133cbe55d4..1fa60e985b43 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -58,8 +58,11 @@ static struct {
*/
static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
-static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface, struct phy_device *phy);
+static void mvpp2_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause);
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
@@ -3473,8 +3476,9 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
.interface = port->phy_interface,
};
mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
- mvpp2_mac_link_up(&port->phylink_config, MLO_AN_INBAND,
- port->phy_interface, NULL);
+ mvpp2_mac_link_up(&port->phylink_config, NULL,
+ MLO_AN_INBAND, port->phy_interface,
+ SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
}
netif_tx_start_all_queues(port->dev);
@@ -4380,6 +4384,8 @@ static const struct net_device_ops mvpp2_netdev_ops = {
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvpp2_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.set_coalesce = mvpp2_ethtool_set_coalesce,
@@ -4972,15 +4978,13 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
- an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
+ an &= ~(MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN |
- MVPP2_GMAC_IN_BAND_AUTONEG | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
+ MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_IN_BAND_AUTONEG |
+ MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK |
MVPP2_GMAC_PCS_ENABLE_MASK);
- ctrl4 &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
/* Configure port type */
if (phy_interface_mode_is_8023z(state->interface)) {
@@ -5010,31 +5014,20 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
/* Configure negotiation style */
if (!phylink_autoneg_inband(mode)) {
- /* Phy or fixed speed - no in-band AN */
- if (state->duplex)
- an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
-
- if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
- an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
- else if (state->speed == SPEED_100)
- an |= MVPP2_GMAC_CONFIG_MII_SPEED;
-
- if (state->pause & MLO_PAUSE_TX)
- ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
- if (state->pause & MLO_PAUSE_RX)
- ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
+ /* Phy or fixed speed - no in-band AN, nothing to do, leave the
+ * configured speed, duplex and flow control as-is.
+ */
} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
/* SGMII in-band mode receives the speed and duplex from
* the PHY. Flow control information is not received. */
- an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS);
+ an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_CONFIG_MII_SPEED |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX);
an |= MVPP2_GMAC_IN_BAND_AUTONEG |
MVPP2_GMAC_AN_SPEED_EN |
MVPP2_GMAC_AN_DUPLEX_EN;
-
- if (state->pause & MLO_PAUSE_TX)
- ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
- if (state->pause & MLO_PAUSE_RX)
- ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
} else if (phy_interface_mode_is_8023z(state->interface)) {
/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
* they negotiate duplex: they are always operating with a fixed
@@ -5042,19 +5035,17 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
* speed and full duplex here.
*/
ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
- an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS);
+ an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_CONFIG_MII_SPEED |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX);
an |= MVPP2_GMAC_IN_BAND_AUTONEG |
MVPP2_GMAC_CONFIG_GMII_SPEED |
MVPP2_GMAC_CONFIG_FULL_DUPLEX;
- if (state->pause & MLO_PAUSE_AN && state->an_enabled) {
+ if (state->pause & MLO_PAUSE_AN && state->an_enabled)
an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
- } else {
- if (state->pause & MLO_PAUSE_TX)
- ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
- if (state->pause & MLO_PAUSE_RX)
- ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
- }
}
/* Some fields of the auto-negotiation register require the port to be down when
@@ -5141,25 +5132,54 @@ static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
mvpp2_port_enable(port);
}
-static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface, struct phy_device *phy)
+static void mvpp2_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct net_device *dev = to_net_dev(config->dev);
struct mvpp2_port *port = netdev_priv(dev);
u32 val;
- if (!phylink_autoneg_inband(mode)) {
- if (mvpp2_is_xlg(interface)) {
+ if (mvpp2_is_xlg(interface)) {
+ if (!phylink_autoneg_inband(mode)) {
val = readl(port->base + MVPP22_XLG_CTRL0_REG);
val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
val |= MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
- } else {
+ }
+ } else {
+ if (!phylink_autoneg_inband(mode)) {
val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
+ val &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
+ MVPP2_GMAC_CONFIG_MII_SPEED |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX);
val |= MVPP2_GMAC_FORCE_LINK_PASS;
+
+ if (speed == SPEED_1000 || speed == SPEED_2500)
+ val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+ else if (speed == SPEED_100)
+ val |= MVPP2_GMAC_CONFIG_MII_SPEED;
+
+ if (duplex == DUPLEX_FULL)
+ val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
+
+ /* We can always update the flow control enable bits;
+ * these will only be effective if flow control AN
+ * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
+ */
+ val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
+ val &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
+ if (tx_pause)
+ val |= MVPP22_CTRL4_TX_FC_EN;
+ if (rx_pause)
+ val |= MVPP22_CTRL4_RX_FC_EN;
+ writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
}
mvpp2_port_enable(port);
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index ced514c05c97..d9dfb614daa6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -33,3 +33,9 @@ config OCTEONTX2_PF
depends on PCI
help
This driver supports Marvell's OcteonTX2 NIC physical function.
+
+config OCTEONTX2_VF
+ tristate "Marvell OcteonTX2 NIC Virtual Function driver"
+ depends on OCTEONTX2_PF
+ help
+ This driver supports Marvell's OcteonTX2 NIC virtual function.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 5ca788691911..a4e65da8d95b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -113,7 +113,6 @@ int cgx_get_cgxcnt_max(void)
return idmax + 1;
}
-EXPORT_SYMBOL(cgx_get_cgxcnt_max);
int cgx_get_lmac_cnt(void *cgxd)
{
@@ -124,7 +123,6 @@ int cgx_get_lmac_cnt(void *cgxd)
return cgx->lmac_count;
}
-EXPORT_SYMBOL(cgx_get_lmac_cnt);
void *cgx_get_pdata(int cgx_id)
{
@@ -136,7 +134,6 @@ void *cgx_get_pdata(int cgx_id)
}
return NULL;
}
-EXPORT_SYMBOL(cgx_get_pdata);
int cgx_get_cgxid(void *cgxd)
{
@@ -164,7 +161,6 @@ int cgx_get_link_info(void *cgxd, int lmac_id,
*linfo = lmac->link_info;
return 0;
}
-EXPORT_SYMBOL(cgx_get_link_info);
static u64 mac2u64 (u8 *mac_addr)
{
@@ -195,7 +191,6 @@ int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_addr_set);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
@@ -205,7 +200,6 @@ u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
-EXPORT_SYMBOL(cgx_lmac_addr_get);
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
@@ -217,7 +211,6 @@ int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
return 0;
}
-EXPORT_SYMBOL(cgx_set_pkind);
static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
{
@@ -255,7 +248,6 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
}
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_internal_loopback);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
@@ -289,7 +281,6 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
}
}
-EXPORT_SYMBOL(cgx_lmac_promisc_config);
/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
@@ -318,7 +309,6 @@ void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
}
}
-EXPORT_SYMBOL(cgx_lmac_enadis_rx_pause_fwding);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
@@ -329,7 +319,6 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}
-EXPORT_SYMBOL(cgx_get_rx_stats);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
@@ -340,7 +329,6 @@ int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
return 0;
}
-EXPORT_SYMBOL(cgx_get_tx_stats);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
@@ -358,7 +346,6 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
@@ -379,7 +366,107 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return !!(last & DATA_PKT_TX_EN);
}
-EXPORT_SYMBOL(cgx_lmac_tx_enable);
+
+int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
+ return 0;
+}
+
+int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+ return 0;
+}
+
+static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
+{
+ u64 cfg;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return;
+ if (enable) {
+ /* Enable receive pause frames */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ /* Enable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ /* Set pause time and interval */
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
+ DEFAULT_PAUSE_TIME);
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~0xFFFFULL;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
+ cfg | (DEFAULT_PAUSE_TIME / 2));
+
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
+ DEFAULT_PAUSE_TIME);
+
+ cfg = cgx_read(cgx, lmac_id,
+ CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~0xFFFFULL;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
+ cfg | (DEFAULT_PAUSE_TIME / 2));
+ } else {
+ /* All received pause frames are ignored */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ }
+}
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
@@ -558,60 +645,6 @@ static inline bool cgx_event_is_linkevent(u64 event)
return false;
}
-static inline int cgx_fwi_get_mkex_prfl_sz(u64 *prfl_sz,
- struct cgx *cgx)
-{
- u64 req = 0;
- u64 resp;
- int err;
-
- req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_SIZE, req);
- err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
- if (!err)
- *prfl_sz = FIELD_GET(RESP_MKEX_PRFL_SIZE, resp);
-
- return err;
-}
-
-static inline int cgx_fwi_get_mkex_prfl_addr(u64 *prfl_addr,
- struct cgx *cgx)
-{
- u64 req = 0;
- u64 resp;
- int err;
-
- req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_ADDR, req);
- err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
- if (!err)
- *prfl_addr = FIELD_GET(RESP_MKEX_PRFL_ADDR, resp);
-
- return err;
-}
-
-int cgx_get_mkex_prfl_info(u64 *addr, u64 *size)
-{
- struct cgx *cgx_dev;
- int err;
-
- if (!addr || !size)
- return -EINVAL;
-
- cgx_dev = list_first_entry(&cgx_list, struct cgx, cgx_list);
- if (!cgx_dev)
- return -ENXIO;
-
- err = cgx_fwi_get_mkex_prfl_sz(size, cgx_dev);
- if (err)
- return -EIO;
-
- err = cgx_fwi_get_mkex_prfl_addr(addr, cgx_dev);
- if (err)
- return -EIO;
-
- return 0;
-}
-EXPORT_SYMBOL(cgx_get_mkex_prfl_info);
-
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
struct lmac *lmac = data;
@@ -676,7 +709,6 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_evh_register);
int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
@@ -695,7 +727,24 @@ int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_evh_unregister);
+
+int cgx_get_fwdata_base(u64 *base)
+{
+ u64 req = 0, resp;
+ struct cgx *cgx;
+ int err;
+
+ cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
+ if (!cgx)
+ return -ENXIO;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+ if (!err)
+ *base = FIELD_GET(RESP_FWD_BASE, resp);
+
+ return err;
+}
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
@@ -769,7 +818,6 @@ int cgx_lmac_linkup_start(void *cgxd)
return 0;
}
-EXPORT_SYMBOL(cgx_lmac_linkup_start);
static int cgx_lmac_init(struct cgx *cgx)
{
@@ -805,6 +853,7 @@ static int cgx_lmac_init(struct cgx *cgx)
/* Add reference */
cgx->lmac_idmap[i] = lmac;
+ cgx_lmac_pause_frm_config(cgx, i, true);
}
return cgx_lmac_verify_fwi_version(cgx);
@@ -823,6 +872,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
/* Free all lmac related resources */
for (i = 0; i < cgx->lmac_count; i++) {
+ cgx_lmac_pause_frm_config(cgx, i, false);
lmac = cgx->lmac_idmap[i];
if (!lmac)
continue;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 9343bf39cfac..394f96591feb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -60,10 +60,20 @@
#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGXX_SMUX_TX_CTL 0x20178
+#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
+#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230
+#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248
+#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7)
+#define CGXX_CMR_RX_OVR_BP 0x130
+#define CGX_CMR_RX_OVR_BP_EN(X) BIT_ULL(((X) + 8))
+#define CGX_CMR_RX_OVR_BP_BP(X) BIT_ULL(((X) + 4))
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
#define CGX_CMD_TIMEOUT 2200 /* msecs */
+#define DEFAULT_PAUSE_TIME 0x7FF
#define CGX_NVEC 37
#define CGX_LMAC_FWI 0
@@ -124,5 +134,9 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
int cgx_lmac_linkup_start(void *cgxd);
-int cgx_get_mkex_prfl_info(u64 *addr, u64 *size);
+int cgx_get_fwdata_base(u64 *base);
+int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index 473d9751601f..c3702fa58b6b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -79,7 +79,8 @@ enum cgx_cmd_id {
CGX_CMD_MODE_CHANGE, /* hot plug support */
CGX_CMD_INTF_SHUTDOWN,
CGX_CMD_GET_MKEX_PRFL_SIZE,
- CGX_CMD_GET_MKEX_PRFL_ADDR
+ CGX_CMD_GET_MKEX_PRFL_ADDR,
+ CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */
};
/* async event ids */
@@ -149,6 +150,11 @@ enum cgx_cmd_own {
*/
#define RESP_MKEX_PRFL_ADDR GENMASK_ULL(63, 9)
+/* Response to cmd ID as CGX_CMD_GET_FWD_BASE with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_FWD_BASE GENMASK_ULL(56, 9)
+
/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
* status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
*
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 8bbc1f1d81f5..6dfd0f90cd70 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -125,7 +125,7 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
-M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \
+M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
@@ -143,6 +143,8 @@ M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
+M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
+ cgx_pause_frm_cfg) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -211,6 +213,9 @@ M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
+M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
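
The M() list extended here is an X-macro table: each consumer redefines M to expand the same list into a different artifact (message IDs, handler prototypes, dispatch tables), so adding one entry updates all of them at once. A minimal illustration of the technique (DEMO_* names are made up):

    #define DEMO_MESSAGES           \
    M(READY,  0x001)                \
    M(ATTACH, 0x002)

    /* expansion 1: an enum of message IDs */
    enum demo_msg_id {
    #define M(name, id) DEMO_MSG_##name = (id),
    DEMO_MESSAGES
    #undef M
    };

    /* expansion 2: printable names, indexed by the same IDs */
    static const char * const demo_msg_names[] = {
    #define M(name, id) [DEMO_MSG_##name] = #name,
    DEMO_MESSAGES
    #undef M
    };
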
@@ -251,7 +256,8 @@ enum rvu_af_status {
struct ready_msg_rsp {
struct mbox_msghdr hdr;
- u16 sclk_feq; /* SCLK frequency */
+ u16 sclk_freq; /* SCLK frequency (in MHz) */
+ u16 rclk_freq; /* RCLK frequency (in MHz) */
};
/* Structure for requesting resource provisioning.
@@ -342,6 +348,15 @@ struct cgx_link_info_msg {
struct cgx_link_user_info link_info;
};
+struct cgx_pause_frm_cfg {
+ struct mbox_msghdr hdr;
+ u8 set;
+ /* set = 1 if the request is to configure pause frames */
+ /* set = 0 if the request is to fetch the pause frame config */
+ u8 rx_pause;
+ u8 tx_pause;
+};
+
/* NPA mbox message formats */
/* NPA mailbox error codes
@@ -676,6 +691,25 @@ struct nix_lso_format_cfg_rsp {
u8 lso_format_idx;
};
+struct nix_bp_cfg_req {
+ struct mbox_msghdr hdr;
+ u16 chan_base; /* Starting channel number */
+ u8 chan_cnt; /* Number of channels */
+ u8 bpid_per_chan;
+ /* bpid_per_chan = 0 assigns a single bp id for the range of channels */
+ /* bpid_per_chan = 1 assigns a separate bp id for each channel */
+};
+
+/* PF can be mapped to either CGX or LBK interface,
+ * so maximum 64 channels are possible.
+ */
+#define NIX_MAX_BPID_CHAN 64
+struct nix_bp_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */
+ u8 chan_cnt; /* Number of channels for which bpids are assigned */
+};
+
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 5c190c3ce898..557e4292c846 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -21,7 +21,6 @@
#define DRV_NAME "octeontx2-af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
-#define DRV_VERSION "1.0"
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
@@ -46,10 +45,9 @@ static const struct pci_device_id rvu_id_table[] = {
{ 0, } /* end of table */
};
-MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, rvu_id_table);
static char *mkex_profile; /* MKEX profile name */
@@ -88,13 +86,15 @@ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
u64 reg_val;
reg = rvu->afreg_base + ((block << 28) | offset);
- while (time_before(jiffies, timeout)) {
- reg_val = readq(reg);
- if (zero && !(reg_val & mask))
- return 0;
- if (!zero && (reg_val & mask))
- return 0;
+again:
+ reg_val = readq(reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ if (time_before(jiffies, timeout)) {
usleep_range(1, 5);
+ goto again;
}
return -EBUSY;
}
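
The rvu_poll_reg() rewrite fixes a subtle race: in the old loop, a scheduling delay could expire the timeout before the register was ever sampled, returning -EBUSY even though the condition already held. Reading first and only then checking the deadline guarantees at least one sample. The stock iopoll helpers follow the same discipline; an equivalent sketch (the delay/timeout values are illustrative):

    #include <linux/iopoll.h>

    static int demo_poll_reg(void __iomem *reg, u64 mask)
    {
            u64 val;

            /* polls every ~5us, gives up after 10ms, and always
             * samples the register once more after the deadline
             */
            return readq_poll_timeout(reg, val, val & mask, 5, 10000);
    }
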
@@ -421,6 +421,19 @@ static void rvu_check_block_implemented(struct rvu *rvu)
}
}
+static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
+ RVU_BLK_RVUM_REVID);
+}
+
+static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
+}
+
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
int err;
@@ -603,7 +616,11 @@ setup_vfmsix:
*/
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
max_msix = cfg & 0xFFFFF;
- phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+ if (rvu->fwdata && rvu->fwdata->msixtr_base)
+ phy_addr = rvu->fwdata->msixtr_base;
+ else
+ phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);
+
iova = dma_map_resource(rvu->dev, phy_addr,
max_msix * PCI_MSIX_ENTRY_SIZE,
DMA_BIDIRECTIONAL, 0);
@@ -613,10 +630,18 @@ setup_vfmsix:
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
rvu->msix_base_iova = iova;
+ rvu->msixtr_base_phy = phy_addr;
return 0;
}
+static void rvu_reset_msix(struct rvu *rvu)
+{
+ /* Restore msixtr base register */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
+ rvu->msixtr_base_phy);
+}
+
static void rvu_free_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -655,9 +680,80 @@ static void rvu_free_hw_resources(struct rvu *rvu)
max_msix * PCI_MSIX_ENTRY_SIZE,
DMA_BIDIRECTIONAL, 0);
+ rvu_reset_msix(rvu);
mutex_destroy(&rvu->rsrc_lock);
}
+static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int pf, vf, numvfs, hwvf;
+ struct rvu_pfvf *pfvf;
+ u64 *mac;
+
+ for (pf = 0; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+ /* Assign MAC address to PF */
+ pfvf = &rvu->pf[pf];
+ if (rvu->fwdata && pf < PF_MACNUM_MAX) {
+ mac = &rvu->fwdata->pf_macs[pf];
+ if (*mac)
+ u64_to_ether_addr(*mac, pfvf->mac_addr);
+ else
+ eth_random_addr(pfvf->mac_addr);
+ } else {
+ eth_random_addr(pfvf->mac_addr);
+ }
+
+ /* Assign MAC address to VFs */
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+ for (vf = 0; vf < numvfs; vf++, hwvf++) {
+ pfvf = &rvu->hwvf[hwvf];
+ if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
+ mac = &rvu->fwdata->vf_macs[hwvf];
+ if (*mac)
+ u64_to_ether_addr(*mac, pfvf->mac_addr);
+ else
+ eth_random_addr(pfvf->mac_addr);
+ } else {
+ eth_random_addr(pfvf->mac_addr);
+ }
+ }
+ }
+}
+
+static int rvu_fwdata_init(struct rvu *rvu)
+{
+ u64 fwdbase;
+ int err;
+
+ /* Get firmware data base address */
+ err = cgx_get_fwdata_base(&fwdbase);
+ if (err)
+ goto fail;
+ rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
+ if (!rvu->fwdata)
+ goto fail;
+ if (!is_rvu_fwdata_valid(rvu)) {
+ dev_err(rvu->dev,
+ "Mismatch in 'fwdata' struct btw kernel and firmware\n");
+ iounmap(rvu->fwdata);
+ rvu->fwdata = NULL;
+ return -EINVAL;
+ }
+ return 0;
+fail:
+ dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
+ return -EIO;
+}
+
+static void rvu_fwdata_exit(struct rvu *rvu)
+{
+ if (rvu->fwdata)
+ iounmap(rvu->fwdata);
+}
+
static int rvu_setup_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -813,6 +909,8 @@ init:
mutex_init(&rvu->rsrc_lock);
+ rvu_fwdata_init(rvu);
+
err = rvu_setup_msix_resources(rvu);
if (err)
return err;
@@ -825,8 +923,10 @@ init:
/* Allocate memory for block LF/slot to pcifunc mapping info */
block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
sizeof(u16), GFP_KERNEL);
- if (!block->fn_map)
- return -ENOMEM;
+ if (!block->fn_map) {
+ err = -ENOMEM;
+ goto msix_err;
+ }
/* Scan all blocks to check if low level firmware has
* already provisioned any of the resources to a PF/VF.
@@ -836,25 +936,36 @@ init:
err = rvu_npc_init(rvu);
if (err)
- goto exit;
+ goto npc_err;
err = rvu_cgx_init(rvu);
if (err)
- goto exit;
+ goto cgx_err;
+
+ /* Assign MACs for CGX mapped functions */
+ rvu_setup_pfvf_macaddress(rvu);
err = rvu_npa_init(rvu);
if (err)
- goto cgx_err;
+ goto npa_err;
err = rvu_nix_init(rvu);
if (err)
- goto cgx_err;
+ goto nix_err;
return 0;
+nix_err:
+ rvu_nix_freemem(rvu);
+npa_err:
+ rvu_npa_freemem(rvu);
cgx_err:
rvu_cgx_exit(rvu);
-exit:
+npc_err:
+ rvu_npc_freemem(rvu);
+ rvu_fwdata_exit(rvu);
+msix_err:
+ rvu_reset_msix(rvu);
return err;
}
@@ -901,6 +1012,10 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
struct ready_msg_rsp *rsp)
{
+ if (rvu->fwdata) {
+ rsp->rclk_freq = rvu->fwdata->rclk;
+ rsp->sclk_freq = rvu->fwdata->sclk;
+ }
return 0;
}
@@ -2128,6 +2243,9 @@ static int rvu_register_interrupts(struct rvu *rvu)
}
rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
+ /* Clear TRPEND bit for all PF */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
/* Enable ME interrupt for all PFs */
rvu_write64(rvu, BLKADDR_RVUM,
RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
@@ -2439,17 +2557,13 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_disable_device;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
- dev_err(dev, "Unable to set DMA mask\n");
+ dev_err(dev, "DMA mask config failed, abort\n");
goto err_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
- if (err) {
- dev_err(dev, "Unable to set consistent DMA mask\n");
- goto err_release_regions;
- }
+ pci_set_master(pdev);
/* Map Admin function CSRs */
rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
@@ -2489,6 +2603,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_flr;
+ rvu_setup_rvum_blk_revid(rvu);
+
/* Enable AF's VFs (if any) */
err = rvu_enable_sriov(rvu);
if (err)
@@ -2506,8 +2622,10 @@ err_mbox:
rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
rvu_cgx_exit(rvu);
+ rvu_fwdata_exit(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
+ rvu_clear_rvum_blk_revid(rvu);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
@@ -2527,11 +2645,12 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_unregister_interrupts(rvu);
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
+ rvu_fwdata_exit(rvu);
rvu_mbox_destroy(&rvu->afpf_wq_info);
rvu_disable_sriov(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
-
+ rvu_clear_rvum_blk_revid(rvu);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 51c206f4fe6f..dcf25a092008 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -269,6 +269,26 @@ struct mbox_wq_info {
struct workqueue_struct *mbox_wq;
};
+struct rvu_fwdata {
+#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data */
+#define RVU_FWDATA_VERSION 0x0001
+ u32 header_magic;
+ u32 version; /* version id */
+
+ /* MAC address */
+#define PF_MACNUM_MAX 32
+#define VF_MACNUM_MAX 256
+ u64 pf_macs[PF_MACNUM_MAX];
+ u64 vf_macs[VF_MACNUM_MAX];
+ u64 sclk;
+ u64 rclk;
+ u64 mcam_addr;
+ u64 mcam_sz;
+ u64 msixtr_base;
+#define FWDATA_RESERVED_MEM 1023
+ u64 reserved[FWDATA_RESERVED_MEM];
+};
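
Assuming no implicit padding (the two u32s pack into a single 8-byte unit), the layout above works out to 8 + (32 + 256 + 5 + 1023) * 8 = 10536 bytes. Since the structure is shared with firmware, a compile-time size check is one way to guard the contract; a hypothetical sketch, not part of the patch:

	/* Hypothetical guard: 2 * u32 + (32 + 256 + 5 + 1023) * u64
	 * = 8 + 1316 * 8 = 10536 bytes, assuming no implicit padding.
	 */
	static_assert(sizeof(struct rvu_fwdata) == 10536,
		      "rvu_fwdata layout must match firmware");
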
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -294,6 +314,7 @@ struct rvu {
char *irq_name;
bool *irq_allocated;
dma_addr_t msix_base_iova;
+ u64 msixtr_base_phy; /* Register reset value */
/* CGX */
#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
@@ -313,6 +334,9 @@ struct rvu {
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+ /* Firmware data */
+ struct rvu_fwdata *fwdata;
+
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@@ -363,6 +387,12 @@ static inline int is_afvf(u16 pcifunc)
return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
}
+static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
+{
+ return (rvu->fwdata->header_magic == RVU_FWDATA_HEADER_MAGIC) &&
+ (rvu->fwdata->version == RVU_FWDATA_VERSION);
+}
+
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
@@ -432,7 +462,7 @@ int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
-int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf);
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 11e5921c55b9..f3c82e489897 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -350,6 +350,18 @@ int rvu_cgx_exit(struct rvu *rvu)
return 0;
}
+/* Most of the CGX configuration is restricted to the mapped PF only,
+ * VFs of the mapped PF and other PFs are not allowed. This fn() checks
+ * whether a given pcifunc is permitted to do the config or not.
+ */
+static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
+{
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ return false;
+ return true;
+}
+
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
u8 cgx_id, lmac_id;
@@ -373,11 +385,8 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -409,8 +418,7 @@ int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
u8 cgx_idx, lmac;
void *cgxd;
- if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, pf))
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
@@ -477,12 +485,8 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -493,16 +497,11 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- u16 pcifunc = req->hdr.pcifunc;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((req->hdr.pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -515,11 +514,8 @@ static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -571,11 +567,8 @@ static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
int pf = rvu_get_pf(pcifunc);
u8 cgx_id, lmac_id;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
- * if received from other PF/VF simply ACK, nothing to do.
- */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -597,6 +590,30 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
+ struct cgx_pause_frm_cfg *req,
+ struct cgx_pause_frm_cfg *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
+ * if received from any other PF/VF, simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (req->set)
+ cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ req->tx_pause, req->rx_pause);
+ else
+ cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ &rsp->tx_pause, &rsp->rx_pause);
+ return 0;
+}
+
/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
* from its VFs as well, i.e., NIX rx/tx counters at the CGX port level
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index eb5e542424e7..36953d4f51c7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -18,6 +18,8 @@
#include "cgx.h"
static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
+ int type, int chan_id);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -90,6 +92,26 @@ int rvu_get_nixlf_count(struct rvu *rvu)
return block->lf.max;
}
+int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (*nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (nix_blkaddr)
+ *nix_blkaddr = blkaddr;
+
+ return 0;
+}
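
The new nix_blkaddr out-parameter is optional: callers that only need the LF index pass NULL, as the rx start/stop handlers converted at the end of this file do. A hypothetical caller showing both forms:

	/* Hypothetical callers of the extended lookup */
	static int example_lookup(struct rvu *rvu, u16 pcifunc)
	{
		int nixlf, blkaddr, err;

		/* LF index only; block address not needed */
		err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
		if (err)
			return err;

		/* LF index plus the NIX block it belongs to */
		return nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	}
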
+
static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
INIT_HLIST_HEAD(&list->head);
@@ -191,6 +213,11 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
pfvf->tx_chan_cnt = 1;
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
+
+ /* By default we enable pause frames */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
+ cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -253,6 +280,142 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
}
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, pf, type;
+ u16 chan_base, chan;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ return 0;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+
+ chan_base = pfvf->rx_chan_base + req->chan_base;
+ for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ cfg & ~BIT_ULL(16));
+ }
+ return 0;
+}
+
+static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
+ int type, int chan_id)
+{
+ int bpid, blkaddr, lmac_chan_cnt;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 cgx_bpid_cnt, lbk_bpid_cnt;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ lmac_chan_cnt = cfg & 0xFF;
+
+ cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
+ lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ /* Backpressure IDs range division
+ * CGX channels are mapped to (0 - 191) BPIDs
+ * LBK channels are mapped to (192 - 255) BPIDs
+ * SDP channels are mapped to (256 - 511) BPIDs
+ *
+ * LMAC channels and BPIDs are mapped as follows:
+ * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
+ * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
+ * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
+ */
+ switch (type) {
+ case NIX_INTF_TYPE_CGX:
+ if ((req->chan_base + req->chan_cnt) > 15)
+ return -EINVAL;
+ rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+ /* Assign bpid based on cgx, lmac and chan id */
+ bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
+ (lmac_id * lmac_chan_cnt) + req->chan_base;
+
+ if (req->bpid_per_chan)
+ bpid += chan_id;
+ if (bpid > cgx_bpid_cnt)
+ return -EINVAL;
+ break;
+
+ case NIX_INTF_TYPE_LBK:
+ if ((req->chan_base + req->chan_cnt) > 63)
+ return -EINVAL;
+ bpid = cgx_bpid_cnt + req->chan_base;
+ if (req->bpid_per_chan)
+ bpid += chan_id;
+ if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return bpid;
+}
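
A worked instance of the CGX branch above, with assumed constants (both values really come from NIX_AF_CONST and hw->lmac_per_cgx at runtime):

	/* Worked example, assuming lmac_chan_cnt = 16 and hw->lmac_per_cgx = 4:
	 *
	 *   cgx_id = 1, lmac_id = 2, req->chan_base = 3
	 *   bpid = (1 * 4 * 16) + (2 * 16) + 3 = 64 + 32 + 3 = 99
	 *
	 * consistent with the mapping comment: cgx(1)_lmac(0) starts at bpid 64.
	 */
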
+
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ int blkaddr, pf, type, chan_id = 0;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ u16 chan_base, chan;
+ s16 bpid, bpid_base;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+
+ /* Enable backpressure only for CGX mapped PFs and LBK interface */
+ if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
+ return 0;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+
+ bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
+ chan_base = pfvf->rx_chan_base + req->chan_base;
+ bpid = bpid_base;
+
+ for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
+ if (bpid < 0) {
+ dev_warn(rvu->dev, "Fail to enable backpressure\n");
+ return -EINVAL;
+ }
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ cfg | (bpid & 0xFF) | BIT_ULL(16));
+ chan_id++;
+ bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
+ }
+
+ for (chan = 0; chan < req->chan_cnt; chan++) {
+ /* Map channel and the bpid assigned to it */
+ rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
+ (bpid_base & 0x3FF);
+ if (req->bpid_per_chan)
+ bpid_base++;
+ }
+ rsp->chan_cnt = req->chan_cnt;
+
+ return 0;
+}
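
Each rsp->chan_bpid[] entry packs the channel into bits [16:10] and the BPID into bits [9:0]; the PF side decodes it in mbox_handler_nix_bp_enable() in otx2_common.c further down. A sketch of the packing with hypothetical helper names, mirroring the shifts and masks used above:

	static inline u32 chan_bpid_pack(u32 chan, u32 bpid)
	{
		return ((chan & 0x7F) << 10) | (bpid & 0x3FF);
	}

	static inline void chan_bpid_unpack(u32 val, u32 *chan, u32 *bpid)
	{
		*chan = (val >> 10) & 0x7F;
		*bpid = val & 0x3FF;
	}
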
+
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
@@ -545,6 +708,11 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
*/
inst.res_addr = (u64)aq->res->iova;
+ /* Hardware uses the same aq->res->base for updating the result of
+ * the previous instruction, hence wait here till it is done.
+ */
+ spin_lock(&aq->lock);
+
/* Clean result + context memory */
memset(aq->res->base, 0, aq->res->entry_sz);
/* Context needs to be written at RES_ADDR + 128 */
@@ -589,11 +757,10 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
break;
default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
+ spin_unlock(&aq->lock);
return rc;
}
- spin_lock(&aq->lock);
-
/* Submit the instruction to AQ */
rc = nix_aq_enqueue_wait(rvu, block, &inst);
if (rc) {
@@ -698,6 +865,8 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
if (req->ctype == NIX_AQ_CTYPE_CQ) {
aq_req.cq.ena = 0;
aq_req.cq_mask.ena = 1;
+ aq_req.cq.bp_ena = 0;
+ aq_req.cq_mask.bp_ena = 1;
q_cnt = pfvf->cq_ctx->qsize;
bmap = pfvf->cq_bmap;
}
@@ -1667,13 +1836,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
req->num_regs > MAX_REGS_PER_MBOX_MSG)
return NIX_AF_INVAL_TXSCHQ_CFG;
- err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
if (err)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ return err;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
@@ -1767,17 +1932,12 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
struct msg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, nixlf, err;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
if (req->cfg_type) {
err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
@@ -2119,18 +2279,13 @@ static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
- int i, nixlf, blkaddr;
+ int i, nixlf, blkaddr, err;
u64 stats;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
/* Get stats count supported by HW */
stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
@@ -2418,18 +2573,14 @@ int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
struct nix_rss_flowkey_cfg *req,
struct nix_rss_flowkey_cfg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int alg_idx, nixlf, blkaddr;
struct nix_hw *nix_hw;
+ int err;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
@@ -2522,19 +2673,15 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
- int blkaddr, nixlf;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
ether_addr_copy(pfvf->mac_addr, req->mac_addr);
@@ -2567,19 +2714,15 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp)
{
bool allmulti = false, disable_promisc = false;
- struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
- int blkaddr, nixlf;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
if (req->mode & NIX_RX_MODE_PROMISC)
allmulti = false;
@@ -2794,22 +2937,12 @@ free_entry:
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
struct msg_rsp *rsp)
{
- struct rvu_hwinfo *hw = rvu->hw;
- u16 pcifunc = req->hdr.pcifunc;
- struct rvu_block *block;
- struct rvu_pfvf *pfvf;
- int nixlf, blkaddr;
+ int nixlf, blkaddr, err;
u64 cfg;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- block = &hw->block[blkaddr];
- nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
/* Set the interface configuration */
@@ -3077,6 +3210,9 @@ int rvu_nix_init(struct rvu *rvu)
/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
nix_link_config(rvu, blkaddr);
+
+ /* Enable Channel backpressure */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
}
return 0;
}
@@ -3114,30 +3250,13 @@ void rvu_nix_freemem(struct rvu *rvu)
}
}
-int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
-{
- struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
- struct rvu_hwinfo *hw = rvu->hw;
- int blkaddr;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (*nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- return 0;
-}
-
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
int nixlf, err;
- err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
return err;
@@ -3152,7 +3271,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
u16 pcifunc = req->hdr.pcifunc;
int nixlf, err;
- err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
return err;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 6e7c7f459f74..67471cb2b129 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -94,6 +94,11 @@ int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
*/
inst.res_addr = (u64)aq->res->iova;
+ /* Hardware uses the same aq->res->base for updating the result of
+ * the previous instruction, hence wait here till it is done.
+ */
+ spin_lock(&aq->lock);
+
/* Clean result + context memory */
memset(aq->res->base, 0, aq->res->entry_sz);
/* Context needs to be written at RES_ADDR + 128 */
@@ -138,10 +143,10 @@ int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
break;
}
- if (rc)
+ if (rc) {
+ spin_unlock(&aq->lock);
return rc;
-
- spin_lock(&aq->lock);
+ }
/* Submit the instruction to AQ */
rc = npa_aq_enqueue_wait(rvu, block, &inst);
@@ -218,6 +223,8 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
aq_req.aura.ena = 0;
aq_req.aura_mask.ena = 1;
+ aq_req.aura.bp_ena = 0;
+ aq_req.aura_mask.bp_ena = 1;
cnt = pfvf->aura_ctx->qsize;
bmap = pfvf->aura_bmap;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 40e431debbe9..0a214084406a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -825,8 +825,10 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
goto load_default;
- if (cgx_get_mkex_prfl_info(&prfl_addr, &prfl_sz))
+ if (!rvu->fwdata)
goto load_default;
+ prfl_addr = rvu->fwdata->mcam_addr;
+ prfl_sz = rvu->fwdata->mcam_sz;
if (!prfl_addr || !prfl_sz)
goto load_default;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 9d8942acc232..a3ecb5de9000 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -11,6 +11,9 @@
#ifndef RVU_STRUCT_H
#define RVU_STRUCT_H
+/* RVU Block revision IDs */
+#define RVU_BLK_RVUM_REVID 0x01
+
/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
BLKADDR_RVUM = 0x0ULL,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 41bf00cf5b1d..778df331c8ac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -4,7 +4,9 @@
#
obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
+obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o
+octeontx2_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index b945bd3d5d88..f1d2dea90a8c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -49,15 +49,15 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf)
if (!netif_running(pfvf->netdev))
return;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return;
}
otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
}
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
@@ -128,6 +128,7 @@ void otx2_get_stats64(struct net_device *netdev,
stats->tx_packets = dev_stats->tx_frames;
stats->tx_dropped = dev_stats->tx_drops;
}
+EXPORT_SYMBOL(otx2_get_stats64);
/* Sync MAC address with RVU AF */
static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
@@ -135,17 +136,17 @@ static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
struct nix_set_mac_addr *req;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
ether_addr_copy(req->mac_addr, mac);
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -157,27 +158,27 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
struct msg_req *req;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
if (IS_ERR(msghdr)) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return PTR_ERR(msghdr);
}
rsp = (struct nix_get_mac_addr_rsp *)msghdr;
ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return 0;
}
@@ -197,26 +198,50 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
+EXPORT_SYMBOL(otx2_set_mac_address);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
struct nix_frs_cfg *req;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
- /* SMQ config limits maximum pkt size that can be transmitted */
- req->update_smq = true;
pfvf->max_frs = mtu + OTX2_ETH_HLEN;
req->maxlen = pfvf->max_frs;
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+int otx2_config_pause_frm(struct otx2_nic *pfvf)
+{
+ struct cgx_pause_frm_cfg *req;
+ int err;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED);
+ req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED);
+ req->set = 1;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+unlock:
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -226,10 +251,10 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
struct nix_rss_flowkey_cfg *req;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
if (!req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
req->mcam_index = -1; /* Default or reserved index */
@@ -237,7 +262,7 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
req->group = DEFAULT_RSS_CONTEXT_GROUP;
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -248,7 +273,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
struct nix_aq_enq_req *aq;
int idx, err;
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
@@ -258,12 +283,12 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
*/
err = otx2_sync_mbox_msg(mbox);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return err;
}
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!aq) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return -ENOMEM;
}
}
@@ -276,7 +301,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
aq->op = NIX_AQ_INSTOP_INIT;
}
err = otx2_sync_mbox_msg(mbox);
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return err;
}
@@ -394,6 +419,7 @@ void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
schedule_work(&pfvf->reset_task);
}
+EXPORT_SYMBOL(otx2_tx_timeout);
void otx2_get_mac_from_af(struct net_device *netdev)
{
@@ -408,6 +434,7 @@ void otx2_get_mac_from_af(struct net_device *netdev)
if (!is_valid_ether_addr(netdev->dev_addr))
eth_hw_addr_random(netdev);
}
+EXPORT_SYMBOL(otx2_get_mac_from_af);
static int otx2_get_link(struct otx2_nic *pfvf)
{
@@ -443,7 +470,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
- req->regval[0] = ((pfvf->netdev->mtu + OTX2_ETH_HLEN) << 8) |
+ req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) |
OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
@@ -529,17 +556,17 @@ int otx2_txschq_stop(struct otx2_nic *pfvf)
struct nix_txsch_free_req *free_req;
int lvl, schq, err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
/* Free the transmit schedulers */
free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
if (!free_req) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
free_req->flags = TXSCHQ_FREE_ALL;
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
/* Clear the txschq list */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
@@ -553,17 +580,19 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
{
int qidx, sqe_tail, sqe_head;
u64 incr, *ptr, val;
+ int timeout = 1000;
ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
incr = (u64)qidx << 32;
- while (1) {
+ while (timeout) {
val = otx2_atomic64_add(incr, ptr);
sqe_head = (val >> 20) & 0x3F;
sqe_tail = (val >> 28) & 0x3F;
if (sqe_head == sqe_tail)
break;
usleep_range(1, 3);
+ timeout--;
}
}
}
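
The loop above reads NIX_LF_SQ_OP_STATUS via an atomic add, with the queue index placed in bits [63:32] of the operand; per the shifts in the code, the returned status word carries the SQE head pointer in bits [25:20] and the tail pointer in bits [33:28]. The change also bounds the wait at roughly 1000 iterations instead of spinning forever. A sketch of the idle test (otx2_sq_is_idle is a hypothetical name):

	/* Idle when the SQE head and tail pointers match; field positions
	 * mirror the shifts used in otx2_sqb_flush() above.
	 */
	static inline bool otx2_sq_is_idle(u64 op_status)
	{
		return ((op_status >> 20) & 0x3F) == ((op_status >> 28) & 0x3F);
	}
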
@@ -580,6 +609,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
* RED accepts pkts if free pointers > 102 & <= 205.
* Drops pkts if free pointers < 102.
*/
+#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
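
Worked out with integer division, the three thresholds land well apart on the hardware's 0-255 free-pointer scale:

	/* Worked out with integer division:
	 *   RQ_BP_LVL_AURA   = 255 - (85 * 256) / 100 = 255 - 217 = 38
	 *   RQ_PASS_LVL_AURA = 255 - (95 * 256) / 100 = 255 - 243 = 12
	 *   RQ_DROP_LVL_AURA = 255 - (99 * 256) / 100 = 255 - 253 = 2
	 * so as free pointers run out, backpressure asserts first,
	 * then RED marking, then drops.
	 */
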
@@ -741,6 +771,13 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
if (qidx < pfvf->hw.rx_queues) {
aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
aq->cq.drop_ena = 1;
+
+ /* Enable receive CQ backpressure */
+ aq->cq.bp_ena = 1;
+ aq->cq.bpid = pfvf->bpid[0];
+
+ /* Set backpressure level same as the CQ pass level */
+ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
}
/* Fill AQ info */
@@ -951,6 +988,7 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
qmem_free(pfvf->dev, pool->fc_addr);
}
devm_kfree(pfvf->dev, pfvf->qset.pool);
+ pfvf->qset.pool = NULL;
}
static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
@@ -996,6 +1034,14 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
aq->aura.fc_addr = pool->fc_addr->iova;
aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
+ /* Enable backpressure for RQ aura */
+ if (aura_id < pfvf->hw.rqpool_cnt) {
+ aq->aura.bp_ena = 0;
+ aq->aura.nix0_bpid = pfvf->bpid[0];
+ /* Set backpressure level for RQ's Aura */
+ aq->aura.bp = RQ_BP_LVL_AURA;
+ }
+
/* Fill AQ info */
aq->ctype = NPA_AQ_CTYPE_AURA;
aq->op = NPA_AQ_INSTOP_INIT;
@@ -1210,10 +1256,10 @@ int otx2_detach_resources(struct mbox *mbox)
{
struct rsrc_detach *detach;
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
detach = otx2_mbox_alloc_msg_detach_resources(mbox);
if (!detach) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return -ENOMEM;
}
@@ -1222,9 +1268,10 @@ int otx2_detach_resources(struct mbox *mbox)
/* Send detach request to AF */
otx2_mbox_msg_send(&mbox->mbox, 0);
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return 0;
}
+EXPORT_SYMBOL(otx2_detach_resources);
int otx2_attach_npa_nix(struct otx2_nic *pfvf)
{
@@ -1232,11 +1279,11 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
struct msg_req *msix;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
/* Get memory to put this msg */
attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
if (!attach) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
@@ -1246,7 +1293,7 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
/* Send attach request to AF */
err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -1261,16 +1308,16 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
/* Get NPA and NIX MSIX vector offsets */
msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
if (!msix) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
@@ -1281,12 +1328,13 @@ int otx2_attach_npa_nix(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_attach_npa_nix);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
{
struct hwctx_disable_req *req;
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* Request AQ to disable this context */
if (npa)
req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
@@ -1294,7 +1342,7 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);
if (!req) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return;
}
@@ -1304,7 +1352,26 @@ void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
__func__);
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
+}
+
+int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
+{
+ struct nix_bp_cfg_req *req;
+
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->chan_base = 0;
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
}
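
Note the caller is expected to hold the mbox lock around the alloc + sync pair, matching every other helper in this file. A hypothetical enable call from the PF bring-up path (example_enable_bp is not part of the patch):

	/* Hypothetical call site: enable RQ backpressure at interface up */
	static int example_enable_bp(struct otx2_nic *pf)
	{
		int err;

		mutex_lock(&pf->mbox.lock);
		err = otx2_nix_config_bp(pf, true);
		if (err)
			dev_warn(pf->dev, "Failed to enable NIX backpressure\n");
		mutex_unlock(&pf->mbox.lock);
		return err;
	}
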
/* Mbox message handlers */
@@ -1330,6 +1397,7 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
pf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq];
}
+EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
struct npa_lf_alloc_rsp *rsp)
@@ -1337,6 +1405,7 @@ void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
}
+EXPORT_SYMBOL(mbox_handler_npa_lf_alloc);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
struct nix_lf_alloc_rsp *rsp)
@@ -1347,6 +1416,7 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
}
+EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
struct msix_offset_rsp *rsp)
@@ -1354,6 +1424,19 @@ void mbox_handler_msix_offset(struct otx2_nic *pfvf,
pfvf->hw.npa_msixoff = rsp->npa_msixoff;
pfvf->hw.nix_msixoff = rsp->nix_msixoff;
}
+EXPORT_SYMBOL(mbox_handler_msix_offset);
+
+void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ int chan, chan_id;
+
+ for (chan = 0; chan < rsp->chan_cnt; chan++) {
+ chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F);
+ pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF;
+ }
+}
+EXPORT_SYMBOL(mbox_handler_nix_bp_enable);
void otx2_free_cints(struct otx2_nic *pfvf, int n)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 320f3b7bf57f..018c283a0ac4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -20,6 +20,8 @@
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
+#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
+#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
@@ -191,6 +193,17 @@ struct otx2_hw {
u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
};
+struct otx2_vf_config {
+ struct otx2_nic *pf;
+ struct delayed_work link_event_work;
+ bool intf_down; /* true when the interface is down */
+};
+
+struct flr_work {
+ struct work_struct work;
+ struct otx2_nic *pf;
+};
+
struct refill_work {
struct delayed_work pool_refill_work;
struct otx2_nic *pf;
@@ -204,6 +217,8 @@ struct otx2_nic {
u16 rbsize; /* Receive buffer size */
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
+#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
u64 flags;
struct otx2_qset qset;
@@ -213,14 +228,23 @@ struct otx2_nic {
/* Mbox */
struct mbox mbox;
+ struct mbox *mbox_pfvf;
struct workqueue_struct *mbox_wq;
+ struct workqueue_struct *mbox_pfvf_wq;
+ u8 total_vfs;
u16 pcifunc; /* RVU PF_FUNC */
+ u16 bpid[NIX_MAX_BPID_CHAN];
+ struct otx2_vf_config *vf_configs;
struct cgx_link_user_info linfo;
u64 reset_count;
struct work_struct reset_task;
+ struct workqueue_struct *flr_wq;
+ struct flr_work *flr_wrk;
struct refill_work *refill_wrk;
+ struct workqueue_struct *otx2_wq;
+ struct work_struct rx_mode_work;
/* Ethtool stuff */
u32 msg_enable;
@@ -229,6 +253,11 @@ struct otx2_nic {
int nix_blkaddr;
};
+static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
+}
+
static inline bool is_96xx_A0(struct pci_dev *pdev)
{
return (pdev->revision == 0x00) &&
@@ -348,21 +377,6 @@ static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}
-static inline void otx2_mbox_lock_init(struct mbox *mbox)
-{
- mutex_init(&mbox->lock);
-}
-
-static inline void otx2_mbox_lock(struct mbox *mbox)
-{
- mutex_lock(&mbox->lock);
-}
-
-static inline void otx2_mbox_unlock(struct mbox *mbox)
-{
- mutex_unlock(&mbox->lock);
-}
-
/* With the absence of API for 128-bit IO memory access for arm64,
* implement required operations at place.
*/
@@ -558,6 +572,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
+int otx2_config_pause_frm(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
@@ -578,6 +593,7 @@ dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
gfp_t gfp);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
+int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
@@ -598,6 +614,8 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp);
+void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
+ struct nix_bp_cfg_rsp *rsp);
/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
@@ -607,6 +625,7 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
+void otx2vf_set_ethtool_ops(struct net_device *netdev);
int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 60fcf82dd8cb..d59f5a9c7273 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -17,6 +17,7 @@
#include "otx2_common.h"
#define DRV_NAME "octeontx2-nicpf"
+#define DRV_VF_NAME "octeontx2-nicvf"
struct otx2_stat {
char name[ETH_GSTRING_LEN];
@@ -63,16 +64,6 @@ static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
-static void otx2_dev_open(struct net_device *netdev)
-{
- otx2_open(netdev);
-}
-
-static void otx2_dev_stop(struct net_device *netdev)
-{
- otx2_stop(netdev);
-}
-
static void otx2_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -232,7 +223,7 @@ static int otx2_set_channels(struct net_device *dev,
return -EINVAL;
if (if_up)
- otx2_dev_stop(dev);
+ dev->netdev_ops->ndo_stop(dev);
err = otx2_set_real_num_queues(dev, channel->tx_count,
channel->rx_count);
@@ -245,7 +236,7 @@ static int otx2_set_channels(struct net_device *dev,
fail:
if (if_up)
- otx2_dev_open(dev);
+ dev->netdev_ops->ndo_open(dev);
netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
pfvf->hw.tx_queues, pfvf->hw.rx_queues);
@@ -253,6 +244,51 @@ fail:
return err;
}
+static void otx2_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_pause_frm_cfg *req, *rsp;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return;
+
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+ if (!req)
+ return;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pause_frm_cfg *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ pause->rx_pause = rsp->rx_pause;
+ pause->tx_pause = rsp->tx_pause;
+ }
+}
+
+static int otx2_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return -EOPNOTSUPP;
+
+ if (pause->rx_pause)
+ pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+
+ if (pause->tx_pause)
+ pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+
+ return otx2_config_pause_frm(pfvf);
+}
+
static void otx2_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -297,14 +333,15 @@ static int otx2_set_ringparam(struct net_device *netdev,
return 0;
if (if_up)
- otx2_dev_stop(netdev);
+ netdev->netdev_ops->ndo_stop(netdev);
/* Assigned to the nearest possible exponent. */
qs->sqe_cnt = tx_count;
qs->rqe_cnt = rx_count;
if (if_up)
- otx2_dev_open(netdev);
+ netdev->netdev_ops->ndo_open(netdev);
+
return 0;
}
@@ -329,17 +366,6 @@ static int otx2_set_coalesce(struct net_device *netdev,
struct otx2_hw *hw = &pfvf->hw;
int qidx;
- if (ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce ||
- ec->rx_coalesce_usecs_irq || ec->rx_max_coalesced_frames_irq ||
- ec->tx_coalesce_usecs_irq || ec->tx_max_coalesced_frames_irq ||
- ec->stats_block_coalesce_usecs || ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low || ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low || ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high || ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high || ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high || ec->rate_sample_interval)
- return -EOPNOTSUPP;
-
if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
return 0;
@@ -631,10 +657,15 @@ static u32 otx2_get_link(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ /* LBK link is internal and always UP */
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return 1;
return pfvf->linfo.link_up;
}
static const struct ethtool_ops otx2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_link = otx2_get_link,
.get_drvinfo = otx2_get_drvinfo,
.get_strings = otx2_get_strings,
@@ -654,9 +685,110 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.set_rxfh = otx2_set_rxfh,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
+ .get_pauseparam = otx2_get_pauseparam,
+ .set_pauseparam = otx2_set_pauseparam,
};
void otx2_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &otx2_ethtool_ops;
}
+
+/* VF's ethtool APIs */
+static void otx2vf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+
+ strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
+ strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
+}
+
+static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int stats;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (stats = 0; stats < otx2_n_dev_stats; stats++) {
+ memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < otx2_n_drv_stats; stats++) {
+ memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ otx2_get_qset_strings(vf, &data, 0);
+
+ strcpy(data, "reset_count");
+ data += ETH_GSTRING_LEN;
+}
+
+static void otx2vf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int stat;
+
+ otx2_get_dev_stats(vf);
+ for (stat = 0; stat < otx2_n_dev_stats; stat++)
+ *(data++) = ((u64 *)&vf->hw.dev_stats)
+ [otx2_dev_stats[stat].index];
+
+ for (stat = 0; stat < otx2_n_drv_stats; stat++)
+ *(data++) = atomic_read(&((atomic_t *)&vf->hw.drv_stats)
+ [otx2_drv_stats[stat].index]);
+
+ otx2_get_qset_stats(vf, stats, &data);
+ *(data++) = vf->reset_count;
+}
+
+static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int qstats_count;
+
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ qstats_count = otx2_n_queue_stats *
+ (vf->hw.rx_queues + vf->hw.tx_queues);
+
+ return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
+}
+
+static const struct ethtool_ops otx2vf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .get_link = otx2_get_link,
+ .get_drvinfo = otx2vf_get_drvinfo,
+ .get_strings = otx2vf_get_strings,
+ .get_ethtool_stats = otx2vf_get_ethtool_stats,
+ .get_sset_count = otx2vf_get_sset_count,
+ .set_channels = otx2_set_channels,
+ .get_channels = otx2_get_channels,
+ .get_rxnfc = otx2_get_rxnfc,
+ .set_rxnfc = otx2_set_rxnfc,
+ .get_rxfh_key_size = otx2_get_rxfh_key_size,
+ .get_rxfh_indir_size = otx2_get_rxfh_indir_size,
+ .get_rxfh = otx2_get_rxfh,
+ .set_rxfh = otx2_set_rxfh,
+ .get_ringparam = otx2_get_ringparam,
+ .set_ringparam = otx2_set_ringparam,
+ .get_coalesce = otx2_get_coalesce,
+ .set_coalesce = otx2_set_coalesce,
+ .get_msglevel = otx2_get_msglevel,
+ .set_msglevel = otx2_set_msglevel,
+ .get_pauseparam = otx2_get_pauseparam,
+ .set_pauseparam = otx2_set_pauseparam,
+};
+
+void otx2vf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &otx2vf_ethtool_ops;
+}
+EXPORT_SYMBOL(otx2vf_set_ethtool_ops);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 85f9b9ba6bd5..411e5ea1031e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -24,7 +24,6 @@
#define DRV_NAME "octeontx2-nicpf"
#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
-#define DRV_VERSION "1.0"
/* Supported devices */
static const struct pci_device_id otx2_pf_id_table[] = {
@@ -32,10 +31,9 @@ static const struct pci_device_id otx2_pf_id_table[] = {
{ 0, } /* end of table */
};
-MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
enum {
@@ -61,6 +59,224 @@ static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
+static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
+{
+ int irq, vfs = pf->total_vfs;
+
+ /* Disable VFs ME interrupts */
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
+ free_irq(irq, pf);
+
+ /* Disable VFs FLR interrupts */
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
+ free_irq(irq, pf);
+
+ if (vfs <= 64)
+ return;
+
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
+ free_irq(irq, pf);
+
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
+ free_irq(irq, pf);
+}
+
+static void otx2_flr_wq_destroy(struct otx2_nic *pf)
+{
+ if (!pf->flr_wq)
+ return;
+ destroy_workqueue(pf->flr_wq);
+ pf->flr_wq = NULL;
+ devm_kfree(pf->dev, pf->flr_wrk);
+}
+
+static void otx2_flr_handler(struct work_struct *work)
+{
+ struct flr_work *flrwork = container_of(work, struct flr_work, work);
+ struct otx2_nic *pf = flrwork->pf;
+ struct mbox *mbox = &pf->mbox;
+ struct msg_req *req;
+ int vf, reg = 0;
+
+ vf = flrwork - pf->flr_wrk;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_vf_flr(mbox);
+ if (!req) {
+ mutex_unlock(&mbox->lock);
+ return;
+ }
+ req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
+ req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
+
+ if (!otx2_sync_mbox_msg(&pf->mbox)) {
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+ /* clear transaction pending bit */
+ otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+ }
+
+ mutex_unlock(&mbox->lock);
+}
+
+static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
+ int reg, dev, vf, start_vf, num_reg = 1;
+ u64 intr;
+
+ if (pf->total_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ continue;
+ start_vf = 64 * reg;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ dev = vf + start_vf;
+ queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
+ /* Clear interrupt */
+ otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+ /* Disable the interrupt */
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
+ BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
+ int vf, reg, num_reg = 1;
+ u64 intr;
+
+ if (pf->total_vfs > 64)
+ num_reg = 2;
+
+ for (reg = 0; reg < num_reg; reg++) {
+ intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
+ if (!intr)
+ continue;
+ for (vf = 0; vf < 64; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ /* clear trpend bit */
+ otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ /* clear interrupt */
+ otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
+{
+ struct otx2_hw *hw = &pf->hw;
+ char *irq_name;
+ int ret;
+
+ /* Register ME interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
+ otx2_pf_me_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for ME0\n");
+ }
+
+ /* Register FLR interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
+ otx2_pf_flr_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for FLR0\n");
+ return ret;
+ }
+
+ if (numvfs > 64) {
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
+ rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector
+ (pf->pdev, RVU_PF_INT_VEC_VFME1),
+ otx2_pf_me_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for ME1\n");
+ }
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
+ rvu_get_pf(pf->pcifunc));
+ ret = request_irq(pci_irq_vector
+ (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
+ otx2_pf_flr_intr_handler, 0, irq_name, pf);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for FLR1\n");
+ return ret;
+ }
+ }
+
+ /* Enable ME interrupt for all VFs */
+ otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+
+ /* Enable FLR interrupt for all VFs */
+ otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+
+ if (numvfs > 64) {
+ numvfs -= 64;
+
+ otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+
+ otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ }
+ return 0;
+}
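
The two-register split mirrors the hardware layout: VFs 0-63 live in register 0 and VFs 64-127 in register 1. A worked example, assuming INTR_MASK(n) expands to BIT_ULL(n) - 1 for n < 64 and all-ones otherwise:

	/* Worked example for numvfs = 96:
	 *   register 0: INTR_MASK(96) -> all 64 bits set (VFs 0-63)
	 *   numvfs -= 64;
	 *   register 1: INTR_MASK(32) -> BIT_ULL(32) - 1 (VFs 64-95)
	 */
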
+
+static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
+{
+ int vf;
+
+ pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
+ WQ_UNBOUND | WQ_HIGHPRI, 1);
+ if (!pf->flr_wq)
+ return -ENOMEM;
+
+ pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
+ sizeof(struct flr_work), GFP_KERNEL);
+ if (!pf->flr_wrk) {
+ destroy_workqueue(pf->flr_wq);
+ return -ENOMEM;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ pf->flr_wrk[vf].pf = pf;
+ INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
+ }
+
+ return 0;
+}
+
static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
int first, int mdevs, u64 intr, int type)
{
@@ -115,9 +331,391 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
}
}
+static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
+ struct otx2_mbox *pfvf_mbox, void *bbuf_base,
+ int devid)
+{
+ struct otx2_mbox_dev *src_mdev = mdev;
+ int offset;
+
+ /* Msgs are already copied, trigger VF's mbox irq */
+ smp_wmb();
+
+ offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
+ writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
+
+ /* Restore VF's mbox bounce buffer region address */
+ src_mdev->mbase = bbuf_base;
+}
+
+static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
+ struct otx2_mbox *src_mbox,
+ int dir, int vf, int num_msgs)
+{
+ struct otx2_mbox_dev *src_mdev, *dst_mdev;
+ struct mbox_hdr *mbox_hdr;
+ struct mbox_hdr *req_hdr;
+ struct mbox *dst_mbox;
+ int dst_size, err;
+
+ if (dir == MBOX_DIR_PFAF) {
+ /* Set VF's mailbox memory as PF's bounce buffer memory, so
+ * that explicit copying of VF's msgs to PF=>AF mbox region
+ * and AF=>PF responses to VF's mbox region can be avoided.
+ */
+ src_mdev = &src_mbox->dev[vf];
+ mbox_hdr = src_mbox->hwbase +
+ src_mbox->rx_start + (vf * MBOX_SIZE);
+
+ dst_mbox = &pf->mbox;
+ dst_size = dst_mbox->mbox.tx_size -
+ ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
+ /* Check if msgs fit into destination area */
+ if (mbox_hdr->msg_size > dst_size)
+ return -EINVAL;
+
+ dst_mdev = &dst_mbox->mbox.dev[0];
+
+ mutex_lock(&pf->mbox.lock);
+ dst_mdev->mbase = src_mdev->mbase;
+ dst_mdev->msg_size = mbox_hdr->msg_size;
+ dst_mdev->num_msgs = num_msgs;
+ err = otx2_sync_mbox_msg(dst_mbox);
+ if (err) {
+ dev_warn(pf->dev,
+ "AF not responding to VF%d messages\n", vf);
+ /* restore PF mbase and exit */
+ dst_mdev->mbase = pf->mbox.bbuf_base;
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+ }
+ /* At this point all VF messages sent to AF have been acked
+ * with proper responses; the responses are already copied to
+ * the VF mailbox, so raise an interrupt to the VF.
+ */
+ req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
+ dst_mbox->mbox.rx_start);
+ req_hdr->num_msgs = num_msgs;
+
+ otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
+ pf->mbox.bbuf_base, vf);
+ mutex_unlock(&pf->mbox.lock);
+ } else if (dir == MBOX_DIR_PFVF_UP) {
+ src_mdev = &src_mbox->dev[0];
+ mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
+ req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
+ src_mbox->rx_start);
+ req_hdr->num_msgs = num_msgs;
+
+ dst_mbox = &pf->mbox_pfvf[0];
+ dst_size = dst_mbox->mbox_up.tx_size -
+ ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
+ /* Check if msgs fit into destination area */
+ if (mbox_hdr->msg_size > dst_size)
+ return -EINVAL;
+
+ dst_mdev = &dst_mbox->mbox_up.dev[vf];
+ dst_mdev->mbase = src_mdev->mbase;
+ dst_mdev->msg_size = mbox_hdr->msg_size;
+ dst_mdev->num_msgs = mbox_hdr->num_msgs;
+ err = otx2_sync_mbox_up_msg(dst_mbox, vf);
+ if (err) {
+ dev_warn(pf->dev,
+ "VF%d is not responding to mailbox\n", vf);
+ return err;
+ }
+ } else if (dir == MBOX_DIR_VFPF_UP) {
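+ /* VF has replied to an up-notification; forward the reply to AF */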
+ req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
+ src_mbox->rx_start);
+ req_hdr->num_msgs = num_msgs;
+ otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
+ &pf->mbox.mbox_up,
+ pf->mbox_pfvf[vf].bbuf_base,
+ 0);
+ }
+
+ return 0;
+}
+
+static void otx2_pfvf_mbox_handler(struct work_struct *work)
+{
+ struct mbox_msghdr *msg = NULL;
+ int offset, vf_idx, id, err;
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *req_hdr;
+ struct otx2_mbox *mbox;
+ struct mbox *vf_mbox;
+ struct otx2_nic *pf;
+
+ vf_mbox = container_of(work, struct mbox, mbox_wrk);
+ pf = vf_mbox->pfvf;
+ vf_idx = vf_mbox - pf->mbox_pfvf;
+
+ mbox = &pf->mbox_pfvf[0].mbox;
+ mdev = &mbox->dev[vf_idx];
+ req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+
+ offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
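+ /* Validate each request and stamp it with the originating VF's id */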
+ for (id = 0; id < vf_mbox->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
+ offset);
+
+ if (msg->sig != OTX2_MBOX_REQ_SIG)
+ goto inval_msg;
+
+ /* Set VF's number in each of the msg */
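+ /* Func number 0 within a PF refers to the PF itself, hence vf_idx + 1 */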
+ msg->pcifunc &= RVU_PFVF_FUNC_MASK;
+ msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
+ offset = msg->next_msgoff;
+ }
+ err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
+ vf_mbox->num_msgs);
+ if (err)
+ goto inval_msg;
+ return;
+
+inval_msg:
+ otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
+ otx2_mbox_msg_send(mbox, vf_idx);
+}
+
+static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
+{
+ struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
+ struct otx2_nic *pf = vf_mbox->pfvf;
+ struct otx2_mbox_dev *mdev;
+ int offset, id, vf_idx = 0;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+
+ vf_idx = vf_mbox - pf->mbox_pfvf;
+ mbox = &pf->mbox_pfvf[0].mbox_up;
+ mdev = &mbox->dev[vf_idx];
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < vf_mbox->up_num_msgs; id++) {
+ msg = mdev->mbase + offset;
+
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(pf->dev,
+ "Mbox msg with unknown ID 0x%x\n", msg->id);
+ goto end;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(pf->dev,
+ "Mbox msg with wrong signature %x, ID 0x%x\n",
+ msg->sig, msg->id);
+ goto end;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ break;
+ default:
+ if (msg->rc)
+ dev_err(pf->dev,
+ "Mbox msg response has err %d, ID 0x%x\n",
+ msg->rc, msg->id);
+ break;
+ }
+
+end:
+ offset = mbox->rx_start + msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+
+ otx2_mbox_reset(mbox, vf_idx);
+}
+
+static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
+ int vfs = pf->total_vfs;
+ struct mbox *mbox;
+ u64 intr;
+
+ mbox = pf->mbox_pfvf;
+ /* Handle VF interrupts */
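+ /* VFs 64-127 are reported via the second 64-bit interrupt register */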
+ if (vfs > 64) {
+ intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
+ otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
+ TYPE_PFVF);
+ vfs -= 64;
+ }
+
+ intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+ otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+
+ return IRQ_HANDLED;
+}
+
+static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
+{
+ void __iomem *hwbase;
+ struct mbox *mbox;
+ int err, vf;
+ u64 base;
+
+ if (!numvfs)
+ return -EINVAL;
+
+ pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
+ sizeof(struct mbox), GFP_KERNEL);
+ if (!pf->mbox_pfvf)
+ return -ENOMEM;
+
+ pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!pf->mbox_pfvf_wq)
+ return -ENOMEM;
+
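+ /* AF publishes the base address of the VF mailbox region in this CSR */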
+ base = readq((void __iomem *)((u64)pf->reg_base + RVU_PF_VF_BAR4_ADDR));
+ hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
+
+ if (!hwbase) {
+ err = -ENOMEM;
+ goto free_wq;
+ }
+
+ mbox = &pf->mbox_pfvf[0];
+ err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFVF, numvfs);
+ if (err)
+ goto free_iomem;
+
+ err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
+ MBOX_DIR_PFVF_UP, numvfs);
+ if (err)
+ goto free_iomem;
+
+ for (vf = 0; vf < numvfs; vf++) {
+ mbox->pfvf = pf;
+ INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
+ INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
+ mbox++;
+ }
+
+ return 0;
+
+free_iomem:
+ if (hwbase)
+ iounmap(hwbase);
+free_wq:
+ destroy_workqueue(pf->mbox_pfvf_wq);
+ return err;
+}
+
+static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox_pfvf[0];
+
+ if (!mbox)
+ return;
+
+ if (pf->mbox_pfvf_wq) {
+ destroy_workqueue(pf->mbox_pfvf_wq);
+ pf->mbox_pfvf_wq = NULL;
+ }
+
+ if (mbox->mbox.hwbase)
+ iounmap(mbox->mbox.hwbase);
+
+ otx2_mbox_destroy(&mbox->mbox);
+}
+
+static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ /* Clear PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
+
+ /* Enable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+ if (numvfs > 64) {
+ numvfs -= 64;
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ }
+}
+
+static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ int vector;
+
+ /* Disable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
+
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
+ vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
+ free_irq(vector, pf);
+
+ if (numvfs > 64) {
+ otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
+ vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
+ free_irq(vector, pf);
+ }
+}
+
+static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ struct otx2_hw *hw = &pf->hw;
+ char *irq_name;
+ int err;
+
+ /* Register MBOX0 interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
+ if (pf->pcifunc)
+ snprintf(irq_name, NAME_SIZE,
+ "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
+ else
+ snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
+ err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
+ otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
+ return err;
+ }
+
+ if (numvfs > 64) {
+ /* Register MBOX1 interrupt handler */
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
+ if (pf->pcifunc)
+ snprintf(irq_name, NAME_SIZE,
+ "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
+ else
+ snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
+ err = request_irq(pci_irq_vector(pf->pdev,
+ RVU_PF_INT_VEC_VFPF_MBOX1),
+ otx2_pfvf_mbox_intr_handler,
+ 0, irq_name, pf);
+ if (err) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
+ return err;
+ }
+ }
+
+ otx2_enable_pfvf_mbox_intr(pf, numvfs);
+
+ return 0;
+}
+
static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
struct mbox_msghdr *msg)
{
+ int devid;
+
if (msg->id >= MBOX_MSG_MAX) {
dev_err(pf->dev,
"Mbox msg with unknown ID 0x%x\n", msg->id);
@@ -131,6 +729,26 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
return;
}
+ /* Response to a message that was sent by a VF; forward it to the VF */
+ devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
+ if (devid) {
+ struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
+ struct delayed_work *dwork;
+
+ switch (msg->id) {
+ case MBOX_MSG_NIX_LF_START_RX:
+ config->intf_down = false;
+ dwork = &config->link_event_work;
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ break;
+ case MBOX_MSG_NIX_LF_STOP_RX:
+ config->intf_down = true;
+ break;
+ }
+
+ return;
+ }
+
switch (msg->id) {
case MBOX_MSG_READY:
pf->pcifunc = msg->pcifunc;
@@ -148,6 +766,9 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
mbox_handler_nix_txsch_alloc(pf,
(struct nix_txsch_alloc_rsp *)msg);
break;
+ case MBOX_MSG_NIX_BP_ENABLE:
+ mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
+ break;
case MBOX_MSG_CGX_STATS:
mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
break;
@@ -209,9 +830,22 @@ int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
struct cgx_link_info_msg *msg,
struct msg_rsp *rsp)
{
+ int i;
+
/* Copy the link info sent by AF */
pf->linfo = msg->link_info;
+ /* notify VFs about link event */
+ for (i = 0; i < pci_num_vf(pf->pdev); i++) {
+ struct otx2_vf_config *config = &pf->vf_configs[i];
+ struct delayed_work *dwork = &config->link_event_work;
+
+ if (config->intf_down)
+ continue;
+
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ }
+
/* interface has not been fully configured yet */
if (pf->flags & OTX2_FLAG_INTF_DOWN)
return 0;
@@ -283,6 +917,12 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
otx2_process_mbox_msg_up(pf, msg);
offset = mbox->rx_start + msg->next_msgoff;
}
+ if (devid) {
+ otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
+ MBOX_DIR_PFVF_UP, devid - 1,
+ af_mbox->up_num_msgs);
+ return;
+ }
otx2_mbox_msg_send(mbox, 0);
}
@@ -359,7 +999,6 @@ static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
struct mbox *mbox = &pf->mbox;
if (pf->mbox_wq) {
- flush_workqueue(pf->mbox_wq);
destroy_workqueue(pf->mbox_wq);
pf->mbox_wq = NULL;
}
@@ -412,7 +1051,7 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
- otx2_mbox_lock_init(&pf->mbox);
+ mutex_init(&mbox->lock);
return 0;
exit:
@@ -425,19 +1064,19 @@ static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
- otx2_mbox_lock(&pf->mbox);
+ mutex_lock(&pf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
else
msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
if (!msg) {
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pf->mbox);
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return err;
}
@@ -446,19 +1085,19 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
- otx2_mbox_lock(&pf->mbox);
+ mutex_lock(&pf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
else
msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
if (!msg) {
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pf->mbox);
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return err;
}
@@ -480,6 +1119,7 @@ int otx2_set_real_num_queues(struct net_device *netdev,
"Failed to set no of Rx queues: %d\n", rx_queues);
return err;
}
+EXPORT_SYMBOL(otx2_set_real_num_queues);
static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
@@ -643,7 +1283,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
/* Get the size of receive buffers to allocate */
pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* NPA init */
err = otx2_config_npa(pf);
if (err)
@@ -654,38 +1294,41 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
if (err)
goto err_free_npa_lf;
+ /* Enable backpressure */
+ otx2_nix_config_bp(pf, true);
+
/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
err = otx2_rq_aura_pool_init(pf);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_nix_lf;
}
/* Init Auras and pools used by NIX SQ, for queueing SQEs */
err = otx2_sq_aura_pool_init(pf);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_rq_ptrs;
}
err = otx2_txsch_alloc(pf);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_sq_ptrs;
}
err = otx2_config_nix_queues(pf);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_txsch;
}
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
err = otx2_txschq_config(pf, lvl);
if (err) {
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
goto err_free_nix_queues;
}
}
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return err;
err_free_nix_queues:
@@ -703,7 +1346,7 @@ err_free_rq_ptrs:
otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
otx2_aura_pool_free(pf);
err_free_nix_lf:
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
if (req) {
if (otx2_sync_mbox_msg(mbox))
@@ -717,7 +1360,7 @@ err_free_npa_lf:
dev_err(pf->dev, "%s failed to free npalf\n", __func__);
}
exit:
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
return err;
}
@@ -737,6 +1380,12 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
if (err)
dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
+ mutex_lock(&mbox->lock);
+ /* Disable backpressure */
+ if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
+ otx2_nix_config_bp(pf, false);
+ mutex_unlock(&mbox->lock);
+
/* Disable RQs */
otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
@@ -756,28 +1405,28 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_free_cq_res(pf);
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* Reset NIX LF */
req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
if (req) {
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
/* Disable NPA Pool and Aura hw context */
otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
otx2_aura_pool_free(pf);
- otx2_mbox_lock(mbox);
+ mutex_lock(&mbox->lock);
/* Reset NPA LF */
req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
if (req) {
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free npalf\n", __func__);
}
- otx2_mbox_unlock(mbox);
+ mutex_unlock(&mbox->lock);
}
int otx2_open(struct net_device *netdev)
@@ -906,6 +1555,9 @@ int otx2_open(struct net_device *netdev)
if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_handle_link_event(pf);
+ /* Restore pause frame settings */
+ otx2_config_pause_frm(pf);
+
err = otx2_rxtx_enable(pf, true);
if (err)
goto err_free_cints;
@@ -929,6 +1581,7 @@ err_free_mem:
kfree(qset->napi);
return err;
}
+EXPORT_SYMBOL(otx2_open);
int otx2_stop(struct net_device *netdev)
{
@@ -989,6 +1642,7 @@ int otx2_stop(struct net_device *netdev)
sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
return 0;
}
+EXPORT_SYMBOL(otx2_stop);
static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
{
@@ -1025,15 +1679,23 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
static void otx2_set_rx_mode(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
+
+ queue_work(pf->otx2_wq, &pf->rx_mode_work);
+}
+
+static void otx2_do_set_rx_mode(struct work_struct *work)
+{
+ struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
+ struct net_device *netdev = pf->netdev;
struct nix_rx_mode *req;
if (!(netdev->flags & IFF_UP))
return;
- otx2_mbox_lock(&pf->mbox);
+ mutex_lock(&pf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
if (!req) {
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
return;
}
@@ -1046,7 +1708,7 @@ static void otx2_set_rx_mode(struct net_device *netdev)
req->mode |= NIX_RX_MODE_ALLMULTI;
otx2_sync_mbox_msg(&pf->mbox);
- otx2_mbox_unlock(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
}
static int otx2_set_features(struct net_device *netdev,
@@ -1086,6 +1748,17 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_get_stats64 = otx2_get_stats64,
};
+static int otx2_wq_init(struct otx2_nic *pf)
+{
+ pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
+ if (!pf->otx2_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
+ INIT_WORK(&pf->reset_task, otx2_reset_task);
+ return 0;
+}
+
static int otx2_check_pf_usable(struct otx2_nic *nic)
{
u64 rev;
@@ -1117,7 +1790,6 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
otx2_disable_mbox_intr(pf);
pci_free_irq_vectors(hw->pdev);
- pci_free_irq_vectors(hw->pdev);
err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
if (err < 0) {
dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
@@ -1172,6 +1844,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pf->netdev = netdev;
pf->pdev = pdev;
pf->dev = dev;
+ pf->total_vfs = pci_sriov_get_totalvfs(pdev);
pf->flags |= OTX2_FLAG_INTF_DOWN;
hw = &pf->hw;
@@ -1270,21 +1943,29 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = OTX2_MAX_MTU;
- INIT_WORK(&pf->reset_task, otx2_reset_task);
-
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
goto err_detach_rsrc;
}
+ err = otx2_wq_init(pf);
+ if (err)
+ goto err_unreg_netdev;
+
otx2_set_ethtool_ops(netdev);
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
+ /* Enable pause frames by default */
+ pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
+ pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+
return 0;
+err_unreg_netdev:
+ unregister_netdev(netdev);
err_detach_rsrc:
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
@@ -1301,6 +1982,121 @@ err_release_regions:
return err;
}
+static void otx2_vf_link_event_task(struct work_struct *work)
+{
+ struct otx2_vf_config *config;
+ struct cgx_link_info_msg *req;
+ struct mbox_msghdr *msghdr;
+ struct otx2_nic *pf;
+ int vf_idx;
+
+ config = container_of(work, struct otx2_vf_config,
+ link_event_work.work);
+ vf_idx = config - config->pf->vf_configs;
+ pf = config->pf;
+
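+ /* Build a CGX_LINK_EVENT notification from the PF's cached link info */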
+ msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
+ sizeof(*req), sizeof(struct msg_rsp));
+ if (!msghdr) {
+ dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
+ return;
+ }
+
+ req = (struct cgx_link_info_msg *)msghdr;
+ req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
+
+ otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
+}
+
+static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int ret, i;
+
+ /* Init PF <=> VF mailbox stuff */
+ ret = otx2_pfvf_mbox_init(pf, numvfs);
+ if (ret)
+ return ret;
+
+ ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
+ if (ret)
+ goto free_mbox;
+
+ pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config),
+ GFP_KERNEL);
+ if (!pf->vf_configs) {
+ ret = -ENOMEM;
+ goto free_intr;
+ }
+
+ for (i = 0; i < numvfs; i++) {
+ pf->vf_configs[i].pf = pf;
+ pf->vf_configs[i].intf_down = true;
+ INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
+ otx2_vf_link_event_task);
+ }
+
+ ret = otx2_pf_flr_init(pf, numvfs);
+ if (ret)
+ goto free_configs;
+
+ ret = otx2_register_flr_me_intr(pf, numvfs);
+ if (ret)
+ goto free_flr;
+
+ ret = pci_enable_sriov(pdev, numvfs);
+ if (ret)
+ goto free_flr_intr;
+
+ return numvfs;
+free_flr_intr:
+ otx2_disable_flr_me_intr(pf);
+free_flr:
+ otx2_flr_wq_destroy(pf);
+free_configs:
+ kfree(pf->vf_configs);
+free_intr:
+ otx2_disable_pfvf_mbox_intr(pf, numvfs);
+free_mbox:
+ otx2_pfvf_mbox_destroy(pf);
+ return ret;
+}
+
+static int otx2_sriov_disable(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int numvfs = pci_num_vf(pdev);
+ int i;
+
+ if (!numvfs)
+ return 0;
+
+ pci_disable_sriov(pdev);
+
+ /* pci_num_vf() returns 0 once SR-IOV is disabled; use the saved count */
+ for (i = 0; i < numvfs; i++)
+ cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
+ kfree(pf->vf_configs);
+
+ otx2_disable_flr_me_intr(pf);
+ otx2_flr_wq_destroy(pf);
+ otx2_disable_pfvf_mbox_intr(pf, numvfs);
+ otx2_pfvf_mbox_destroy(pf);
+
+ return 0;
+}
+
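+/* Called by the PCI core when the sriov_numvfs sysfs attribute is written */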
+static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ if (numvfs == 0)
+ return otx2_sriov_disable(pdev);
+ else
+ return otx2_sriov_enable(pdev, numvfs);
+}
+
static void otx2_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -1315,6 +2111,10 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_cgx_config_linkevents(pf, false);
unregister_netdev(netdev);
+ otx2_sriov_disable(pf->pdev);
+ if (pf->otx2_wq)
+ destroy_workqueue(pf->otx2_wq);
+
otx2_detach_resources(&pf->mbox);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
@@ -1331,6 +2131,7 @@ static struct pci_driver otx2_pf_driver = {
.probe = otx2_probe,
.shutdown = otx2_remove,
.remove = otx2_remove,
+ .sriov_configure = otx2_sriov_configure
};
static int __init otx2_rvupf_init_module(void)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index 7963d418886a..867f646e0802 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -45,6 +45,19 @@
#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+/* RVU VF registers */
+#define RVU_VF_VFPF_MBOX0 (0x00000)
+#define RVU_VF_VFPF_MBOX1 (0x00008)
+#define RVU_VF_VFPF_MBOXX(a) (0x00 | (a) << 3)
+#define RVU_VF_INT (0x20)
+#define RVU_VF_INT_W1S (0x28)
+#define RVU_VF_INT_ENA_W1S (0x30)
+#define RVU_VF_INT_ENA_W1C (0x38)
+#define RVU_VF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3)
+#define RVU_VF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
+#define RVU_VF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
+#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+
#define RVU_FUNC_BLKADDR_SHIFT 20
#define RVU_FUNC_BLKADDR_MASK 0x1FULL
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index bef4c20fe314..45abe0cd0e7b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -138,6 +138,25 @@ static void otx2_set_rxhash(struct otx2_nic *pfvf,
skb_set_hash(skb, hash, hash_type);
}
+static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
+ int qidx)
+{
+ struct nix_rx_sg_s *sg = &cqe->sg;
+ void *end, *start;
+ u64 *seg_addr;
+ int seg;
+
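+ /* desc_sizem1 gives the descriptor area size in units of 16 bytes */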
+ start = (void *)sg;
+ end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
+ while (start < end) {
+ sg = (struct nix_rx_sg_s *)start;
+ seg_addr = &sg->seg_addr;
+ for (seg = 0; seg < sg->segs; seg++, seg_addr++)
+ otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL);
+ start += sizeof(*sg);
+ }
+}
+
static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
struct nix_cqe_rx_s *cqe, int qidx)
{
@@ -189,16 +208,17 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
/* For now ignore all the NPC parser errors and
* pass the packets to stack.
*/
- return false;
+ if (cqe->sg.segs == 1)
+ return false;
}
/* If RXALL is enabled pass on packets to stack. */
- if (cqe->sg.segs && (pfvf->netdev->features & NETIF_F_RXALL))
+ if (cqe->sg.segs == 1 && (pfvf->netdev->features & NETIF_F_RXALL))
return false;
/* Free buffer back to pool */
if (cqe->sg.segs)
- otx2_aura_freeptr(pfvf, qidx, cqe->sg.seg_addr & ~0x07ULL);
+ otx2_free_rcv_seg(pfvf, cqe, qidx);
return true;
}
@@ -210,7 +230,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_rx_parse_s *parse = &cqe->parse;
struct sk_buff *skb = NULL;
- if (unlikely(parse->errlev || parse->errcode)) {
+ if (unlikely(parse->errlev || parse->errcode || cqe->sg.segs > 1)) {
if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
return;
}
@@ -284,6 +304,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
+ otx2_get_page(cq->rbpool);
return processed_cqe;
}
@@ -778,6 +799,7 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
return true;
}
+EXPORT_SYMBOL(otx2_sq_append_skb);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
@@ -788,11 +810,15 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
if (!cqe->sg.subdc)
continue;
+ processed_cqe++;
+ if (cqe->sg.segs > 1) {
+ otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
+ continue;
+ }
iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
put_page(virt_to_page(phys_to_virt(pa)));
- processed_cqe++;
}
/* Free CQEs to HW */
@@ -831,18 +857,18 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
struct msg_req *msg;
int err;
- otx2_mbox_lock(&pfvf->mbox);
+ mutex_lock(&pfvf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
else
msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
if (!msg) {
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pfvf->mbox);
- otx2_mbox_unlock(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
return err;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
new file mode 100644
index 000000000000..187c633a7af5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Virtual Function ethernet driver */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "otx2_common.h"
+#include "otx2_reg.h"
+
+#define DRV_NAME "octeontx2-nicvf"
+#define DRV_STRING "Marvell OcteonTX2 NIC Virtual Function Driver"
+
+static const struct pci_device_id otx2_vf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
+ { }
+};
+
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, otx2_vf_id_table);
+
+/* RVU VF Interrupt Vector Enumeration */
+enum {
+ RVU_VF_INT_VEC_MBOX = 0x0,
+};
+
+static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
+ struct mbox_msghdr *msg)
+{
+ if (msg->id >= MBOX_MSG_MAX) {
+ dev_err(vf->dev,
+ "Mbox msg with unknown ID %d\n", msg->id);
+ return;
+ }
+
+ if (msg->sig != OTX2_MBOX_RSP_SIG) {
+ dev_err(vf->dev,
+ "Mbox msg with wrong signature %x, ID %d\n",
+ msg->sig, msg->id);
+ return;
+ }
+
+ if (msg->rc == MBOX_MSG_INVALID) {
+ dev_err(vf->dev,
+ "PF/AF says the sent msg(s) %d were invalid\n",
+ msg->id);
+ return;
+ }
+
+ switch (msg->id) {
+ case MBOX_MSG_READY:
+ vf->pcifunc = msg->pcifunc;
+ break;
+ case MBOX_MSG_MSIX_OFFSET:
+ mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg);
+ break;
+ case MBOX_MSG_NPA_LF_ALLOC:
+ mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_LF_ALLOC:
+ mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_TXSCH_ALLOC:
+ mbox_handler_nix_txsch_alloc(vf,
+ (struct nix_txsch_alloc_rsp *)msg);
+ break;
+ case MBOX_MSG_NIX_BP_ENABLE:
+ mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
+ break;
+ default:
+ if (msg->rc)
+ dev_err(vf->dev,
+ "Mbox msg response has err %d, ID %d\n",
+ msg->rc, msg->id);
+ }
+}
+
+static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
+{
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ struct mbox *af_mbox;
+ int offset, id;
+
+ af_mbox = container_of(work, struct mbox, mbox_wrk);
+ mbox = &af_mbox->mbox;
+ mdev = &mbox->dev[0];
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (af_mbox->num_msgs == 0)
+ return;
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < af_mbox->num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ mdev->msgs_acked++;
+ }
+
+ otx2_mbox_reset(mbox, 0);
+}
+
+static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
+ struct mbox_msghdr *req)
+{
+ struct msg_rsp *rsp;
+ int err;
+
+ /* Check if valid, if not reply with an invalid msg */
+ if (req->sig != OTX2_MBOX_REQ_SIG) {
+ otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+
+ switch (req->id) {
+ case MBOX_MSG_CGX_LINK_EVENT:
+ rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
+ &vf->mbox.mbox_up, 0,
+ sizeof(struct msg_rsp));
+ if (!rsp)
+ return -ENOMEM;
+
+ rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
+ rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
+ rsp->hdr.pcifunc = 0;
+ rsp->hdr.rc = 0;
+ err = otx2_mbox_up_handler_cgx_link_event(
+ vf, (struct cgx_link_info_msg *)req, rsp);
+ return err;
+ default:
+ otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
+{
+ struct otx2_mbox_dev *mdev;
+ struct mbox_hdr *rsp_hdr;
+ struct mbox_msghdr *msg;
+ struct otx2_mbox *mbox;
+ struct mbox *vf_mbox;
+ struct otx2_nic *vf;
+ int offset, id;
+
+ vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
+ vf = vf_mbox->pfvf;
+ mbox = &vf_mbox->mbox_up;
+ mdev = &mbox->dev[0];
+
+ rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (vf_mbox->up_num_msgs == 0)
+ return;
+
+ offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+
+ for (id = 0; id < vf_mbox->up_num_msgs; id++) {
+ msg = (struct mbox_msghdr *)(mdev->mbase + offset);
+ otx2vf_process_mbox_msg_up(vf, msg);
+ offset = mbox->rx_start + msg->next_msgoff;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+}
+
+static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
+{
+ struct otx2_nic *vf = (struct otx2_nic *)vf_irq;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+
+ /* Clear the IRQ */
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ /* Check for PF => VF response messages */
+ mbox = &vf->mbox.mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs) {
+ vf->mbox.num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+ }
+ /* Check for PF => VF notification messages */
+ mbox = &vf->mbox.mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs) {
+ vf->mbox.up_num_msgs = hdr->num_msgs;
+ hdr->num_msgs = 0;
+ memset(mbox->hwbase + mbox->rx_start, 0,
+ ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
+{
+ int vector = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX);
+
+ /* Disable VF => PF mailbox IRQ */
+ otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
+ free_irq(vector, vf);
+}
+
+static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
+{
+ struct otx2_hw *hw = &vf->hw;
+ struct msg_req *req;
+ char *irq_name;
+ int err;
+
+ /* Register mailbox interrupt handler */
+ irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
+ err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+ otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+ if (err) {
+ dev_err(vf->dev,
+ "RVUPF: IRQ registration failed for VFAF mbox irq\n");
+ return err;
+ }
+
+ /* Enable mailbox interrupt for msgs coming from PF.
+ * First clear to avoid spurious interrupts, if any.
+ */
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+
+ if (!probe_pf)
+ return 0;
+
+ /* Check mailbox communication with PF */
+ req = otx2_mbox_alloc_msg_ready(&vf->mbox);
+ if (!req) {
+ otx2vf_disable_mbox_intr(vf);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&vf->mbox);
+ if (err) {
+ dev_warn(vf->dev,
+ "AF not responding to mailbox, deferring probe\n");
+ otx2vf_disable_mbox_intr(vf);
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
+static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
+{
+ struct mbox *mbox = &vf->mbox;
+
+ if (vf->mbox_wq) {
+ flush_workqueue(vf->mbox_wq);
+ destroy_workqueue(vf->mbox_wq);
+ vf->mbox_wq = NULL;
+ }
+
+ if (mbox->mbox.hwbase)
+ iounmap((void __iomem *)mbox->mbox.hwbase);
+
+ otx2_mbox_destroy(&mbox->mbox);
+ otx2_mbox_destroy(&mbox->mbox_up);
+}
+
+static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
+{
+ struct mbox *mbox = &vf->mbox;
+ void __iomem *hwbase;
+ int err;
+
+ mbox->pfvf = vf;
+ vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 1);
+ if (!vf->mbox_wq)
+ return -ENOMEM;
+
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * admin function (i.e., PF0) and this VF; it shouldn't be mapped as
+ * device memory to allow unaligned accesses.
+ */
+ hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM),
+ pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM));
+ if (!hwbase) {
+ dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
+ MBOX_DIR_VFPF, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base,
+ MBOX_DIR_VFPF_UP, 1);
+ if (err)
+ goto exit;
+
+ err = otx2_mbox_bbuf_init(mbox, vf->pdev);
+ if (err)
+ goto exit;
+
+ INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler);
+ INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler);
+ mutex_init(&mbox->lock);
+
+ return 0;
+exit:
+ destroy_workqueue(vf->mbox_wq);
+ return err;
+}
+
+static int otx2vf_open(struct net_device *netdev)
+{
+ struct otx2_nic *vf;
+ int err;
+
+ err = otx2_open(netdev);
+ if (err)
+ return err;
+
+ /* LBKs do not receive link events so tell everyone we are up here */
+ vf = netdev_priv(netdev);
+ if (is_otx2_lbkvf(vf->pdev)) {
+ pr_info("%s NIC Link is UP\n", netdev->name);
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+ }
+
+ return 0;
+}
+
+static int otx2vf_stop(struct net_device *netdev)
+{
+ return otx2_stop(netdev);
+}
+
+static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+ int qidx = skb_get_queue_mapping(skb);
+ struct otx2_snd_queue *sq;
+ struct netdev_queue *txq;
+
+ sq = &vf->qset.sq[qidx];
+ txq = netdev_get_tx_queue(netdev, qidx);
+
+ if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+ netif_tx_stop_queue(txq);
+
+ /* Check again, in case SQBs got freed up */
+ smp_mb();
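+ /* Wake the queue if enough SQEs are free again */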
+ if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
+ > sq->sqe_thresh)
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ bool if_up = netif_running(netdev);
+ int err = 0;
+
+ if (if_up)
+ otx2vf_stop(netdev);
+
+ netdev_info(netdev, "Changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+
+ if (if_up)
+ err = otx2vf_open(netdev);
+
+ return err;
+}
+
+static void otx2vf_reset_task(struct work_struct *work)
+{
+ struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);
+
+ rtnl_lock();
+
+ if (netif_running(vf->netdev)) {
+ otx2vf_stop(vf->netdev);
+ vf->reset_count++;
+ otx2vf_open(vf->netdev);
+ }
+
+ rtnl_unlock();
+}
+
+static const struct net_device_ops otx2vf_netdev_ops = {
+ .ndo_open = otx2vf_open,
+ .ndo_stop = otx2vf_stop,
+ .ndo_start_xmit = otx2vf_xmit,
+ .ndo_set_mac_address = otx2_set_mac_address,
+ .ndo_change_mtu = otx2vf_change_mtu,
+ .ndo_get_stats64 = otx2_get_stats64,
+ .ndo_tx_timeout = otx2_tx_timeout,
+};
+
+static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
+{
+ struct otx2_hw *hw = &vf->hw;
+ int num_vec, err;
+
+ num_vec = hw->nix_msixoff;
+ num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
+
+ otx2vf_disable_mbox_intr(vf);
+ pci_free_irq_vectors(hw->pdev);
+ err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n",
+ __func__, num_vec);
+ return err;
+ }
+
+ return otx2vf_register_mbox_intr(vf, false);
+}
+
+static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int num_vec = pci_msix_vec_count(pdev);
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct otx2_nic *vf;
+ struct otx2_hw *hw;
+ int err, qcount;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
+ pci_set_master(pdev);
+
+ qcount = num_online_cpus();
+ netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ vf = netdev_priv(netdev);
+ vf->netdev = netdev;
+ vf->pdev = pdev;
+ vf->dev = dev;
+ vf->iommu_domain = iommu_get_domain_for_dev(dev);
+
+ vf->flags |= OTX2_FLAG_INTF_DOWN;
+ hw = &vf->hw;
+ hw->pdev = vf->pdev;
+ hw->rx_queues = qcount;
+ hw->tx_queues = qcount;
+ hw->max_queues = qcount;
+
+ hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
+ GFP_KERNEL);
+ if (!hw->irq_name) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
+ sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!hw->affinity_mask) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
+ if (err < 0) {
+ dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
+ __func__, num_vec);
+ goto err_free_netdev;
+ }
+
+ vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!vf->reg_base) {
+ dev_err(dev, "Unable to map physical function CSRs, aborting\n");
+ err = -ENOMEM;
+ goto err_free_irq_vectors;
+ }
+
+ /* Init VF <=> PF mailbox stuff */
+ err = otx2vf_vfaf_mbox_init(vf);
+ if (err)
+ goto err_free_irq_vectors;
+
+ /* Register mailbox interrupt */
+ err = otx2vf_register_mbox_intr(vf, true);
+ if (err)
+ goto err_mbox_destroy;
+
+ /* Request AF to attach NPA and NIX LFs to this VF */
+ err = otx2_attach_npa_nix(vf);
+ if (err)
+ goto err_disable_mbox_intr;
+
+ err = otx2vf_realloc_msix_vectors(vf);
+ if (err)
+ goto err_mbox_destroy;
+
+ err = otx2_set_real_num_queues(netdev, qcount, qcount);
+ if (err)
+ goto err_detach_rsrc;
+
+ otx2_setup_dev_hw_settings(vf);
+
+ /* Assign default mac address */
+ otx2_get_mac_from_af(netdev);
+
+ netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+ netdev->features = netdev->hw_features;
+
+ netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
+
+ netdev->netdev_ops = &otx2vf_netdev_ops;
+
+ /* MTU range: 68 - 9190 */
+ netdev->min_mtu = OTX2_MIN_MTU;
+ netdev->max_mtu = OTX2_MAX_MTU;
+
+ INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+
+ /* To make LBK VFs distinguishable, set their netdev names explicitly */
+ if (is_otx2_lbkvf(vf->pdev)) {
+ int n;
+
+ n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
+ /* Need to subtract 1 to get proper VF number */
+ n -= 1;
+ snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(dev, "Failed to register netdevice\n");
+ goto err_detach_rsrc;
+ }
+
+ otx2vf_set_ethtool_ops(netdev);
+
+ /* Enable pause frames by default */
+ vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
+ vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+
+ return 0;
+
+err_detach_rsrc:
+ otx2_detach_resources(&vf->mbox);
+err_disable_mbox_intr:
+ otx2vf_disable_mbox_intr(vf);
+err_mbox_destroy:
+ otx2vf_vfaf_mbox_destroy(vf);
+err_free_irq_vectors:
+ pci_free_irq_vectors(hw->pdev);
+err_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_release_regions:
+ pci_release_regions(pdev);
+ return err;
+}
+
+static void otx2vf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct otx2_nic *vf;
+
+ if (!netdev)
+ return;
+
+ vf = netdev_priv(netdev);
+
+ otx2vf_disable_mbox_intr(vf);
+
+ otx2_detach_resources(&vf->mbox);
+ otx2vf_vfaf_mbox_destroy(vf);
+ pci_free_irq_vectors(vf->pdev);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+
+ pci_release_regions(pdev);
+}
+
+static struct pci_driver otx2vf_driver = {
+ .name = DRV_NAME,
+ .id_table = otx2_vf_id_table,
+ .probe = otx2vf_probe,
+ .remove = otx2vf_remove,
+ .shutdown = otx2vf_remove,
+};
+
+static int __init otx2vf_init_module(void)
+{
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ return pci_register_driver(&otx2vf_driver);
+}
+
+static void __exit otx2vf_cleanup_module(void)
+{
+ pci_unregister_driver(&otx2vf_driver);
+}
+
+module_init(otx2vf_init_module);
+module_exit(otx2vf_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 97f270d30cce..3c89206f18a7 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -876,6 +876,7 @@ static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
}
static const struct ethtool_ops skge_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = skge_get_drvinfo,
.get_regs_len = skge_get_regs_len,
.get_regs = skge_get_regs,
diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h
index 6fa7b6a34c08..6928abcec0a3 100644
--- a/drivers/net/ethernet/marvell/skge.h
+++ b/drivers/net/ethernet/marvell/skge.h
@@ -15,12 +15,6 @@
#define PCI_VPD_ROM_SZ 7L<<14 /* VPD ROM size 0=256, 1=512, ... */
#define PCI_REV_DESC 1<<2 /* Reverse Descriptor bytes */
-#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
- PCI_STATUS_SIG_SYSTEM_ERROR | \
- PCI_STATUS_REC_MASTER_ABORT | \
- PCI_STATUS_REC_TARGET_ABORT | \
- PCI_STATUS_PARITY)
-
enum csr_regs {
B0_RAP = 0x0000,
B0_CTST = 0x0004,
@@ -2426,7 +2420,7 @@ struct skge_hw {
spinlock_t phy_lock;
struct tasklet_struct phy_task;
- char irq_name[0]; /* skge@pci:000:04:00.0 */
+ char irq_name[]; /* skge@pci:000:04:00.0 */
};
enum pause_control {
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index ebfd0ceac884..241f00716979 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4400,6 +4400,10 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
}
static const struct ethtool_ops sky2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_RX_USECS_IRQ |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ,
.get_drvinfo = sky2_get_drvinfo,
.get_wol = sky2_get_wol,
.set_wol = sky2_set_wol,
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index b02b6523083c..b2dddd8a246c 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -252,12 +252,6 @@ enum {
};
-#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
- PCI_STATUS_SIG_SYSTEM_ERROR | \
- PCI_STATUS_REC_MASTER_ABORT | \
- PCI_STATUS_REC_TARGET_ABORT | \
- PCI_STATUS_PARITY)
-
enum csr_regs {
B0_RAP = 0x0000,
B0_CTST = 0x0004,
@@ -2309,7 +2303,7 @@ struct sky2_hw {
struct work_struct restart_work;
wait_queue_head_t msi_wait;
- char irq_name[0];
+ char irq_name[];
};
static inline int sky2_is_copper(const struct sky2_hw *hw)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 8c6cfd15481c..8d28f90acfe7 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -412,9 +412,10 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
-static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phy)
+static void mtk_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index 64ed725aec28..73eae80e1cb7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -38,8 +38,21 @@
#define CR_ENABLE_BIT_OFFSET 0xF3F04
#define MAX_NUM_OF_DUMPS_TO_STORE (8)
-static const char *region_cr_space_str = "cr-space";
-static const char *region_fw_health_str = "fw-health";
+#define REGION_CR_SPACE "cr-space"
+#define REGION_FW_HEALTH "fw-health"
+
+static const char * const region_cr_space_str = REGION_CR_SPACE;
+static const char * const region_fw_health_str = REGION_FW_HEALTH;
+
+static const struct devlink_region_ops region_cr_space_ops = {
+ .name = REGION_CR_SPACE,
+ .destructor = &kvfree,
+};
+
+static const struct devlink_region_ops region_fw_health_ops = {
+ .name = REGION_FW_HEALTH,
+ .destructor = &kvfree,
+};
/* Set to true in case cr enable bit was set to true before crdump */
static bool crdump_enbale_bit_set;
@@ -99,7 +112,7 @@ static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev,
readl(cr_space + offset);
err = devlink_region_snapshot_create(crdump->region_crspace,
- crspace_data, id, &kvfree);
+ crspace_data, id);
if (err) {
kvfree(crspace_data);
mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
@@ -138,7 +151,7 @@ static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev,
readl(health_buf_start + offset);
err = devlink_region_snapshot_create(crdump->region_fw_health,
- health_data, id, &kvfree);
+ health_data, id);
if (err) {
kvfree(health_data);
mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
@@ -159,6 +172,7 @@ int mlx4_crdump_collect(struct mlx4_dev *dev)
struct pci_dev *pdev = dev->persist->pdev;
unsigned long cr_res_size;
u8 __iomem *cr_space;
+ int err;
u32 id;
if (!dev->caps.health_buffer_addrs) {
@@ -179,15 +193,22 @@ int mlx4_crdump_collect(struct mlx4_dev *dev)
return -ENODEV;
}
- crdump_enable_crspace_access(dev, cr_space);
-
/* Get the available snapshot ID for the dumps */
- id = devlink_region_snapshot_id_get(devlink);
+ err = devlink_region_snapshot_id_get(devlink, &id);
+ if (err) {
+ mlx4_err(dev, "crdump: devlink get snapshot id err %d\n", err);
+ return err;
+ }
+
+ crdump_enable_crspace_access(dev, cr_space);
/* Try to capture dumps */
mlx4_crdump_collect_crspace(dev, cr_space, id);
mlx4_crdump_collect_fw_health(dev, cr_space, id);
+ /* Release reference on the snapshot id */
+ devlink_region_snapshot_id_put(devlink, id);
+
crdump_disable_crspace_access(dev, cr_space);
iounmap(cr_space);
@@ -205,7 +226,7 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create cr-space region */
crdump->region_crspace =
devlink_region_create(devlink,
- region_cr_space_str,
+ &region_cr_space_ops,
MAX_NUM_OF_DUMPS_TO_STORE,
pci_resource_len(pdev, 0));
if (IS_ERR(crdump->region_crspace))
@@ -216,7 +237,7 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create fw-health region */
crdump->region_fw_health =
devlink_region_create(devlink,
- region_fw_health_str,
+ &region_fw_health_ops,
MAX_NUM_OF_DUMPS_TO_STORE,
HEALTH_BUFFER_SIZE);
if (IS_ERR(crdump->region_fw_health))
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 8bf1f08fdee2..8a5ea2543670 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -2121,6 +2121,10 @@ static int mlx4_en_set_phys_id(struct net_device *dev,
}
const struct ethtool_ops mlx4_en_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_PKT_RATE_RX_USECS,
.get_drvinfo = mlx4_en_get_drvinfo,
.get_link_ksettings = mlx4_en_get_link_ksettings,
.set_link_ksettings = mlx4_en_set_link_ksettings,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index a1f20b205299..312e0a1ad43d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -78,6 +78,16 @@ config MLX5_ESWITCH
Legacy SRIOV mode (L2 mac vlan steering based).
Switchdev mode (eswitch offloads).
+config MLX5_TC_CT
+ bool "MLX5 TC connection tracking offload support"
+ depends on MLX5_CORE_EN && NET_SWITCHDEV && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
+ default y
+ help
+ Say Y here if you want to support offloading connection tracking rules
+ via tc ct action.
+
+ If unsure, say Y.
+
config MLX5_CORE_EN_DCB
bool "Data Center Bridging (DCB) Support"
default y
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index d3e06cec8317..6d32915000fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -16,7 +16,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
- diag/fw_tracer.o diag/crdump.o devlink.o
+ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o
#
# Netdev basic
@@ -25,7 +25,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \
- en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o
+ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o
#
# Netdev extra
@@ -34,15 +34,16 @@ mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \
- lib/geneve.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
+ lib/geneve.o en/mapping.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
en/tc_tun_geneve.o diag/en_tc_tracepoint.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
+mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
#
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
- ecpf.o rdma.o eswitch_offloads_chains.o
+ ecpf.o rdma.o esw/chains.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 50862275544e..1972ddd12704 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -193,7 +193,7 @@ bool mlx5_device_registered(struct mlx5_core_dev *dev)
return found;
}
-int mlx5_register_device(struct mlx5_core_dev *dev)
+void mlx5_register_device(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
struct mlx5_interface *intf;
@@ -203,8 +203,6 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
list_for_each_entry(intf, &intf_list, list)
mlx5_add_device(intf, priv);
mutex_unlock(&mlx5_intf_mutex);
-
- return 0;
}
void mlx5_unregister_device(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index ac108f1e5bd6..bdeb291f6b67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -90,7 +90,8 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- return mlx5_unload_one(dev, false);
+ mlx5_unload_one(dev, false);
+ return 0;
}
static int mlx5_devlink_reload_up(struct devlink *devlink,
@@ -190,11 +191,6 @@ static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id,
return 0;
}
-enum mlx5_devlink_param_id {
- MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
-};
-
static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -210,14 +206,38 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
return 0;
}
+#ifdef CONFIG_MLX5_ESWITCH
+static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ int group_num = val.vu32;
+
+ if (group_num < 1 || group_num > 1024) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported group number, supported range is 1-1024");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+#endif
+
static const struct devlink_param mlx5_devlink_params[] = {
- DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
"flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
mlx5_devlink_fs_mode_validate),
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_enable_roce_validate),
+#ifdef CONFIG_MLX5_ESWITCH
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ "fdb_large_groups", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL,
+ mlx5_devlink_large_group_num_validate),
+#endif
};
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -230,13 +250,20 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
else
strcpy(value.vstr, "smfs");
devlink_param_driverinit_value_set(devlink,
- MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
+ MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
value);
value.vbool = MLX5_CAP_GEN(dev, roce);
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
value);
+
+#ifdef CONFIG_MLX5_ESWITCH
+ value.vu32 = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
+ devlink_param_driverinit_value_set(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ value);
+#endif
}
int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index d0ba03774ddf..f0de327a59be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -6,6 +6,12 @@
#include <net/devlink.h>
+enum mlx5_devlink_param_id {
+ MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+};
+
struct devlink *mlx5_devlink_alloc(void);
void mlx5_devlink_free(struct devlink *devlink);
int mlx5_devlink_register(struct devlink *devlink, struct device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 94d7b69a95c7..c9c9b479bda5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -188,7 +188,7 @@ static int mlx5_fw_tracer_create_mkey(struct mlx5_fw_tracer *tracer)
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2));
- mtt = (u64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
for (i = 0 ; i < TRACER_BUFFER_PAGE_NUM ; i++)
mtt[i] = cpu_to_be64(tracer->buff.dma + i * PAGE_SIZE);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
new file mode 100644
index 000000000000..17ab7efe693d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "rsc_dump.h"
+#include "lib/mlx5.h"
+
+#define MLX5_SGMT_TYPE(SGMT) MLX5_SGMT_TYPE_##SGMT
+#define MLX5_SGMT_STR_ASSIGN(SGMT)[MLX5_SGMT_TYPE(SGMT)] = #SGMT
+static const char *const mlx5_rsc_sgmt_name[] = {
+ MLX5_SGMT_STR_ASSIGN(HW_CQPC),
+ MLX5_SGMT_STR_ASSIGN(HW_SQPC),
+ MLX5_SGMT_STR_ASSIGN(HW_RQPC),
+ MLX5_SGMT_STR_ASSIGN(FULL_SRQC),
+ MLX5_SGMT_STR_ASSIGN(FULL_CQC),
+ MLX5_SGMT_STR_ASSIGN(FULL_EQC),
+ MLX5_SGMT_STR_ASSIGN(FULL_QPC),
+ MLX5_SGMT_STR_ASSIGN(SND_BUFF),
+ MLX5_SGMT_STR_ASSIGN(RCV_BUFF),
+ MLX5_SGMT_STR_ASSIGN(SRQ_BUFF),
+ MLX5_SGMT_STR_ASSIGN(CQ_BUFF),
+ MLX5_SGMT_STR_ASSIGN(EQ_BUFF),
+ MLX5_SGMT_STR_ASSIGN(SX_SLICE),
+ MLX5_SGMT_STR_ASSIGN(SX_SLICE_ALL),
+ MLX5_SGMT_STR_ASSIGN(RDB),
+ MLX5_SGMT_STR_ASSIGN(RX_SLICE_ALL),
+};
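
For reference, the two helper macros above expand to designated initializers keyed by the segment enum, which keeps each name string in sync with enum mlx5_sgmt_type by construction. A sketch of a single expansion:

/* MLX5_SGMT_STR_ASSIGN(HW_CQPC) expands to: */
[MLX5_SGMT_TYPE_HW_CQPC] = "HW_CQPC",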
+
+struct mlx5_rsc_dump {
+ u32 pdn;
+ struct mlx5_core_mkey mkey;
+ u16 fw_segment_type[MLX5_SGMT_TYPE_NUM];
+};
+
+struct mlx5_rsc_dump_cmd {
+ u64 mem_size;
+ u8 cmd[MLX5_ST_SZ_BYTES(resource_dump)];
+};
+
+static int mlx5_rsc_dump_sgmt_get_by_name(char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlx5_rsc_sgmt_name); i++)
+ if (!strcmp(name, mlx5_rsc_sgmt_name[i]))
+ return i;
+
+ return -EINVAL;
+}
+
+static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page)
+{
+ void *data = page_address(page);
+ enum mlx5_sgmt_type sgmt_idx;
+ int num_of_items;
+ char *sgmt_name;
+ void *member;
+ void *menu;
+ int i;
+
+ menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
+ num_of_items = MLX5_GET(resource_dump_menu_segment, menu, num_of_records);
+
+ for (i = 0; i < num_of_items; i++) {
+ member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]);
+ sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name);
+ sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name);
+ if (sgmt_idx == -EINVAL)
+ continue;
+ rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record,
+ member, segment_type);
+ }
+}
+
+static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
+ struct page *page)
+{
+ struct mlx5_rsc_dump *rsc_dump = dev->rsc_dump;
+ struct device *ddev = &dev->pdev->dev;
+ u32 out_seq_num;
+ u32 in_seq_num;
+ dma_addr_t dma;
+ int err;
+
+ dma = dma_map_page(ddev, page, 0, cmd->mem_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ddev, dma)))
+ return -ENOMEM;
+
+ in_seq_num = MLX5_GET(resource_dump, cmd->cmd, seq_num);
+ MLX5_SET(resource_dump, cmd->cmd, mkey, rsc_dump->mkey.key);
+ MLX5_SET64(resource_dump, cmd->cmd, address, dma);
+
+ err = mlx5_core_access_reg(dev, cmd->cmd, sizeof(cmd->cmd), cmd->cmd,
+ sizeof(cmd->cmd), MLX5_REG_RESOURCE_DUMP, 0, 1);
+ if (err) {
+ mlx5_core_err(dev, "Resource dump: Failed to access err %d\n", err);
+ goto out;
+ }
+ out_seq_num = MLX5_GET(resource_dump, cmd->cmd, seq_num);
+ if (out_seq_num && (in_seq_num + 1 != out_seq_num))
+ err = -EIO;
+out:
+ dma_unmap_page(ddev, dma, cmd->mem_size, DMA_FROM_DEVICE);
+ return err;
+}
+
+struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev,
+ struct mlx5_rsc_key *key)
+{
+ struct mlx5_rsc_dump_cmd *cmd;
+ int sgmt_type;
+
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ sgmt_type = dev->rsc_dump->fw_segment_type[key->rsc];
+ if (!sgmt_type && key->rsc != MLX5_SGMT_TYPE_MENU)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ mlx5_core_err(dev, "Resource dump: Failed to allocate command\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ MLX5_SET(resource_dump, cmd->cmd, segment_type, sgmt_type);
+ MLX5_SET(resource_dump, cmd->cmd, index1, key->index1);
+ MLX5_SET(resource_dump, cmd->cmd, index2, key->index2);
+ MLX5_SET(resource_dump, cmd->cmd, num_of_obj1, key->num_of_obj1);
+ MLX5_SET(resource_dump, cmd->cmd, num_of_obj2, key->num_of_obj2);
+ MLX5_SET(resource_dump, cmd->cmd, size, key->size);
+ cmd->mem_size = key->size;
+ return cmd;
+}
+
+void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd)
+{
+ kfree(cmd);
+}
+
+int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
+ struct page *page, int *size)
+{
+ bool more_dump;
+ int err;
+
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return -EOPNOTSUPP;
+
+ err = mlx5_rsc_dump_trigger(dev, cmd, page);
+ if (err) {
+ mlx5_core_err(dev, "Resource dump: Failed to trigger dump, %d\n", err);
+ return err;
+ }
+ *size = MLX5_GET(resource_dump, cmd->cmd, size);
+ more_dump = MLX5_GET(resource_dump, cmd->cmd, more_dump);
+
+ return more_dump;
+}
+
+#define MLX5_RSC_DUMP_MENU_SEGMENT 0xffff
+static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rsc_dump_cmd *cmd = NULL;
+ struct mlx5_rsc_key key = {};
+ struct page *page;
+ int size;
+ int err;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ key.rsc = MLX5_SGMT_TYPE_MENU;
+ key.size = PAGE_SIZE;
+ cmd = mlx5_rsc_dump_cmd_create(dev, &key);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto free_page;
+ }
+ MLX5_SET(resource_dump, cmd->cmd, segment_type, MLX5_RSC_DUMP_MENU_SEGMENT);
+
+ do {
+ err = mlx5_rsc_dump_next(dev, cmd, page, &size);
+ if (err < 0)
+ goto destroy_cmd;
+
+ mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page);
+
+ } while (err > 0);
+
+destroy_cmd:
+ mlx5_rsc_dump_cmd_destroy(cmd);
+free_page:
+ __free_page(page);
+
+ return err;
+}
+
+static int mlx5_rsc_dump_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+ struct mlx5_core_mkey *mkey)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
+ int err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ MLX5_SET(mkc, mkc, pd, pdn);
+ MLX5_SET(mkc, mkc, length64, 1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+
+struct mlx5_rsc_dump *mlx5_rsc_dump_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rsc_dump *rsc_dump;
+
+ if (!MLX5_CAP_DEBUG(dev, resource_dump)) {
+ mlx5_core_dbg(dev, "Resource dump: capability not present\n");
+ return NULL;
+ }
+ rsc_dump = kzalloc(sizeof(*rsc_dump), GFP_KERNEL);
+ if (!rsc_dump)
+ return ERR_PTR(-ENOMEM);
+
+ return rsc_dump;
+}
+
+void mlx5_rsc_dump_destroy(struct mlx5_core_dev *dev)
+{
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return;
+ kfree(dev->rsc_dump);
+}
+
+int mlx5_rsc_dump_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rsc_dump *rsc_dump = dev->rsc_dump;
+ int err;
+
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return 0;
+
+ err = mlx5_core_alloc_pd(dev, &rsc_dump->pdn);
+ if (err) {
+ mlx5_core_warn(dev, "Resource dump: Failed to allocate PD %d\n", err);
+ return err;
+ }
+ err = mlx5_rsc_dump_create_mkey(dev, rsc_dump->pdn, &rsc_dump->mkey);
+ if (err) {
+ mlx5_core_err(dev, "Resource dump: Failed to create mkey, %d\n", err);
+ goto free_pd;
+ }
+ err = mlx5_rsc_dump_menu(dev);
+ if (err) {
+ mlx5_core_err(dev, "Resource dump: Failed to read menu, %d\n", err);
+ goto destroy_mkey;
+ }
+ return err;
+
+destroy_mkey:
+ mlx5_core_destroy_mkey(dev, &rsc_dump->mkey);
+free_pd:
+ mlx5_core_dealloc_pd(dev, rsc_dump->pdn);
+ return err;
+}
+
+void mlx5_rsc_dump_cleanup(struct mlx5_core_dev *dev)
+{
+ if (IS_ERR_OR_NULL(dev->rsc_dump))
+ return;
+
+ mlx5_core_destroy_mkey(dev, &dev->rsc_dump->mkey);
+ mlx5_core_dealloc_pd(dev, dev->rsc_dump->pdn);
+}
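
The four lifecycle entry points above are intended to be called in matched pairs from the core probe and teardown paths. A minimal ordering sketch, with error handling elided and the surrounding probe context assumed:

/* probe: allocate the context, then set up PD, mkey and the FW menu */
dev->rsc_dump = mlx5_rsc_dump_create(dev);
err = mlx5_rsc_dump_init(dev);

/* teardown: release FW resources, then free the context */
mlx5_rsc_dump_cleanup(dev);
mlx5_rsc_dump_destroy(dev);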
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h
new file mode 100644
index 000000000000..148270073e71
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_RSC_DUMP_H
+#define __MLX5_RSC_DUMP_H
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+
+enum mlx5_sgmt_type {
+ MLX5_SGMT_TYPE_HW_CQPC,
+ MLX5_SGMT_TYPE_HW_SQPC,
+ MLX5_SGMT_TYPE_HW_RQPC,
+ MLX5_SGMT_TYPE_FULL_SRQC,
+ MLX5_SGMT_TYPE_FULL_CQC,
+ MLX5_SGMT_TYPE_FULL_EQC,
+ MLX5_SGMT_TYPE_FULL_QPC,
+ MLX5_SGMT_TYPE_SND_BUFF,
+ MLX5_SGMT_TYPE_RCV_BUFF,
+ MLX5_SGMT_TYPE_SRQ_BUFF,
+ MLX5_SGMT_TYPE_CQ_BUFF,
+ MLX5_SGMT_TYPE_EQ_BUFF,
+ MLX5_SGMT_TYPE_SX_SLICE,
+ MLX5_SGMT_TYPE_SX_SLICE_ALL,
+ MLX5_SGMT_TYPE_RDB,
+ MLX5_SGMT_TYPE_RX_SLICE_ALL,
+ MLX5_SGMT_TYPE_MENU,
+ MLX5_SGMT_TYPE_TERMINATE,
+
+ MLX5_SGMT_TYPE_NUM, /* Keep last */
+};
+
+struct mlx5_rsc_key {
+ enum mlx5_sgmt_type rsc;
+ int index1;
+ int index2;
+ int num_of_obj1;
+ int num_of_obj2;
+ int size;
+};
+
+#define MLX5_RSC_DUMP_ALL 0xFFFF
+struct mlx5_rsc_dump_cmd;
+struct mlx5_rsc_dump;
+
+struct mlx5_rsc_dump *mlx5_rsc_dump_create(struct mlx5_core_dev *dev);
+void mlx5_rsc_dump_destroy(struct mlx5_core_dev *dev);
+
+int mlx5_rsc_dump_init(struct mlx5_core_dev *dev);
+void mlx5_rsc_dump_cleanup(struct mlx5_core_dev *dev);
+
+struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev,
+ struct mlx5_rsc_key *key);
+void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd);
+
+int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
+ struct page *page, int *size);
+#endif
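
Putting the interface together: a consumer fills a mlx5_rsc_key, creates a command, and calls mlx5_rsc_dump_next() in a loop; a positive return means more data is pending, zero means the dump is complete, and a negative value is an error. A minimal sketch, assuming dev, page and qpn are provided by the caller:

struct mlx5_rsc_dump_cmd *cmd;
struct mlx5_rsc_key key = {};
int size, err;

key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
key.index1 = qpn;	/* hypothetical queue number */
key.num_of_obj1 = 1;
key.size = PAGE_SIZE;

cmd = mlx5_rsc_dump_cmd_create(dev, &key);
if (IS_ERR(cmd))
	return PTR_ERR(cmd);

do {
	err = mlx5_rsc_dump_next(dev, cmd, page, &size);
	if (err < 0)
		break;
	/* consume 'size' bytes from page_address(page) */
} while (err > 0);

mlx5_rsc_dump_cmd_destroy(cmd);
return err < 0 ? err : 0;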
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c9606b8ab6ef..12a61bf82c14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -204,7 +204,7 @@ struct mlx5e_tx_wqe {
struct mlx5e_rx_wqe_ll {
struct mlx5_wqe_srq_next_seg next;
- struct mlx5_wqe_data_seg data[0];
+ struct mlx5_wqe_data_seg data[];
};
struct mlx5e_rx_wqe_cyc {
@@ -738,7 +738,6 @@ struct mlx5e_channel {
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
int ix;
int cpu;
- cpumask_var_t xps_cpumask;
};
struct mlx5e_channels {
@@ -814,6 +813,15 @@ struct mlx5e_xsk {
bool ever_used;
};
+/* Temporary storage for variables that are allocated when struct mlx5e_priv is
+ * initialized, and used in places where we can't allocate them because those
+ * functions must not fail. Use with care, and make sure the same variable is
+ * not used simultaneously by multiple users.
+ */
+struct mlx5e_scratchpad {
+ cpumask_var_t cpumask;
+};
+
struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
@@ -873,10 +881,12 @@ struct mlx5e_priv {
#endif
struct devlink_health_reporter *tx_reporter;
struct devlink_health_reporter *rx_reporter;
+ struct devlink_port dl_port;
struct mlx5e_xsk xsk;
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
+ struct mlx5e_scratchpad scratchpad;
};
struct mlx5e_profile {
@@ -1036,14 +1046,22 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);
-/* Function pointer to be used to modify WH settings while
+/* Function pointer to be used to modify HW or kernel settings while
* switching channels
*/
-typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
+#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
+int fn##_ctx(struct mlx5e_priv *priv, void *context) \
+{ \
+ return fn(priv); \
+}
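
The wrapper macro adapts existing no-context callbacks to the new preactivate signature; for instance, MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed) expands to:

int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context)
{
	return mlx5e_num_channels_changed(priv);
}

and it is the _ctx variant that callers hand to mlx5e_safe_switch_channels() together with an optional context pointer.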
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *new_chs,
- mlx5e_fp_hw_modify hw_modify);
+ mlx5e_fp_preactivate preactivate,
+ void *context);
+int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
+int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
@@ -1124,10 +1142,10 @@ void mlx5e_update_ndo_stats(struct mlx5e_priv *priv);
void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);
-typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
+int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
- change_hw_mtu_cb set_mtu_cb);
+ mlx5e_fp_preactivate preactivate);
/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
@@ -1153,6 +1171,12 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings);
+int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
+int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
+ const u8 hfunc);
+int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rule_locs);
+int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
new file mode 100644
index 000000000000..f8b2de4b04be
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "en/devlink.h"
+
+int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
+{
+ struct devlink *devlink = priv_to_devlink(priv->mdev);
+
+ if (mlx5_core_is_pf(priv->mdev))
+ devlink_port_attrs_set(&priv->dl_port,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ PCI_FUNC(priv->mdev->pdev->devfn),
+ false, 0,
+ NULL, 0);
+ else
+ devlink_port_attrs_set(&priv->dl_port,
+ DEVLINK_PORT_FLAVOUR_VIRTUAL,
+ 0, false, 0, NULL, 0);
+
+ return devlink_port_register(devlink, &priv->dl_port, 1);
+}
+
+void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
+{
+ devlink_port_type_eth_set(&priv->dl_port, priv->netdev);
+}
+
+void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
+{
+ devlink_port_unregister(&priv->dl_port);
+}
+
+struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return &priv->dl_port;
+}
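
The signature of mlx5e_get_devlink_port() matches the netdev ndo_get_devlink_port callback, which is presumably how it is wired up elsewhere in this series so that devlink can associate the registered port with its netdev.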
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
new file mode 100644
index 000000000000..83123a801adc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5E_EN_DEVLINK_H
+#define __MLX5E_EN_DEVLINK_H
+
+#include <net/devlink.h>
+#include "en.h"
+
+int mlx5e_devlink_port_register(struct mlx5e_priv *priv);
+void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv);
+void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv);
+struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 20b907dc1e29..3a199a03d929 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -3,6 +3,7 @@
#include "health.h"
#include "lib/eq.h"
+#include "lib/mlx5.h"
int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name)
{
@@ -197,10 +198,114 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
struct devlink_health_reporter *reporter, char *err_str,
struct mlx5e_err_ctx *err_ctx)
{
- netdev_err(priv->netdev, err_str);
+ netdev_err(priv->netdev, "%s\n", err_str);
if (!reporter)
return err_ctx->recover(err_ctx->ctx);
return devlink_health_report(reporter, err_str, err_ctx);
}
+
+#define MLX5_HEALTH_DEVLINK_MAX_SIZE 1024
+static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
+ const void *value, u32 value_len)
+
+{
+ u32 data_size;
+ u32 offset;
+ int err = 0;
+
+ for (offset = 0; offset < value_len; offset += data_size) {
+ data_size = value_len - offset;
+ if (data_size > MLX5_HEALTH_DEVLINK_MAX_SIZE)
+ data_size = MLX5_HEALTH_DEVLINK_MAX_SIZE;
+ err = devlink_fmsg_binary_put(fmsg, value + offset, data_size);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_rsc_dump_cmd *cmd;
+ struct page *page;
+ int cmd_err, err;
+ int end_err;
+ int size;
+
+ if (IS_ERR_OR_NULL(mdev->rsc_dump))
+ return -EOPNOTSUPP;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
+ if (err)
+ goto free_page;
+
+ cmd = mlx5_rsc_dump_cmd_create(mdev, key);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto free_page;
+ }
+
+ do {
+ cmd_err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
+ if (cmd_err < 0) {
+ err = cmd_err;
+ goto destroy_cmd;
+ }
+
+ err = mlx5e_health_rsc_fmsg_binary(fmsg, page_address(page), size);
+ if (err)
+ goto destroy_cmd;
+
+ } while (cmd_err > 0);
+
+destroy_cmd:
+ mlx5_rsc_dump_cmd_destroy(cmd);
+ end_err = devlink_fmsg_binary_pair_nest_end(fmsg);
+ if (end_err)
+ err = end_err;
+free_page:
+ __free_page(page);
+ return err;
+}
+
+int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ int queue_idx, char *lbl)
+{
+ struct mlx5_rsc_key key = {};
+ int err;
+
+ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
+ key.index1 = queue_idx;
+ key.size = PAGE_SIZE;
+ key.num_of_obj1 = 1;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, lbl);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "index", queue_idx);
+ if (err)
+ return err;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return devlink_fmsg_obj_nest_end(fmsg);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index e54f70d9af22..38f97f79ef16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -5,6 +5,7 @@
#define __MLX5E_EN_HEALTH_H
#include "en.h"
+#include "diag/rsc_dump.h"
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
@@ -35,6 +36,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
struct mlx5e_err_ctx {
int (*recover)(void *ctx);
+ int (*dump)(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, void *ctx);
void *ctx;
};
@@ -47,6 +49,8 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
int mlx5e_health_create_reporters(struct mlx5e_priv *priv);
void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv);
void mlx5e_health_channels_update(struct mlx5e_priv *priv);
-
-
+int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key,
+ struct devlink_fmsg *fmsg);
+int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ int queue_idx, char *lbl);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
new file mode 100644
index 000000000000..ea321e528749
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2018 Mellanox Technologies */
+
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+#include <linux/hashtable.h>
+
+#include "mapping.h"
+
+#define MAPPING_GRACE_PERIOD 2000
+
+struct mapping_ctx {
+ struct xarray xarray;
+ DECLARE_HASHTABLE(ht, 8);
+ struct mutex lock; /* Guards hashtable and xarray */
+ unsigned long max_id;
+ size_t data_size;
+ bool delayed_removal;
+ struct delayed_work dwork;
+ struct list_head pending_list;
+ spinlock_t pending_list_lock; /* Guards pending list */
+};
+
+struct mapping_item {
+ struct rcu_head rcu;
+ struct list_head list;
+ unsigned long timeout;
+ struct hlist_node node;
+ int cnt;
+ u32 id;
+ char data[];
+};
+
+int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id)
+{
+ struct mapping_item *mi;
+ int err = -ENOMEM;
+ u32 hash_key;
+
+ mutex_lock(&ctx->lock);
+
+ hash_key = jhash(data, ctx->data_size, 0);
+ hash_for_each_possible(ctx->ht, mi, node, hash_key) {
+ if (!memcmp(data, mi->data, ctx->data_size))
+ goto attach;
+ }
+
+ mi = kzalloc(sizeof(*mi) + ctx->data_size, GFP_KERNEL);
+ if (!mi)
+ goto err_alloc;
+
+ memcpy(mi->data, data, ctx->data_size);
+ hash_add(ctx->ht, &mi->node, hash_key);
+
+ err = xa_alloc(&ctx->xarray, &mi->id, mi, XA_LIMIT(1, ctx->max_id),
+ GFP_KERNEL);
+ if (err)
+ goto err_assign;
+attach:
+ ++mi->cnt;
+ *id = mi->id;
+
+ mutex_unlock(&ctx->lock);
+
+ return 0;
+
+err_assign:
+ hash_del(&mi->node);
+ kfree(mi);
+err_alloc:
+ mutex_unlock(&ctx->lock);
+
+ return err;
+}
+
+static void mapping_remove_and_free(struct mapping_ctx *ctx,
+ struct mapping_item *mi)
+{
+ xa_erase(&ctx->xarray, mi->id);
+ kfree_rcu(mi, rcu);
+}
+
+static void mapping_free_item(struct mapping_ctx *ctx,
+ struct mapping_item *mi)
+{
+ if (!ctx->delayed_removal) {
+ mapping_remove_and_free(ctx, mi);
+ return;
+ }
+
+ mi->timeout = jiffies + msecs_to_jiffies(MAPPING_GRACE_PERIOD);
+
+ spin_lock(&ctx->pending_list_lock);
+ list_add_tail(&mi->list, &ctx->pending_list);
+ spin_unlock(&ctx->pending_list_lock);
+
+ schedule_delayed_work(&ctx->dwork, MAPPING_GRACE_PERIOD);
+}
+
+int mapping_remove(struct mapping_ctx *ctx, u32 id)
+{
+ unsigned long index = id;
+ struct mapping_item *mi;
+ int err = -ENOENT;
+
+ mutex_lock(&ctx->lock);
+ mi = xa_load(&ctx->xarray, index);
+ if (!mi)
+ goto out;
+ err = 0;
+
+ if (--mi->cnt > 0)
+ goto out;
+
+ hash_del(&mi->node);
+ mapping_free_item(ctx, mi);
+out:
+ mutex_unlock(&ctx->lock);
+
+ return err;
+}
+
+int mapping_find(struct mapping_ctx *ctx, u32 id, void *data)
+{
+ unsigned long index = id;
+ struct mapping_item *mi;
+ int err = -ENOENT;
+
+ rcu_read_lock();
+ mi = xa_load(&ctx->xarray, index);
+ if (!mi)
+ goto err_find;
+
+ memcpy(data, mi->data, ctx->data_size);
+ err = 0;
+
+err_find:
+ rcu_read_unlock();
+ return err;
+}
+
+static void
+mapping_remove_and_free_list(struct mapping_ctx *ctx, struct list_head *list)
+{
+ struct mapping_item *mi;
+
+ list_for_each_entry(mi, list, list)
+ mapping_remove_and_free(ctx, mi);
+}
+
+static void mapping_work_handler(struct work_struct *work)
+{
+ unsigned long min_timeout = 0, now = jiffies;
+ struct mapping_item *mi, *next;
+ LIST_HEAD(pending_items);
+ struct mapping_ctx *ctx;
+
+ ctx = container_of(work, struct mapping_ctx, dwork.work);
+
+ spin_lock(&ctx->pending_list_lock);
+ list_for_each_entry_safe(mi, next, &ctx->pending_list, list) {
+ if (time_after(now, mi->timeout))
+ list_move(&mi->list, &pending_items);
+ else if (!min_timeout ||
+ time_before(mi->timeout, min_timeout))
+ min_timeout = mi->timeout;
+ }
+ spin_unlock(&ctx->pending_list_lock);
+
+ mapping_remove_and_free_list(ctx, &pending_items);
+
+ if (min_timeout)
+ schedule_delayed_work(&ctx->dwork, abs(min_timeout - now));
+}
+
+static void mapping_flush_work(struct mapping_ctx *ctx)
+{
+ if (!ctx->delayed_removal)
+ return;
+
+ cancel_delayed_work_sync(&ctx->dwork);
+ mapping_remove_and_free_list(ctx, &ctx->pending_list);
+}
+
+struct mapping_ctx *
+mapping_create(size_t data_size, u32 max_id, bool delayed_removal)
+{
+ struct mapping_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->max_id = max_id ? max_id : UINT_MAX;
+ ctx->data_size = data_size;
+
+ if (delayed_removal) {
+ INIT_DELAYED_WORK(&ctx->dwork, mapping_work_handler);
+ INIT_LIST_HEAD(&ctx->pending_list);
+ spin_lock_init(&ctx->pending_list_lock);
+ ctx->delayed_removal = true;
+ }
+
+ mutex_init(&ctx->lock);
+ xa_init_flags(&ctx->xarray, XA_FLAGS_ALLOC1);
+
+ return ctx;
+}
+
+void mapping_destroy(struct mapping_ctx *ctx)
+{
+ mapping_flush_work(ctx);
+ xa_destroy(&ctx->xarray);
+ mutex_destroy(&ctx->lock);
+
+ kfree(ctx);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
new file mode 100644
index 000000000000..285525cc5470
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies */
+
+#ifndef __MLX5_MAPPING_H__
+#define __MLX5_MAPPING_H__
+
+struct mapping_ctx;
+
+int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id);
+int mapping_remove(struct mapping_ctx *ctx, u32 id);
+int mapping_find(struct mapping_ctx *ctx, u32 id, void *data);
+
+/* mapping uses an xarray to map data to ids in add(), and for find().
+ * For locking, it uses an internal xarray spin lock for add()/remove();
+ * find() uses rcu_read_lock().
+ * Choosing delayed_removal postpones the removal of a previously mapped
+ * id by MAPPING_GRACE_PERIOD milliseconds.
+ * This is to avoid races against hardware, where a packet is marked in
+ * hardware with a previous id, followed by a quick remove() and add()
+ * that reuse the same id. find() would then return the new mapping
+ * instead of the old one that was used to mark the packet.
+ */
+struct mapping_ctx *mapping_create(size_t data_size, u32 max_id,
+ bool delayed_removal);
+void mapping_destroy(struct mapping_ctx *ctx);
+
+#endif /* __MLX5_MAPPING_H__ */
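
A minimal usage sketch of this API, assuming a hypothetical caller-defined struct tun_key as the mapped data:

struct mapping_ctx *ctx;
struct tun_key key = { /* filled from the flow match */ };
u32 id;
int err;

ctx = mapping_create(sizeof(key), 0xFFF, true);	/* ids 1..0xFFF, delayed removal */
if (IS_ERR(ctx))
	return PTR_ERR(ctx);

err = mapping_add(ctx, &key, &id);	/* duplicate data returns the existing id */
err = mapping_find(ctx, id, &key);	/* copies data_size bytes back to the caller */
err = mapping_remove(ctx, id);		/* refcounted; freeing may be deferred */

mapping_destroy(ctx);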
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index fce6eccdcf8b..2c4a670c8ffd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -343,64 +343,76 @@ out:
return err;
}
-static u32 fec_supported_speeds[] = {
- 10000,
- 40000,
- 25000,
- 50000,
- 56000,
- 100000
+enum mlx5e_fec_supported_link_mode {
+ MLX5E_FEC_SUPPORTED_LINK_MODES_10G_40G,
+ MLX5E_FEC_SUPPORTED_LINK_MODES_25G,
+ MLX5E_FEC_SUPPORTED_LINK_MODES_50G,
+ MLX5E_FEC_SUPPORTED_LINK_MODES_56G,
+ MLX5E_FEC_SUPPORTED_LINK_MODES_100G,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X,
+ MLX5E_MAX_FEC_SUPPORTED_LINK_MODE,
};
-#define MLX5E_FEC_SUPPORTED_SPEEDS ARRAY_SIZE(fec_supported_speeds)
+#define MLX5E_FEC_FIRST_50G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X
+
+#define MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, policy, write, link) \
+ do { \
+ u16 *_policy = &(policy); \
+ u32 *_buf = buf; \
+ \
+ if (write) \
+ MLX5_SET(pplm_reg, _buf, fec_override_admin_##link, *_policy); \
+ else \
+ *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
+ } while (0)
+
+#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
+ do { \
+ u16 *__policy = &(policy); \
+ bool _write = (write); \
+ \
+ if (_write && *__policy) \
+ *__policy = find_first_bit((u_long *)__policy, \
+ sizeof(u16) * BITS_PER_BYTE);\
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
+ if (!_write && *__policy) \
+ *__policy = 1 << *__policy; \
+ } while (0)
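
The two variants differ in how the policy is encoded in the PPLM register: the legacy per-speed fields appear to hold the FEC policy as a bitmask, while the 50G-per-lane fields hold a single bit index, so the second macro converts mask to index (find_first_bit) on write and index back to mask (1 << *policy) on read. For the legacy case, MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 25g) with write true reduces to:

MLX5_SET(pplm_reg, pplm, fec_override_admin_25g, *fec_policy);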
/* get/set FEC admin field for a given speed */
-static int mlx5e_fec_admin_field(u32 *pplm,
- u8 *fec_policy,
- bool write,
- u32 speed)
+static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
+ enum mlx5e_fec_supported_link_mode link_mode)
{
- switch (speed) {
- case 10000:
- case 40000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_10g_40g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_10g_40g, *fec_policy);
+ switch (link_mode) {
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_10G_40G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 10g_40g);
break;
- case 25000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_25g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_25g, *fec_policy);
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_25G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 25g);
break;
- case 50000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_50g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_50g, *fec_policy);
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_50G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 50g);
break;
- case 56000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_56g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_56g, *fec_policy);
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_56G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 56g);
break;
- case 100000:
- if (!write)
- *fec_policy = MLX5_GET(pplm_reg, pplm,
- fec_override_admin_100g);
- else
- MLX5_SET(pplm_reg, pplm,
- fec_override_admin_100g, *fec_policy);
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_100G:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
+ MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 50g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
+ MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 100g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
+ MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 200g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
+ MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 400g_8x);
break;
default:
return -EINVAL;
@@ -408,32 +420,40 @@ static int mlx5e_fec_admin_field(u32 *pplm,
return 0;
}
+#define MLX5E_GET_FEC_OVERRIDE_CAP(buf, link) \
+ MLX5_GET(pplm_reg, buf, fec_override_cap_##link)
+
/* returns FEC capabilities for a given speed */
-static int mlx5e_get_fec_cap_field(u32 *pplm,
- u8 *fec_cap,
- u32 speed)
+static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap,
+ enum mlx5e_fec_supported_link_mode link_mode)
{
- switch (speed) {
- case 10000:
- case 40000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_10g_40g);
+ switch (link_mode) {
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_10G_40G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 10g_40g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_25G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 25g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_50G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 50g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_56G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 56g);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODES_100G:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 100g);
break;
- case 25000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_25g);
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 50g_1x);
break;
- case 50000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_50g);
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 100g_2x);
break;
- case 56000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_56g);
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 200g_4x);
break;
- case 100000:
- *fec_cap = MLX5_GET(pplm_reg, pplm,
- fec_override_cap_100g);
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_8x);
break;
default:
return -EINVAL;
@@ -441,13 +461,14 @@ static int mlx5e_get_fec_cap_field(u32 *pplm,
return 0;
}
-int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps)
+bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
{
+ bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
- u32 current_fec_speed;
int err;
+ int i;
if (!MLX5_CAP_GEN(dev, pcam_reg))
return -EOPNOTSUPP;
@@ -458,23 +479,30 @@ int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps)
MLX5_SET(pplm_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
if (err)
- return err;
+ return false;
- err = mlx5e_port_linkspeed(dev, &current_fec_speed);
- if (err)
- return err;
+ for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
+ u16 fec_caps;
- return mlx5e_get_fec_cap_field(out, fec_caps, current_fec_speed);
+ if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ break;
+
+ mlx5e_get_fec_cap_field(out, &fec_caps, i);
+ if (fec_caps & fec_policy)
+ return true;
+ }
+ return false;
}
int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
- u8 *fec_configured_mode)
+ u16 *fec_configured_mode)
{
+ bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
- u32 link_speed;
int err;
+ int i;
if (!MLX5_CAP_GEN(dev, pcam_reg))
return -EOPNOTSUPP;
@@ -490,24 +518,28 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
*fec_mode_active = MLX5_GET(pplm_reg, out, fec_mode_active);
if (!fec_configured_mode)
- return 0;
+ goto out;
- err = mlx5e_port_linkspeed(dev, &link_speed);
- if (err)
- return err;
+ *fec_configured_mode = 0;
+ for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
+ if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ break;
- return mlx5e_fec_admin_field(out, fec_configured_mode, 0, link_speed);
+ mlx5e_fec_admin_field(out, fec_configured_mode, 0, i);
+ if (*fec_configured_mode != 0)
+ goto out;
+ }
+out:
+ return 0;
}
-int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
+int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
{
- u8 fec_policy_nofec = BIT(MLX5E_FEC_NOFEC);
- bool fec_mode_not_supp_in_speed = false;
+ bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
- u8 fec_policy_auto = 0;
- u8 fec_caps = 0;
+ u16 fec_policy_auto = 0;
int err;
int i;
@@ -517,6 +549,9 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
if (!MLX5_CAP_PCAM_REG(dev, pplm))
return -EOPNOTSUPP;
+ if (fec_policy >= (1 << MLX5E_FEC_LLRS_272_257_1) && !fec_50g_per_lane)
+ return -EOPNOTSUPP;
+
MLX5_SET(pplm_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
if (err)
@@ -524,25 +559,31 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
MLX5_SET(pplm_reg, out, local_port, 1);
- for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS; i++) {
- mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
- /* policy supported for link speed, or policy is auto */
- if (fec_caps & fec_policy || fec_policy == fec_policy_auto) {
- mlx5e_fec_admin_field(out, &fec_policy, 1,
- fec_supported_speeds[i]);
- } else {
- /* turn off FEC if supported. Else, leave it the same */
- if (fec_caps & fec_policy_nofec)
- mlx5e_fec_admin_field(out, &fec_policy_nofec, 1,
- fec_supported_speeds[i]);
- fec_mode_not_supp_in_speed = true;
- }
- }
+ for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
+ u16 conf_fec = fec_policy;
+ u16 fec_caps = 0;
+
+ if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ break;
- if (fec_mode_not_supp_in_speed)
- mlx5_core_dbg(dev,
- "FEC policy 0x%x is not supported for some speeds",
- fec_policy);
+ /* RS FEC in ethtool is mapped to MLX5E_FEC_RS_528_514
+ * for link modes up to 25G per lane, and to
+ * MLX5E_FEC_RS_544_514 for the new link modes based on
+ * 50G per lane.
+ */
+ if (conf_fec == (1 << MLX5E_FEC_RS_528_514) &&
+ i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
+ conf_fec = (1 << MLX5E_FEC_RS_544_514);
+
+ mlx5e_get_fec_cap_field(out, &fec_caps, i);
+
+ /* policy supported for link speed */
+ if (fec_caps & conf_fec)
+ mlx5e_fec_admin_field(out, &conf_fec, 1, i);
+ else
+ /* set FEC to auto */
+ mlx5e_fec_admin_field(out, &fec_policy_auto, 1, i);
+ }
return mlx5_core_access_reg(dev, out, sz, out, sz, MLX5_REG_PPLM, 0, 1);
}
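
From the caller's side, fec_policy is a bitmask over the enum in port.h below; requesting Reed-Solomon FEC, for example, would be mlx5e_set_fec_mode(dev, 1 << MLX5E_FEC_RS_528_514), with the loop above transparently substituting MLX5E_FEC_RS_544_514 on the 50G-per-lane link modes.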
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index 4a7f4497692b..a2ddd446dd59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -60,15 +60,17 @@ int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
-int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps);
+bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy);
int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
- u8 *fec_configured_mode);
-int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy);
+ u16 *fec_configured_mode);
+int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy);
enum {
MLX5E_FEC_NOFEC,
MLX5E_FEC_FIRECODE,
MLX5E_FEC_RS_528_514,
+ MLX5E_FEC_RS_544_514 = 7,
+ MLX5E_FEC_LLRS_272_257_1 = 9,
};
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index a01e2de2488f..c209579fc213 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -102,19 +102,6 @@ out:
return err;
}
-void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
-{
- struct mlx5e_priv *priv = icosq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx = {};
-
- err_ctx.ctx = icosq;
- err_ctx.recover = mlx5e_rx_reporter_err_icosq_cqe_recover;
- sprintf(err_str, "ERR CQE on ICOSQ: 0x%x", icosq->sqn);
-
- mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
-}
-
static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
{
struct net_device *dev = rq->netdev;
@@ -171,19 +158,6 @@ out:
return err;
}
-void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
-{
- struct mlx5e_priv *priv = rq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx = {};
-
- err_ctx.ctx = rq;
- err_ctx.recover = mlx5e_rx_reporter_err_rq_cqe_recover;
- sprintf(err_str, "ERR CQE on RQ: 0x%x", rq->rqn);
-
- mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
-}
-
static int mlx5e_rx_reporter_timeout_recover(void *ctx)
{
struct mlx5e_icosq *icosq;
@@ -201,21 +175,6 @@ static int mlx5e_rx_reporter_timeout_recover(void *ctx)
return err;
}
-void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
-{
- struct mlx5e_icosq *icosq = &rq->channel->icosq;
- struct mlx5e_priv *priv = rq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx = {};
-
- err_ctx.ctx = rq;
- err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
- sprintf(err_str, "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x\n",
- icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn);
-
- mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
-}
-
static int mlx5e_rx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
{
return err_ctx->recover(err_ctx->ctx);
@@ -371,10 +330,235 @@ unlock:
return err;
}
+static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ void *ctx)
+{
+ struct mlx5e_txqsq *icosq = ctx;
+ struct mlx5_rsc_key key = {};
+ int err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "ICOSQ");
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
+ key.index1 = icosq->sqn;
+ key.num_of_obj1 = 1;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "send_buff");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
+ key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return mlx5e_reporter_named_obj_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ void *ctx)
+{
+ struct mlx5_rsc_key key = {};
+ struct mlx5e_rq *rq = ctx;
+ int err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "RX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "RQ");
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
+ key.index1 = rq->rqn;
+ key.num_of_obj1 = 1;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "receive_buff");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_RCV_BUFF;
+ key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return mlx5e_reporter_named_obj_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5_rsc_key key = {};
+ int i, err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "RX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
+ if (err)
+ return err;
+
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
+
+ err = mlx5e_health_queue_dump(priv, fmsg, rq->rqn, "RQ");
+ if (err)
+ return err;
+ }
+
+ return devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
+ struct mlx5e_err_ctx *err_ctx,
+ struct devlink_fmsg *fmsg)
+{
+ return err_ctx->dump(priv, fmsg, err_ctx->ctx);
+}
+
+static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *context,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_err_ctx *err_ctx = context;
+
+ return err_ctx ? mlx5e_rx_reporter_dump_from_ctx(priv, err_ctx, fmsg) :
+ mlx5e_rx_reporter_dump_all_rqs(priv, fmsg);
+}
+
+void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+{
+ struct mlx5e_icosq *icosq = &rq->channel->icosq;
+ struct mlx5e_priv *priv = rq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
+ err_ctx.dump = mlx5e_rx_reporter_dump_rq;
+ snprintf(err_str, sizeof(err_str),
+ "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x",
+ icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
+{
+ struct mlx5e_priv *priv = rq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_err_rq_cqe_recover;
+ err_ctx.dump = mlx5e_rx_reporter_dump_rq;
+ snprintf(err_str, sizeof(err_str), "ERR CQE on RQ: 0x%x", rq->rqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
+{
+ struct mlx5e_priv *priv = icosq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = icosq;
+ err_ctx.recover = mlx5e_rx_reporter_err_icosq_cqe_recover;
+ err_ctx.dump = mlx5e_rx_reporter_dump_icosq;
+ snprintf(err_str, sizeof(err_str), "ERR CQE on ICOSQ: 0x%x", icosq->sqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
.name = "rx",
.recover = mlx5e_rx_reporter_recover,
.diagnose = mlx5e_rx_reporter_diagnose,
+ .dump = mlx5e_rx_reporter_dump,
};
#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
@@ -387,7 +571,7 @@ int mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
reporter = devlink_health_reporter_create(devlink,
&mlx5_rx_reporter_ops,
MLX5E_REPORTER_RX_GRACEFUL_PERIOD,
- true, priv);
+ priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
PTR_ERR(reporter));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index b468549e96ff..9805fc085512 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -82,19 +82,6 @@ out:
return err;
}
-void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
-{
- struct mlx5e_priv *priv = sq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx = {0};
-
- err_ctx.ctx = sq;
- err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
- sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn);
-
- mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
-}
-
static int mlx5e_tx_reporter_timeout_recover(void *ctx)
{
struct mlx5_eq_comp *eq;
@@ -110,22 +97,6 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
return err;
}
-int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
-{
- struct mlx5e_priv *priv = sq->channel->priv;
- char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
- struct mlx5e_err_ctx err_ctx;
-
- err_ctx.ctx = sq;
- err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
- sprintf(err_str,
- "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
- sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
- jiffies_to_usecs(jiffies - sq->txq->trans_start));
-
- return mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
-}
-
/* state lock cannot be grabbed within this function.
* It can cause a dead lock or a read-after-free.
*/
@@ -275,10 +246,162 @@ unlock:
return err;
}
+static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ void *ctx)
+{
+ struct mlx5_rsc_key key = {};
+ struct mlx5e_txqsq *sq = ctx;
+ int err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SQ");
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
+ key.index1 = sq->sqn;
+ key.num_of_obj1 = 1;
+
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "send_buff");
+ if (err)
+ return err;
+
+ key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
+ key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return mlx5e_reporter_named_obj_nest_end(fmsg);
+}
+
+static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5_rsc_key key = {};
+ int i, tc, err;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice");
+ if (err)
+ return err;
+
+ key.size = PAGE_SIZE;
+ key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
+ err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
+ if (err)
+ return err;
+
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
+
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ struct mlx5e_txqsq *sq = &c->sq[tc];
+
+ err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
+ if (err)
+ return err;
+ }
+ }
+ return devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int mlx5e_tx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
+ struct mlx5e_err_ctx *err_ctx,
+ struct devlink_fmsg *fmsg)
+{
+ return err_ctx->dump(priv, fmsg, err_ctx->ctx);
+}
+
+static int mlx5e_tx_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *context,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_err_ctx *err_ctx = context;
+
+ return err_ctx ? mlx5e_tx_reporter_dump_from_ctx(priv, err_ctx, fmsg) :
+ mlx5e_tx_reporter_dump_all_sqs(priv, fmsg);
+}
+
+void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_priv *priv = sq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = sq;
+ err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
+ err_ctx.dump = mlx5e_tx_reporter_dump_sq;
+ snprintf(err_str, sizeof(err_str), "ERR CQE on SQ: 0x%x", sq->sqn);
+
+ mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
+}
+
+int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_priv *priv = sq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = sq;
+ err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
+ err_ctx.dump = mlx5e_tx_reporter_dump_sq;
+ snprintf(err_str, sizeof(err_str),
+ "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
+ sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+ jiffies_to_usecs(jiffies - sq->txq->trans_start));
+
+ return mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
+}
+
static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
.name = "tx",
.recover = mlx5e_tx_reporter_recover,
.diagnose = mlx5e_tx_reporter_diagnose,
+ .dump = mlx5e_tx_reporter_dump,
};
#define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
@@ -293,7 +416,7 @@ int mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
reporter =
devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops,
MLX5_REPORTER_TX_GRACEFUL_PERIOD,
- true, priv);
+ priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev,
"Failed to create tx reporter, err = %ld\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
new file mode 100644
index 000000000000..ad3e3a65d403
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -0,0 +1,1369 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <uapi/linux/tc_act/tc_pedit.h>
+#include <net/tc_act/tc_ct.h>
+#include <net/flow_offload.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <linux/workqueue.h>
+
+#include "esw/chains.h"
+#include "en/tc_ct.h"
+#include "en.h"
+#include "en_tc.h"
+#include "en_rep.h"
+
+#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen * 8)
+#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
+#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
+#define MLX5_CT_STATE_TRK_BIT BIT(2)
+
+#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
+#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
+#define MLX5_FTE_ID_MASK MLX5_FTE_ID_MAX
+
+#define ct_dbg(fmt, args...)\
+ netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
+
+struct mlx5_tc_ct_priv {
+ struct mlx5_eswitch *esw;
+ const struct net_device *netdev;
+ struct idr fte_ids;
+ struct idr tuple_ids;
+ struct rhashtable zone_ht;
+ struct mlx5_flow_table *ct;
+ struct mlx5_flow_table *ct_nat;
+ struct mlx5_flow_table *post_ct;
+ struct mutex control_lock; /* guards parallel adds/dels */
+};
+
+struct mlx5_ct_flow {
+ struct mlx5_esw_flow_attr pre_ct_attr;
+ struct mlx5_esw_flow_attr post_ct_attr;
+ struct mlx5_flow_handle *pre_ct_rule;
+ struct mlx5_flow_handle *post_ct_rule;
+ struct mlx5_ct_ft *ft;
+ u32 fte_id;
+ u32 chain_mapping;
+};
+
+struct mlx5_ct_zone_rule {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_esw_flow_attr attr;
+ int tupleid;
+ bool nat;
+};
+
+struct mlx5_ct_ft {
+ struct rhash_head node;
+ u16 zone;
+ refcount_t refcount;
+ struct nf_flowtable *nf_ft;
+ struct mlx5_tc_ct_priv *ct_priv;
+ struct rhashtable ct_entries_ht;
+ struct list_head ct_entries_list;
+};
+
+struct mlx5_ct_entry {
+ struct list_head list;
+ u16 zone;
+ struct rhash_head node;
+ struct flow_rule *flow_rule;
+ struct mlx5_fc *counter;
+ unsigned long lastuse;
+ unsigned long cookie;
+ unsigned long restore_cookie;
+ struct mlx5_ct_zone_rule zone_rules[2];
+};
+
+static const struct rhashtable_params cts_ht_params = {
+ .head_offset = offsetof(struct mlx5_ct_entry, node),
+ .key_offset = offsetof(struct mlx5_ct_entry, cookie),
+ .key_len = sizeof(((struct mlx5_ct_entry *)0)->cookie),
+ .automatic_shrinking = true,
+ .min_size = 16 * 1024,
+};
+
+static const struct rhashtable_params zone_params = {
+ .head_offset = offsetof(struct mlx5_ct_ft, node),
+ .key_offset = offsetof(struct mlx5_ct_ft, zone),
+ .key_len = sizeof(((struct mlx5_ct_ft *)0)->zone),
+ .automatic_shrinking = true,
+};
+
+static struct mlx5_tc_ct_priv *
+mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ return uplink_priv->ct_priv;
+}
+
+static int
+mlx5_tc_ct_set_tuple_match(struct mlx5_flow_spec *spec,
+ struct flow_rule *rule)
+{
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
+ u16 addr_type = 0;
+ u8 ip_proto = 0;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+ ntohs(match.mask->n_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ntohs(match.key->n_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+ match.mask->ip_proto);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ match.key->ip_proto);
+
+ ip_proto = match.key->ip_proto;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &match.mask->src, sizeof(match.mask->src));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &match.key->src, sizeof(match.key->src));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &match.mask->dst, sizeof(match.mask->dst));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &match.key->dst, sizeof(match.key->dst));
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &match.mask->src, sizeof(match.mask->src));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &match.key->src, sizeof(match.key->src));
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &match.mask->dst, sizeof(match.mask->dst));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &match.key->dst, sizeof(match.key->dst));
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ tcp_sport, ntohs(match.mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ tcp_sport, ntohs(match.key->src));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ tcp_dport, ntohs(match.mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ tcp_dport, ntohs(match.key->dst));
+ break;
+
+ case IPPROTO_UDP:
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ udp_sport, ntohs(match.mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ udp_sport, ntohs(match.key->src));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ udp_dport, ntohs(match.mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ udp_dport, ntohs(match.key->dst));
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match;
+
+ flow_rule_match_tcp(rule, &match);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
+ ntohs(match.mask->flags));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
+ ntohs(match.key->flags));
+ }
+
+ return 0;
+}
+
+static void
+mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_ct_entry *entry,
+ bool nat)
+{
+ struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
+ struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
+ struct mlx5_eswitch *esw = ct_priv->esw;
+
+ ct_dbg("Deleting ct entry rule in zone %d", entry->zone);
+
+ mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
+ mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
+ idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
+}
+
+static void
+mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_ct_entry *entry)
+{
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+
+ mlx5_fc_destroy(ct_priv->esw->dev, entry->counter);
+}
+
+static struct flow_action_entry *
+mlx5_tc_ct_get_ct_metadata_action(struct flow_rule *flow_rule)
+{
+ struct flow_action *flow_action = &flow_rule->action;
+ struct flow_action_entry *act;
+ int i;
+
+ flow_action_for_each(i, act, flow_action) {
+ if (act->id == FLOW_ACTION_CT_METADATA)
+ return act;
+ }
+
+ return NULL;
+}
+
+static int
+mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts,
+ u8 ct_state,
+ u32 mark,
+ u32 label,
+ u32 tupleid)
+{
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ int err;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ CTSTATE_TO_REG, ct_state);
+ if (err)
+ return err;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ MARK_TO_REG, mark);
+ if (err)
+ return err;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ LABELS_TO_REG, label);
+ if (err)
+ return err;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ TUPLEID_TO_REG, tupleid);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act,
+ char *modact)
+{
+ u32 offset = act->mangle.offset, field;
+
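+	/* In set_action_in, a length of 0 encodes a full 32 bit write;
+	 * the L4 port cases below use an explicit 16 bit length.
+	 */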
+ switch (act->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ MLX5_SET(set_action_in, modact, length, 0);
+ if (offset == offsetof(struct iphdr, saddr))
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV4;
+ else if (offset == offsetof(struct iphdr, daddr))
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV4;
+ else
+ return -EOPNOTSUPP;
+ break;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ MLX5_SET(set_action_in, modact, length, 0);
+ if (offset == offsetof(struct ipv6hdr, saddr))
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0;
+ else if (offset == offsetof(struct ipv6hdr, saddr) + 4)
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32;
+ else if (offset == offsetof(struct ipv6hdr, saddr) + 8)
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64;
+ else if (offset == offsetof(struct ipv6hdr, saddr) + 12)
+ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96;
+ else if (offset == offsetof(struct ipv6hdr, daddr))
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0;
+ else if (offset == offsetof(struct ipv6hdr, daddr) + 4)
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32;
+ else if (offset == offsetof(struct ipv6hdr, daddr) + 8)
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64;
+ else if (offset == offsetof(struct ipv6hdr, daddr) + 12)
+ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96;
+ else
+ return -EOPNOTSUPP;
+ break;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ MLX5_SET(set_action_in, modact, length, 16);
+ if (offset == offsetof(struct tcphdr, source))
+ field = MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT;
+ else if (offset == offsetof(struct tcphdr, dest))
+ field = MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT;
+ else
+ return -EOPNOTSUPP;
+ break;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ MLX5_SET(set_action_in, modact, length, 16);
+ if (offset == offsetof(struct udphdr, source))
+ field = MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT;
+ else if (offset == offsetof(struct udphdr, dest))
+ field = MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT;
+ else
+ return -EOPNOTSUPP;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, modact, offset, 0);
+ MLX5_SET(set_action_in, modact, field, field);
+ MLX5_SET(set_action_in, modact, data, act->mangle.val);
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
+{
+ struct flow_action *flow_action = &flow_rule->action;
+ struct mlx5_core_dev *mdev = ct_priv->esw->dev;
+ struct flow_action_entry *act;
+ size_t action_size;
+ char *modact;
+ int err, i;
+
+ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+
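+	/* Each mangle action is translated into one set_action_in rewrite;
+	 * alloc_mod_hdr_actions() makes sure there is room for it.
+	 */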
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE: {
+ err = alloc_mod_hdr_actions(mdev,
+ MLX5_FLOW_NAMESPACE_FDB,
+ mod_acts);
+ if (err)
+ return err;
+
+ modact = mod_acts->actions +
+ mod_acts->num_actions * action_size;
+
+ err = mlx5_tc_ct_parse_mangle_to_mod_act(act, modact);
+ if (err)
+ return err;
+
+ mod_acts->num_actions++;
+ }
+ break;
+
+ case FLOW_ACTION_CT_METADATA:
+ /* Handled earlier */
+ continue;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_esw_flow_attr *attr,
+ struct flow_rule *flow_rule,
+ u32 tupleid,
+ bool nat)
+{
+ struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_modify_hdr *mod_hdr;
+ struct flow_action_entry *meta;
+ int err;
+
+ meta = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
+ if (!meta)
+ return -EOPNOTSUPP;
+
+ if (meta->ct_metadata.labels[1] ||
+ meta->ct_metadata.labels[2] ||
+ meta->ct_metadata.labels[3]) {
+ ct_dbg("Failed to offload ct entry due to unsupported label");
+ return -EOPNOTSUPP;
+ }
+
+ if (nat) {
+ err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule,
+ &mod_acts);
+ if (err)
+ goto err_mapping;
+ }
+
+ err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
+ (MLX5_CT_STATE_ESTABLISHED_BIT |
+ MLX5_CT_STATE_TRK_BIT),
+ meta->ct_metadata.mark,
+ meta->ct_metadata.labels[0],
+ tupleid);
+ if (err)
+ goto err_mapping;
+
+ mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
+ mod_acts.num_actions,
+ mod_acts.actions);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ goto err_mapping;
+ }
+ attr->modify_hdr = mod_hdr;
+
+ dealloc_mod_hdr_actions(&mod_acts);
+ return 0;
+
+err_mapping:
+ dealloc_mod_hdr_actions(&mod_acts);
+ return err;
+}
+
+static int
+mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ bool nat)
+{
+ struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
+ struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_flow_spec *spec = NULL;
+ u32 tupleid = 1;
+ int err;
+
+ zone_rule->nat = nat;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* Get tuple unique id */
+ err = idr_alloc_u32(&ct_priv->tuple_ids, zone_rule, &tupleid,
+ TUPLE_ID_MAX, GFP_KERNEL);
+ if (err) {
+ netdev_warn(ct_priv->netdev,
+ "Failed to allocate tuple id, err: %d\n", err);
+ goto err_idr_alloc;
+ }
+ zone_rule->tupleid = tupleid;
+
+ err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
+ tupleid, nat);
+ if (err) {
+ ct_dbg("Failed to create ct entry mod hdr");
+ goto err_mod_hdr;
+ }
+
+ attr->action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->dest_chain = 0;
+ attr->dest_ft = ct_priv->post_ct;
+ attr->fdb = nat ? ct_priv->ct_nat : ct_priv->ct;
+ attr->outer_match_level = MLX5_MATCH_L4;
+ attr->counter = entry->counter;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
+
+ mlx5_tc_ct_set_tuple_match(spec, flow_rule);
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
+ entry->zone & MLX5_CT_ZONE_MASK,
+ MLX5_CT_ZONE_MASK);
+
+ zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+ if (IS_ERR(zone_rule->rule)) {
+ err = PTR_ERR(zone_rule->rule);
+ ct_dbg("Failed to add ct entry rule, nat: %d", nat);
+ goto err_rule;
+ }
+
+ kfree(spec);
+ ct_dbg("Offloaded ct entry rule in zone %d", entry->zone);
+
+ return 0;
+
+err_rule:
+ mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
+err_mod_hdr:
+ idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
+err_idr_alloc:
+ kfree(spec);
+ return err;
+}
+
+static int
+mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry)
+{
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ int err;
+
+ entry->counter = mlx5_fc_create(esw->dev, true);
+ if (IS_ERR(entry->counter)) {
+ err = PTR_ERR(entry->counter);
+ ct_dbg("Failed to create counter for ct entry");
+ return err;
+ }
+
+ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false);
+ if (err)
+ goto err_orig;
+
+ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true);
+ if (err)
+ goto err_nat;
+
+ return 0;
+
+err_nat:
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+err_orig:
+ mlx5_fc_destroy(esw->dev, entry->counter);
+ return err;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *flow_rule = flow_cls_offload_flow_rule(flow);
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ struct flow_action_entry *meta_action;
+ unsigned long cookie = flow->cookie;
+ struct mlx5_ct_entry *entry;
+ int err;
+
+ meta_action = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
+ if (!meta_action)
+ return -EOPNOTSUPP;
+
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+ cts_ht_params);
+ if (entry)
+ return 0;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->zone = ft->zone;
+ entry->flow_rule = flow_rule;
+ entry->cookie = flow->cookie;
+ entry->restore_cookie = meta_action->ct_metadata.cookie;
+
+ err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry);
+ if (err)
+ goto err_rules;
+
+ err = rhashtable_insert_fast(&ft->ct_entries_ht, &entry->node,
+ cts_ht_params);
+ if (err)
+ goto err_insert;
+
+ list_add(&entry->list, &ft->ct_entries_list);
+
+ return 0;
+
+err_insert:
+ mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+err_rules:
+ kfree(entry);
+ netdev_warn(ct_priv->netdev,
+ "Failed to offload ct entry, err: %d\n", err);
+ return err;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *flow)
+{
+ unsigned long cookie = flow->cookie;
+ struct mlx5_ct_entry *entry;
+
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+ cts_ht_params);
+ if (!entry)
+ return -ENOENT;
+
+ mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry);
+ WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht,
+ &entry->node,
+ cts_ht_params));
+ list_del(&entry->list);
+ kfree(entry);
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *f)
+{
+ unsigned long cookie = f->cookie;
+ struct mlx5_ct_entry *entry;
+ u64 lastuse, packets, bytes;
+
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+ cts_ht_params);
+ if (!entry)
+ return -ENOENT;
+
+ mlx5_fc_query_cached(entry->counter, &bytes, &packets, &lastuse);
+ flow_stats_update(&f->stats, bytes, packets, lastuse,
+ FLOW_ACTION_HW_STATS_DELAYED);
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *f = type_data;
+ struct mlx5_ct_ft *ft = cb_priv;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return mlx5_tc_ct_block_flow_offload_add(ft, f);
+ case FLOW_CLS_DESTROY:
+ return mlx5_tc_ct_block_flow_offload_del(ft, f);
+ case FLOW_CLS_STATS:
+ return mlx5_tc_ct_block_flow_offload_stats(ft, f);
+ default:
+ break;
+	}
+
+ return -EOPNOTSUPP;
+}
+
+int
+mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ struct flow_dissector_key_ct *mask, *key;
+ bool trk, est, untrk, unest, new;
+ u32 ctstate = 0, ctstate_mask = 0;
+ u16 ct_state_on, ct_state_off;
+ u16 ct_state, ct_state_mask;
+ struct flow_match_ct match;
+
+ if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT))
+ return 0;
+
+ if (!ct_priv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload of ct matching isn't available");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_ct(f->rule, &match);
+
+ key = match.key;
+ mask = match.mask;
+
+ ct_state = key->ct_state;
+ ct_state_mask = mask->ct_state;
+
+ if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
+ TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
+ TCA_FLOWER_KEY_CT_FLAGS_NEW)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "only ct_state trk, est and new are supported for offload");
+ return -EOPNOTSUPP;
+ }
+
+ if (mask->ct_labels[1] || mask->ct_labels[2] || mask->ct_labels[3]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "only lower 32bits of ct_labels are supported for offload");
+ return -EOPNOTSUPP;
+ }
+
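+	/* Bits set in both key and mask must be on, while bits set in the
+	 * mask but clear in the key are negated (must be off) matches.
+	 */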
+ ct_state_on = ct_state & ct_state_mask;
+ ct_state_off = (ct_state & ct_state_mask) ^ ct_state_mask;
+ trk = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
+ new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW;
+ est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
+ untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
+ unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
+
+ ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
+ ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
+ ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
+ ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
+
+ if (new) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "matching on ct_state +new isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (mask->ct_zone)
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
+ key->ct_zone, MLX5_CT_ZONE_MASK);
+ if (ctstate_mask)
+ mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG,
+ ctstate, ctstate_mask);
+ if (mask->ct_mark)
+ mlx5e_tc_match_to_reg_match(spec, MARK_TO_REG,
+ key->ct_mark, mask->ct_mark);
+ if (mask->ct_labels[0])
+ mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG,
+ key->ct_labels[0],
+ mask->ct_labels[0]);
+
+ return 0;
+}
+
+int
+mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
+ struct mlx5_esw_flow_attr *attr,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+
+ if (!ct_priv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload of ct action isn't available");
+ return -EOPNOTSUPP;
+ }
+
+ attr->ct_attr.zone = act->ct.zone;
+ attr->ct_attr.ct_action = act->ct.action;
+ attr->ct_attr.nf_ft = act->ct.flow_table;
+
+ return 0;
+}
+
+static struct mlx5_ct_ft *
+mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
+ struct nf_flowtable *nf_ft)
+{
+ struct mlx5_ct_ft *ft;
+ int err;
+
+ ft = rhashtable_lookup_fast(&ct_priv->zone_ht, &zone, zone_params);
+ if (ft) {
+ refcount_inc(&ft->refcount);
+ return ft;
+ }
+
+ ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+ if (!ft)
+ return ERR_PTR(-ENOMEM);
+
+ ft->zone = zone;
+ ft->nf_ft = nf_ft;
+ ft->ct_priv = ct_priv;
+ INIT_LIST_HEAD(&ft->ct_entries_list);
+ refcount_set(&ft->refcount, 1);
+
+ err = rhashtable_init(&ft->ct_entries_ht, &cts_ht_params);
+ if (err)
+ goto err_init;
+
+ err = rhashtable_insert_fast(&ct_priv->zone_ht, &ft->node,
+ zone_params);
+ if (err)
+ goto err_insert;
+
+ err = nf_flow_table_offload_add_cb(ft->nf_ft,
+ mlx5_tc_ct_block_flow_offload, ft);
+ if (err)
+ goto err_add_cb;
+
+ return ft;
+
+err_add_cb:
+ rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
+err_insert:
+ rhashtable_destroy(&ft->ct_entries_ht);
+err_init:
+ kfree(ft);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_tc_ct_flush_ft(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
+{
+ struct mlx5_ct_entry *entry;
+
+ list_for_each_entry(entry, &ft->ct_entries_list, list)
+ mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry);
+}
+
+static void
+mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
+{
+ if (!refcount_dec_and_test(&ft->refcount))
+ return;
+
+ nf_flow_table_offload_del_cb(ft->nf_ft,
+ mlx5_tc_ct_block_flow_offload, ft);
+ mlx5_tc_ct_flush_ft(ct_priv, ft);
+ rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
+ rhashtable_destroy(&ft->ct_entries_ht);
+ kfree(ft);
+}
+
+/* We translate the tc filter with CT action to the following HW model:
+ *
+ * +-------------------+ +--------------------+ +--------------+
+ * + pre_ct (tc chain) +----->+ CT (nat or no nat) +--->+ post_ct +----->
+ * + original match + | + tuple + zone match + | + fte_id match + |
+ * +-------------------+ | +--------------------+ | +--------------+ |
+ * v v v
+ * set chain miss mapping set mark original
+ * set fte_id set label filter
+ * set zone set established actions
+ * set tunnel_id do nat (if needed)
+ * do decap
+ */
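+/* The pre_ct rule keeps the original chain and prio, so it is hit like any
+ * other tc rule; the CT, CT-NAT and post_ct tables are global eswitch
+ * tables shared by all ct flows.
+ */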
+static int
+__mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *orig_spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_handle **flow_rule)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
+ struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
+ struct mlx5_flow_spec *post_ct_spec = NULL;
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_esw_flow_attr *pre_ct_attr;
+ struct mlx5_modify_hdr *mod_hdr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_ct_flow *ct_flow;
+ int chain_mapping = 0, err;
+ struct mlx5_ct_ft *ft;
+ u32 fte_id = 1;
+
+ post_ct_spec = kzalloc(sizeof(*post_ct_spec), GFP_KERNEL);
+ ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
+ if (!post_ct_spec || !ct_flow) {
+ kfree(post_ct_spec);
+ kfree(ct_flow);
+ return -ENOMEM;
+ }
+
+ /* Register for CT established events */
+ ft = mlx5_tc_ct_add_ft_cb(ct_priv, attr->ct_attr.zone,
+ attr->ct_attr.nf_ft);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ ct_dbg("Failed to register to ft callback");
+ goto err_ft;
+ }
+ ct_flow->ft = ft;
+
+ err = idr_alloc_u32(&ct_priv->fte_ids, ct_flow, &fte_id,
+ MLX5_FTE_ID_MAX, GFP_KERNEL);
+ if (err) {
+ netdev_warn(priv->netdev,
+ "Failed to allocate fte id, err: %d\n", err);
+ goto err_idr;
+ }
+ ct_flow->fte_id = fte_id;
+
+ /* Base esw attributes of both rules on original rule attribute */
+ pre_ct_attr = &ct_flow->pre_ct_attr;
+ memcpy(pre_ct_attr, attr, sizeof(*attr));
+ memcpy(&ct_flow->post_ct_attr, attr, sizeof(*attr));
+
+ /* Modify the original rule's action to fwd and modify, leave decap */
+ pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+	/* Write the chain miss tag for misses in the ct table, as we
+	 * don't go through all the prios of this chain as normal tc
+	 * rules would on a miss.
+	 */
+ err = mlx5_esw_chains_get_chain_mapping(esw, attr->chain,
+ &chain_mapping);
+ if (err) {
+ ct_dbg("Failed to get chain register mapping for chain");
+ goto err_get_chain;
+ }
+ ct_flow->chain_mapping = chain_mapping;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ CHAIN_TO_REG, chain_mapping);
+ if (err) {
+ ct_dbg("Failed to set chain register mapping");
+ goto err_mapping;
+ }
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts, ZONE_TO_REG,
+ attr->ct_attr.zone &
+ MLX5_CT_ZONE_MASK);
+ if (err) {
+ ct_dbg("Failed to set zone register mapping");
+ goto err_mapping;
+ }
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ FTEID_TO_REG, fte_id);
+ if (err) {
+ ct_dbg("Failed to set fte_id register mapping");
+ goto err_mapping;
+ }
+
+	/* If the original flow performs decap, it is done before entering
+	 * the ct table, so add a rewrite of the tunnel match_id into its
+	 * register.
+	 */
+ if ((pre_ct_attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ attr->chain == 0) {
+ u32 tun_id = mlx5e_tc_get_flow_tun_id(flow);
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ TUNNEL_TO_REG,
+ tun_id);
+ if (err) {
+ ct_dbg("Failed to set tunnel register mapping");
+ goto err_mapping;
+ }
+ }
+
+ mod_hdr = mlx5_modify_header_alloc(esw->dev,
+ MLX5_FLOW_NAMESPACE_FDB,
+ pre_mod_acts.num_actions,
+ pre_mod_acts.actions);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ ct_dbg("Failed to create pre ct mod hdr");
+ goto err_mapping;
+ }
+ pre_ct_attr->modify_hdr = mod_hdr;
+
+	/* The post_ct rule matches on fte_id and executes the original
+	 * tc rule's actions
+	 */
+ mlx5e_tc_match_to_reg_match(post_ct_spec, FTEID_TO_REG,
+ fte_id, MLX5_FTE_ID_MASK);
+
+ /* Put post_ct rule on post_ct fdb */
+ ct_flow->post_ct_attr.chain = 0;
+ ct_flow->post_ct_attr.prio = 0;
+ ct_flow->post_ct_attr.fdb = ct_priv->post_ct;
+
+ ct_flow->post_ct_attr.inner_match_level = MLX5_MATCH_NONE;
+ ct_flow->post_ct_attr.outer_match_level = MLX5_MATCH_NONE;
+ ct_flow->post_ct_attr.action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
+ rule = mlx5_eswitch_add_offloaded_rule(esw, post_ct_spec,
+ &ct_flow->post_ct_attr);
+ ct_flow->post_ct_rule = rule;
+ if (IS_ERR(ct_flow->post_ct_rule)) {
+ err = PTR_ERR(ct_flow->post_ct_rule);
+ ct_dbg("Failed to add post ct rule");
+ goto err_insert_post_ct;
+ }
+
+	/* Change the original rule to point to the ct table */
+ pre_ct_attr->dest_chain = 0;
+ pre_ct_attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
+ ct_flow->pre_ct_rule = mlx5_eswitch_add_offloaded_rule(esw,
+ orig_spec,
+ pre_ct_attr);
+ if (IS_ERR(ct_flow->pre_ct_rule)) {
+ err = PTR_ERR(ct_flow->pre_ct_rule);
+ ct_dbg("Failed to add pre ct rule");
+ goto err_insert_orig;
+ }
+
+ attr->ct_attr.ct_flow = ct_flow;
+ *flow_rule = ct_flow->post_ct_rule;
+ dealloc_mod_hdr_actions(&pre_mod_acts);
+ kfree(post_ct_spec);
+
+ return 0;
+
+err_insert_orig:
+ mlx5_eswitch_del_offloaded_rule(ct_priv->esw, ct_flow->post_ct_rule,
+ &ct_flow->post_ct_attr);
+err_insert_post_ct:
+ mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
+err_mapping:
+ dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping);
+err_get_chain:
+ idr_remove(&ct_priv->fte_ids, fte_id);
+err_idr:
+ mlx5_tc_ct_del_ft_cb(ct_priv, ft);
+err_ft:
+ kfree(post_ct_spec);
+ kfree(ct_flow);
+ netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
+ return err;
+}
+
+static int
+__mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *orig_spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts,
+ struct mlx5_flow_handle **flow_rule)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_esw_flow_attr *pre_ct_attr;
+ struct mlx5_modify_hdr *mod_hdr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_ct_flow *ct_flow;
+ int err;
+
+ ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
+ if (!ct_flow)
+ return -ENOMEM;
+
+ /* Base esw attributes on original rule attribute */
+ pre_ct_attr = &ct_flow->pre_ct_attr;
+ memcpy(pre_ct_attr, attr, sizeof(*attr));
+
+ err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0);
+ if (err) {
+ ct_dbg("Failed to set register for ct clear");
+ goto err_set_registers;
+ }
+
+ mod_hdr = mlx5_modify_header_alloc(esw->dev,
+ MLX5_FLOW_NAMESPACE_FDB,
+ mod_acts->num_actions,
+ mod_acts->actions);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+		ct_dbg("Failed to create ct clear mod hdr");
+ goto err_set_registers;
+ }
+
+ dealloc_mod_hdr_actions(mod_acts);
+ pre_ct_attr->modify_hdr = mod_hdr;
+ pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ rule = mlx5_eswitch_add_offloaded_rule(esw, orig_spec, pre_ct_attr);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ ct_dbg("Failed to add ct clear rule");
+ goto err_insert;
+ }
+
+ attr->ct_attr.ct_flow = ct_flow;
+ ct_flow->pre_ct_rule = rule;
+ *flow_rule = rule;
+
+ return 0;
+
+err_insert:
+ mlx5_modify_header_dealloc(priv->mdev, mod_hdr);
+err_set_registers:
+ netdev_warn(priv->netdev,
+ "Failed to offload ct clear flow, err %d\n", err);
+ return err;
+}
+
+struct mlx5_flow_handle *
+mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ struct mlx5_flow_handle *rule;
+ int err;
+
+ if (!ct_priv)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&ct_priv->control_lock);
+ if (clear_action)
+ err = __mlx5_tc_ct_flow_offload_clear(priv, flow, spec, attr,
+ mod_hdr_acts, &rule);
+ else
+ err = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr,
+ &rule);
+ mutex_unlock(&ct_priv->control_lock);
+ if (err)
+ return ERR_PTR(err);
+
+ return rule;
+}
+
+static void
+__mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_ct_flow *ct_flow)
+{
+ struct mlx5_esw_flow_attr *pre_ct_attr = &ct_flow->pre_ct_attr;
+ struct mlx5_eswitch *esw = ct_priv->esw;
+
+ mlx5_eswitch_del_offloaded_rule(esw, ct_flow->pre_ct_rule,
+ pre_ct_attr);
+ mlx5_modify_header_dealloc(esw->dev, pre_ct_attr->modify_hdr);
+
+ if (ct_flow->post_ct_rule) {
+ mlx5_eswitch_del_offloaded_rule(esw, ct_flow->post_ct_rule,
+ &ct_flow->post_ct_attr);
+ mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping);
+ idr_remove(&ct_priv->fte_ids, ct_flow->fte_id);
+ mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
+ }
+
+ kfree(ct_flow);
+}
+
+void
+mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+ struct mlx5_ct_flow *ct_flow = attr->ct_attr.ct_flow;
+
+	/* We are also called on error while parsing the flow, before any
+	 * ct_flow was offloaded, in which case there is nothing to delete.
+	 */
+ if (!ct_flow)
+ return;
+
+ mutex_lock(&ct_priv->control_lock);
+ __mlx5_tc_ct_delete_flow(ct_priv, ct_flow);
+ mutex_unlock(&ct_priv->control_lock);
+}
+
+static int
+mlx5_tc_ct_init_check_support(struct mlx5_eswitch *esw,
+ const char **err_msg)
+{
+#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ /* cannot restore chain ID on HW miss */
+
+ *err_msg = "tc skb extension missing";
+ return -EOPNOTSUPP;
+#endif
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) {
+ *err_msg = "firmware level support is missing";
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) {
+ /* vlan workaround should be avoided for multi chain rules.
+ * This is just a sanity check as pop vlan action should
+ * be supported by any FW that supports ignore_flow_level
+ */
+
+ *err_msg = "firmware vlan actions support is missing";
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev,
+ fdb_modify_header_fwd_to_table)) {
+		/* CT always writes to registers, which are mod header
+		 * actions, so both mod header and forwarding to another
+		 * table are required
+		 */
+
+ *err_msg = "firmware fwd and modify support is missing";
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
+ *err_msg = "register loopback isn't supported";
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void
+mlx5_tc_ct_init_err(struct mlx5e_rep_priv *rpriv, const char *msg, int err)
+{
+ if (msg)
+ netdev_warn(rpriv->netdev,
+ "tc ct offload not supported, %s, err: %d\n",
+ msg, err);
+ else
+ netdev_warn(rpriv->netdev,
+ "tc ct offload not supported, err: %d\n",
+ err);
+}
+
+int
+mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
+{
+ struct mlx5_tc_ct_priv *ct_priv;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5e_priv *priv;
+ const char *msg;
+ int err;
+
+ rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+ priv = netdev_priv(rpriv->netdev);
+ esw = priv->mdev->priv.eswitch;
+
+ err = mlx5_tc_ct_init_check_support(esw, &msg);
+ if (err) {
+ mlx5_tc_ct_init_err(rpriv, msg, err);
+ goto err_support;
+ }
+
+ ct_priv = kzalloc(sizeof(*ct_priv), GFP_KERNEL);
+ if (!ct_priv) {
+ mlx5_tc_ct_init_err(rpriv, NULL, -ENOMEM);
+ goto err_alloc;
+ }
+
+ ct_priv->esw = esw;
+ ct_priv->netdev = rpriv->netdev;
+ ct_priv->ct = mlx5_esw_chains_create_global_table(esw);
+ if (IS_ERR(ct_priv->ct)) {
+ err = PTR_ERR(ct_priv->ct);
+ mlx5_tc_ct_init_err(rpriv, "failed to create ct table", err);
+ goto err_ct_tbl;
+ }
+
+ ct_priv->ct_nat = mlx5_esw_chains_create_global_table(esw);
+ if (IS_ERR(ct_priv->ct_nat)) {
+ err = PTR_ERR(ct_priv->ct_nat);
+ mlx5_tc_ct_init_err(rpriv, "failed to create ct nat table",
+ err);
+ goto err_ct_nat_tbl;
+ }
+
+ ct_priv->post_ct = mlx5_esw_chains_create_global_table(esw);
+ if (IS_ERR(ct_priv->post_ct)) {
+ err = PTR_ERR(ct_priv->post_ct);
+ mlx5_tc_ct_init_err(rpriv, "failed to create post ct table",
+ err);
+ goto err_post_ct_tbl;
+ }
+
+ idr_init(&ct_priv->fte_ids);
+ idr_init(&ct_priv->tuple_ids);
+ mutex_init(&ct_priv->control_lock);
+ rhashtable_init(&ct_priv->zone_ht, &zone_params);
+
+	/* Done, set ct_priv to mark that initialization succeeded */
+ uplink_priv->ct_priv = ct_priv;
+
+ return 0;
+
+err_post_ct_tbl:
+ mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct_nat);
+err_ct_nat_tbl:
+ mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct);
+err_ct_tbl:
+ kfree(ct_priv);
+err_alloc:
+err_support:
+
+ return 0;
+}
+
+void
+mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
+{
+ struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
+
+ if (!ct_priv)
+ return;
+
+ mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->post_ct);
+ mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat);
+ mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct);
+
+ rhashtable_destroy(&ct_priv->zone_ht);
+ mutex_destroy(&ct_priv->control_lock);
+ idr_destroy(&ct_priv->tuple_ids);
+ idr_destroy(&ct_priv->fte_ids);
+ kfree(ct_priv);
+
+ uplink_priv->ct_priv = NULL;
+}
+
+bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+ struct sk_buff *skb, u32 tupleid)
+{
+ struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
+ struct mlx5_ct_zone_rule *zone_rule;
+ struct mlx5_ct_entry *entry;
+
+ if (!ct_priv || !tupleid)
+ return true;
+
+ zone_rule = idr_find(&ct_priv->tuple_ids, tupleid);
+ if (!zone_rule)
+ return false;
+
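+	/* zone_rule is one of entry->zone_rules[], so the owning entry can
+	 * be recovered through the stored nat flag.
+	 */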
+ entry = container_of(zone_rule, struct mlx5_ct_entry,
+ zone_rules[zone_rule->nat]);
+ tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
+
+ return true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
new file mode 100644
index 000000000000..091d305b633e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_TC_CT_H__
+#define __MLX5_EN_TC_CT_H__
+
+#include <net/pkt_cls.h>
+#include <linux/mlx5/fs.h>
+#include <net/tc_act/tc_ct.h>
+
+#include "en.h"
+
+struct mlx5_esw_flow_attr;
+struct mlx5e_tc_mod_hdr_acts;
+struct mlx5_rep_uplink_priv;
+struct mlx5e_tc_flow;
+struct mlx5e_priv;
+
+struct mlx5_ct_flow;
+
+struct nf_flowtable;
+
+struct mlx5_ct_attr {
+ u16 zone;
+ u16 ct_action;
+ struct mlx5_ct_flow *ct_flow;
+ struct nf_flowtable *nf_ft;
+};
+
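+/* The mappings below route ct metadata through eswitch metadata registers:
+ * reg_c_2 is split between the zone and the ct state, reg_c_3 carries the
+ * mark, reg_c_4 the low 32 bits of the labels, reg_c_5 the fte id and
+ * reg_c_1 the restore tuple id. moffset and mlen are byte offsets and
+ * lengths within the 32 bit register.
+ */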
+#define zone_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_2,\
+ .moffset = 0,\
+ .mlen = 2,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_2) + 2,\
+}
+
+#define ctstate_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_2,\
+ .moffset = 2,\
+ .mlen = 2,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_2),\
+}
+
+#define mark_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_3,\
+ .moffset = 0,\
+ .mlen = 4,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_3),\
+}
+
+#define labels_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_4,\
+ .moffset = 0,\
+ .mlen = 4,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_4),\
+}
+
+#define fteid_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5,\
+ .moffset = 0,\
+ .mlen = 4,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_5),\
+}
+
+#define tupleid_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\
+ .moffset = 0,\
+ .mlen = 3,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_1),\
+}
+
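+/* Tuple ids must fit in the 3 bytes of reg_c_1 mapped above */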
+#define TUPLE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[TUPLEID_TO_REG].mlen * 8)
+#define TUPLE_ID_MAX GENMASK(TUPLE_ID_BITS - 1, 0)
+
+#if IS_ENABLED(CONFIG_MLX5_TC_CT)
+
+int
+mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv);
+void
+mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv);
+
+int
+mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct netlink_ext_ack *extack);
+int
+mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
+ struct mlx5_esw_flow_attr *attr,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack);
+
+struct mlx5_flow_handle *
+mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+void
+mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_esw_flow_attr *attr);
+
+bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+ struct sk_buff *skb, u32 tupleid);
+
+#else /* CONFIG_MLX5_TC_CT */
+
+static inline int
+mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
+{
+ return 0;
+}
+
+static inline void
+mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
+{
+}
+
+static inline int
+mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct netlink_ext_ack *extack)
+{
+ if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT))
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
+ netdev_warn(priv->netdev, "mlx5 tc ct offload isn't enabled.\n");
+ return -EOPNOTSUPP;
+}
+
+static inline int
+mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
+ struct mlx5_esw_flow_attr *attr,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
+ netdev_warn(priv->netdev, "mlx5 tc ct offload isn't enabled.\n");
+ return -EOPNOTSUPP;
+}
+
+static inline struct mlx5_flow_handle *
+mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void
+mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_esw_flow_attr *attr)
+{
+}
+
+static inline bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+ struct sk_buff *skb, u32 tupleid)
+{
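+	/* A zero tuple id means the packet carries no ct context, so
+	 * there is nothing to restore and the skb may proceed.
+	 */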
+ if (!tupleid)
+ return true;
+
+ return false;
+}
+
+#endif /* IS_ENABLED(CONFIG_MLX5_TC_CT) */
+#endif /* __MLX5_EN_TC_CT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index af4ebd2951b5..b45c3f46570b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -66,6 +66,9 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
return -EOPNOTSUPP;
+ if (mlx5e_eswitch_uplink_rep(priv->netdev) && *out_dev != priv->netdev)
+ return -EOPNOTSUPP;
+
return 0;
}
@@ -469,10 +472,15 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
- void *headers_c,
- void *headers_v, u8 *match_level)
+ u8 *match_level)
{
struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
+ struct netlink_ext_ack *extack = f->common.extack;
int err = 0;
if (!tunnel) {
@@ -499,6 +507,109 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
goto out;
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_match_control match;
+ u16 addr_type;
+
+ flow_rule_match_enc_control(rule, &match);
+ addr_type = match.key->addr_type;
+
+		/* Tunnels reuse the same dissector addr_type key ids as non-tunnel matches */
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_enc_ipv4_addrs(rule, &match);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4,
+ ntohl(match.mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4,
+ ntohl(match.key->src));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+ ntohl(match.mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+ ntohl(match.key->dst));
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+ ethertype);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ETH_P_IP);
+ } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_enc_ipv6_addrs(rule, &match);
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
+ ipv6));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
+ ipv6));
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
+ ipv6));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
+ ipv6));
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
+ ethertype);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ETH_P_IPV6);
+ }
+ }
+
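+	/* tos carries two fields: the low two bits are the ECN and the
+	 * upper six the DSCP, matched separately below.
+	 */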
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_enc_ip(rule, &match);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
+ match.mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
+ match.key->tos & 0x3);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
+ match.mask->tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
+ match.key->tos >> 2);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
+ match.mask->ttl);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
+ match.key->ttl);
+
+ if (match.mask->ttl &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB
+ (priv->mdev,
+ ft_field_support.outer_ipv4_ttl)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on TTL is not supported");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ /* Enforce DMAC when offloading incoming tunneled flows.
+ * Flow counters require a match on the DMAC.
+ */
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dmac_47_16), priv->netdev->dev_addr);
+
+ /* let software handle IP fragments */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
+
+ return 0;
+
out:
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 6f9a78c85ffd..1630f0ec3ad7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -76,8 +76,7 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
- void *headers_c,
- void *headers_v, u8 *match_level);
+ u8 *match_level);
int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index cf58c9637904..29626c6c9c25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -433,7 +433,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
if (!ipsec)
return;
- drain_workqueue(ipsec->wq);
destroy_workqueue(ipsec->wq);
ida_destroy(&ipsec->halloc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 2c75b2752f58..014639ea06e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -175,28 +175,20 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
struct mlx5e_tir *tir = priv->indir_tir;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
- struct mlx5_flow_spec *spec;
enum mlx5e_traffic_types tt;
int err = 0;
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- err = -ENOMEM;
- goto out;
- }
-
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
tt = arfs_get_tt(type);
if (tt == -EINVAL) {
netdev_err(priv->netdev, "%s: bad arfs_type: %d\n",
__func__, type);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
dest.tir_num = tir[tt].tirn;
- arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
+ arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
&flow_act,
&dest, 1);
if (IS_ERR(arfs_t->default_rule)) {
@@ -205,8 +197,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
__func__, type);
}
-out:
- kvfree(spec);
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 01f2918063af..47874d34156b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1098,49 +1098,59 @@ void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
mlx5e_dcbnl_dscp_app(priv, DELETE);
}
-static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
- struct mlx5e_params *params)
+static void mlx5e_params_calc_trust_tx_min_inline_mode(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ u8 trust_state)
{
- mlx5_query_min_inline(priv->mdev, &params->tx_min_inline_mode);
- if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
+ mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
+ if (trust_state == MLX5_QPTS_TRUST_DSCP &&
params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
}
-static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
+static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
+{
+ u8 *trust_state = context;
+ int err;
+
+ err = mlx5_set_trust_state(priv->mdev, *trust_state);
+ if (err)
+ return err;
+ priv->dcbx_dp.trust_state = *trust_state;
+
+ return 0;
+}
+
+static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
struct mlx5e_channels new_channels = {};
+ bool reset_channels = true;
+ int err = 0;
mutex_lock(&priv->state_lock);
new_channels.params = priv->channels.params;
- mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
+ mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
+ trust_state);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
- goto out;
+ reset_channels = false;
}
/* Skip if tx_min_inline is the same */
if (new_channels.params.tx_min_inline_mode ==
priv->channels.params.tx_min_inline_mode)
- goto out;
+ reset_channels = false;
- mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ if (reset_channels)
+ err = mlx5e_safe_switch_channels(priv, &new_channels,
+ mlx5e_update_trust_state_hw,
+ &trust_state);
+ else
+ err = mlx5e_update_trust_state_hw(priv, &trust_state);
-out:
mutex_unlock(&priv->state_lock);
-}
-
-static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
-{
- int err;
-
- err = mlx5_set_trust_state(priv->mdev, trust_state);
- if (err)
- return err;
- priv->dcbx_dp.trust_state = trust_state;
- mlx5e_trust_update_sq_inline_mode(priv);
return err;
}
@@ -1171,7 +1181,8 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
if (err)
return err;
- mlx5e_trust_update_tx_min_inline_mode(priv, &priv->channels.params);
+ mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
+ priv->dcbx_dp.trust_state);
err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d674cb679895..6d703ddee4e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -357,7 +357,7 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
goto unlock;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
unlock:
mutex_unlock(&priv->state_lock);
@@ -432,9 +432,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
*cur_params = new_channels.params;
- if (!netif_is_rxfh_configured(priv->netdev))
- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, count);
+ mlx5e_num_channels_changed(priv);
goto out;
}
@@ -442,12 +440,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
if (arfs_enabled)
mlx5e_arfs_disable(priv);
- if (!netif_is_rxfh_configured(priv->netdev))
- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, count);
-
/* Switch to new channels, set new parameters and close old ones */
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels,
+ mlx5e_num_channels_changed_ctx, NULL);
if (arfs_enabled) {
int err2 = mlx5e_arfs_enable(priv);
@@ -580,7 +575,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
goto out;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
out:
mutex_unlock(&priv->state_lock);
@@ -633,6 +628,8 @@ static const u32 pplm_fec_2_ethtool[] = {
[MLX5E_FEC_NOFEC] = ETHTOOL_FEC_OFF,
[MLX5E_FEC_FIRECODE] = ETHTOOL_FEC_BASER,
[MLX5E_FEC_RS_528_514] = ETHTOOL_FEC_RS,
+ [MLX5E_FEC_RS_544_514] = ETHTOOL_FEC_RS,
+ [MLX5E_FEC_LLRS_272_257_1] = ETHTOOL_FEC_LLRS,
};
static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
@@ -650,45 +647,48 @@ static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
return 0;
}
-/* we use ETHTOOL_FEC_* offset and apply it to ETHTOOL_LINK_MODE_FEC_*_BIT */
-static u32 ethtool_fec2ethtool_caps(u_long ethtool_fec_code)
-{
- u32 offset;
-
- offset = find_first_bit(&ethtool_fec_code, sizeof(u32));
- offset -= ETHTOOL_FEC_OFF_BIT;
- offset += ETHTOOL_LINK_MODE_FEC_NONE_BIT;
+#define MLX5E_ADVERTISE_SUPPORTED_FEC(mlx5_fec, ethtool_fec) \
+ do { \
+ if (mlx5e_fec_in_caps(dev, 1 << (mlx5_fec))) \
+ __set_bit(ethtool_fec, \
+ link_ksettings->link_modes.supported);\
+ } while (0)
- return offset;
-}
+static const u32 pplm_fec_2_ethtool_linkmodes[] = {
+ [MLX5E_FEC_NOFEC] = ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ [MLX5E_FEC_FIRECODE] = ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ [MLX5E_FEC_RS_528_514] = ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ [MLX5E_FEC_RS_544_514] = ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ [MLX5E_FEC_LLRS_272_257_1] = ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+};
static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
struct ethtool_link_ksettings *link_ksettings)
{
- u_long fec_caps = 0;
- u32 active_fec = 0;
- u32 offset;
+ u_long active_fec = 0;
u32 bitn;
int err;
- err = mlx5e_get_fec_caps(dev, (u8 *)&fec_caps);
+ err = mlx5e_get_fec_mode(dev, (u32 *)&active_fec, NULL);
if (err)
return (err == -EOPNOTSUPP) ? 0 : err;
- err = mlx5e_get_fec_mode(dev, &active_fec, NULL);
- if (err)
- return err;
-
- for_each_set_bit(bitn, &fec_caps, ARRAY_SIZE(pplm_fec_2_ethtool)) {
- u_long ethtool_bitmask = pplm_fec_2_ethtool[bitn];
-
- offset = ethtool_fec2ethtool_caps(ethtool_bitmask);
- __set_bit(offset, link_ksettings->link_modes.supported);
- }
-
- active_fec = pplm2ethtool_fec(active_fec, sizeof(u32) * BITS_PER_BYTE);
- offset = ethtool_fec2ethtool_caps(active_fec);
- __set_bit(offset, link_ksettings->link_modes.advertising);
+ MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_NOFEC,
+ ETHTOOL_LINK_MODE_FEC_NONE_BIT);
+ MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_FIRECODE,
+ ETHTOOL_LINK_MODE_FEC_BASER_BIT);
+ MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_RS_528_514,
+ ETHTOOL_LINK_MODE_FEC_RS_BIT);
+ MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1,
+ ETHTOOL_LINK_MODE_FEC_LLRS_BIT);
+
+	/* active_fec is a bitmask with a single bit set; find that bit and
+	 * advertise the corresponding ethtool link mode
+	 */
+ bitn = find_first_bit(&active_fec, sizeof(u32) * BITS_PER_BYTE);
+ if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes))
+ __set_bit(pplm_fec_2_ethtool_linkmodes[bitn],
+ link_ksettings->link_modes.advertising);
return 0;
}
@@ -773,6 +773,7 @@ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings
static void get_speed_duplex(struct net_device *netdev,
u32 eth_proto_oper, bool force_legacy,
+ u16 data_rate_oper,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -784,7 +785,10 @@ static void get_speed_duplex(struct net_device *netdev,
speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
if (!speed) {
- speed = SPEED_UNKNOWN;
+ if (data_rate_oper)
+ speed = 100 * data_rate_oper;
+ else
+ speed = SPEED_UNKNOWN;
goto out;
}
@@ -873,17 +877,18 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
- u32 rx_pause = 0;
- u32 tx_pause = 0;
- u32 eth_proto_cap;
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
u32 eth_proto_admin;
- u32 eth_proto_lp;
- u32 eth_proto_oper;
u8 an_disable_admin;
- u8 an_status;
+ u16 data_rate_oper;
+ u32 eth_proto_oper;
+ u32 eth_proto_cap;
u8 connector_type;
+ u32 rx_pause = 0;
+ u32 tx_pause = 0;
+ u32 eth_proto_lp;
bool admin_ext;
+ u8 an_status;
bool ext;
int err;
@@ -917,6 +922,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
an_status = MLX5_GET(ptys_reg, out, an_status);
connector_type = MLX5_GET(ptys_reg, out, connector_type);
+ data_rate_oper = MLX5_GET(ptys_reg, out, data_rate_oper);
mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
@@ -927,7 +933,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
admin_ext);
get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
- link_ksettings);
+ data_rate_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -1126,8 +1132,8 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
-static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rss_params *rss = &priv->rss_params;
@@ -1146,8 +1152,8 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return 0;
}
-static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rss_params *rss = &priv->rss_params;
@@ -1511,7 +1517,7 @@ static int mlx5e_get_fecparam(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u8 fec_configured = 0;
+ u16 fec_configured = 0;
u32 fec_active = 0;
int err;
@@ -1527,7 +1533,7 @@ static int mlx5e_get_fecparam(struct net_device *netdev,
return -EOPNOTSUPP;
fecparam->fec = pplm2ethtool_fec((u_long)fec_configured,
- sizeof(u8) * BITS_PER_BYTE);
+ sizeof(u16) * BITS_PER_BYTE);
return 0;
}
@@ -1537,10 +1543,14 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u8 fec_policy = 0;
+ u16 fec_policy = 0;
int mode;
int err;
+ if (bitmap_weight((unsigned long *)&fecparam->fec,
+ ETHTOOL_FEC_LLRS_BIT + 1) > 1)
+ return -EOPNOTSUPP;
+
for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
if (!(pplm_fec_2_ethtool[mode] & fecparam->fec))
continue;
@@ -1739,7 +1749,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
return 0;
}
- return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
}
static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
@@ -1772,7 +1782,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
return 0;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
if (err)
return err;
@@ -1829,7 +1839,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
return 0;
}
- return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
}
static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
@@ -1873,7 +1883,7 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
return 0;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
return err;
}
@@ -1938,7 +1948,8 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
-static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
+int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -1955,12 +1966,15 @@ static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u
return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs);
}
-static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
return mlx5e_ethtool_set_rxnfc(dev, cmd);
}
const struct ethtool_ops mlx5e_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_get_strings,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 4ef3dc79f73c..dd7f338425eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -63,6 +63,7 @@
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en/hv_vhca_stats.h"
+#include "en/devlink.h"
#include "lib/mlx5.h"
@@ -1811,29 +1812,6 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
-static int mlx5e_alloc_xps_cpumask(struct mlx5e_channel *c,
- struct mlx5e_params *params)
-{
- int num_comp_vectors = mlx5_comp_vectors_count(c->mdev);
- int irq;
-
- if (!zalloc_cpumask_var(&c->xps_cpumask, GFP_KERNEL))
- return -ENOMEM;
-
- for (irq = c->ix; irq < num_comp_vectors; irq += params->num_channels) {
- int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq));
-
- cpumask_set_cpu(cpu, c->xps_cpumask);
- }
-
- return 0;
-}
-
-static void mlx5e_free_xps_cpumask(struct mlx5e_channel *c)
-{
- free_cpumask_var(c->xps_cpumask);
-}
-
static int mlx5e_open_queues(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
@@ -1984,10 +1962,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->irq_desc = irq_to_desc(irq);
c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
- err = mlx5e_alloc_xps_cpumask(c, params);
- if (err)
- goto err_free_channel;
-
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
err = mlx5e_open_queues(c, params, cparam);
@@ -2010,9 +1984,7 @@ err_close_queues:
err_napi_del:
netif_napi_del(&c->napi);
- mlx5e_free_xps_cpumask(c);
-err_free_channel:
kvfree(c);
return err;
@@ -2026,7 +1998,6 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_icosq(&c->icosq);
mlx5e_activate_rq(&c->rq);
- netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix);
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_activate_xsk(c);
@@ -2051,7 +2022,6 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
mlx5e_close_xsk(c);
mlx5e_close_queues(c);
netif_napi_del(&c->napi);
- mlx5e_free_xps_cpumask(c);
kvfree(c);
}
@@ -2801,6 +2771,8 @@ free_in:
return err;
}
+static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro);
+
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, u16 mtu)
{
@@ -2850,6 +2822,8 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
return 0;
}
+MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu);
+
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
{
struct mlx5e_params *params = &priv->channels.params;
@@ -2886,6 +2860,54 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
netdev_set_tc_queue(netdev, tc, nch, 0);
}
+static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv, u16 count)
+{
+ int num_txqs = count * priv->channels.params.num_tc;
+ int num_rxqs = count * priv->profile->rq_groups;
+ struct net_device *netdev = priv->netdev;
+
+ mlx5e_netdev_set_tcs(netdev);
+ netif_set_real_num_tx_queues(netdev, num_txqs);
+ netif_set_real_num_rx_queues(netdev, num_rxqs);
+}
+
+static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
+ struct mlx5e_params *params)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int num_comp_vectors, ix, irq;
+
+ num_comp_vectors = mlx5_comp_vectors_count(mdev);
+
+ for (ix = 0; ix < params->num_channels; ix++) {
+ cpumask_clear(priv->scratchpad.cpumask);
+
+ for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
+ int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq));
+
+ cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
+ }
+
+ netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
+ }
+}
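The loop distributes completion vectors round-robin, so channel ix collects the first CPU of IRQs ix, ix + num_channels, ix + 2 * num_channels, and so on. With 8 completion vectors and 3 channels (illustrative numbers):

	/* channel 0 <- IRQs 0, 3, 6
	 * channel 1 <- IRQs 1, 4, 7
	 * channel 2 <- IRQs 2, 5
	 */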
+
+int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
+{
+ u16 count = priv->channels.params.num_channels;
+
+ mlx5e_update_netdev_queues(priv, count);
+ mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
+
+ if (!netif_is_rxfh_configured(priv->netdev))
+ mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, count);
+
+ return 0;
+}
+
+MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed);
+
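MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX is defined outside this hunk; a plausible expansion (an assumption based on how the _ctx wrappers are used here) adapts a context-less callback to the mlx5e_fp_preactivate signature:

#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn)		\
int fn##_ctx(struct mlx5e_priv *priv, void *context)		\
{								\
	return fn(priv);					\
}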
static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
{
int i, ch;
@@ -2907,14 +2929,6 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
- int num_txqs = priv->channels.num * priv->channels.params.num_tc;
- int num_rxqs = priv->channels.num * priv->profile->rq_groups;
- struct net_device *netdev = priv->netdev;
-
- mlx5e_netdev_set_tcs(netdev);
- netif_set_real_num_tx_queues(netdev, num_txqs);
- netif_set_real_num_rx_queues(netdev, num_rxqs);
-
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels);
mlx5e_xdp_tx_enable(priv);
@@ -2947,42 +2961,52 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
mlx5e_deactivate_channels(&priv->channels);
}
-static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
- struct mlx5e_channels *new_chs,
- mlx5e_fp_hw_modify hw_modify)
+static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+ struct mlx5e_channels *new_chs,
+ mlx5e_fp_preactivate preactivate,
+ void *context)
{
struct net_device *netdev = priv->netdev;
- int new_num_txqs;
+ struct mlx5e_channels old_chs;
int carrier_ok;
-
- new_num_txqs = new_chs->num * new_chs->params.num_tc;
+ int err = 0;
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
- if (new_num_txqs < netdev->real_num_tx_queues)
- netif_set_real_num_tx_queues(netdev, new_num_txqs);
-
mlx5e_deactivate_priv_channels(priv);
- mlx5e_close_channels(&priv->channels);
+ old_chs = priv->channels;
priv->channels = *new_chs;
- /* New channels are ready to roll, modify HW settings if needed */
- if (hw_modify)
- hw_modify(priv);
+ /* New channels are ready to roll, call the preactivate hook if needed
+ * to modify HW settings or update kernel parameters.
+ */
+ if (preactivate) {
+ err = preactivate(priv, context);
+ if (err) {
+ priv->channels = old_chs;
+ goto out;
+ }
+ }
+ mlx5e_close_channels(&old_chs);
priv->profile->update_rx(priv);
+
+out:
mlx5e_activate_priv_channels(priv);
/* return carrier back if needed */
if (carrier_ok)
netif_carrier_on(netdev);
+
+ return err;
}
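The rework keeps a copy of the old channels so a failing preactivate hook rolls back cleanly: priv->channels is restored, the old channels are reactivated, and the caller disposes of the new ones. A hedged example of a hook relying on that contract (hypothetical function):

static int example_preactivate(struct mlx5e_priv *priv, void *context)
{
	/* priv->channels already points at the new channels here */
	if (priv->channels.params.num_channels > MLX5E_MAX_NUM_CHANNELS)
		return -EINVAL;	/* triggers rollback to the old channels */
	return 0;
}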
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *new_chs,
- mlx5e_fp_hw_modify hw_modify)
+ mlx5e_fp_preactivate preactivate,
+ void *context)
{
int err;
@@ -2990,8 +3014,16 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
if (err)
return err;
- mlx5e_switch_priv_channels(priv, new_chs, hw_modify);
+ err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
+ if (err)
+ goto err_close;
+
return 0;
+
+err_close:
+ mlx5e_close_channels(new_chs);
+
+ return err;
}
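Callers populate a fresh mlx5e_channels with the desired parameters and pass the matching _ctx wrapper, as the mqprio path later in this patch does:

	struct mlx5e_channels new_channels = {};

	new_channels.params = priv->channels.params;
	/* ... adjust new_channels.params as needed ... */
	err = mlx5e_safe_switch_channels(priv, &new_channels,
					 mlx5e_num_channels_changed_ctx, NULL);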
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
@@ -2999,7 +3031,7 @@ int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
struct mlx5e_channels new_channels = {};
new_channels.params = priv->channels.params;
- return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
}
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
@@ -3448,7 +3480,8 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
goto out;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels,
+ mlx5e_num_channels_changed_ctx, NULL);
if (err)
goto out;
@@ -3661,7 +3694,8 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
goto out;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
+ err = mlx5e_safe_switch_channels(priv, &new_channels,
+ mlx5e_modify_tirs_lro_ctx, NULL);
out:
mutex_unlock(&priv->state_lock);
return err;
@@ -3880,7 +3914,7 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
}
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
- change_hw_mtu_cb set_mtu_cb)
+ mlx5e_fp_preactivate preactivate)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_channels new_channels = {};
@@ -3929,13 +3963,13 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (!reset) {
params->sw_mtu = new_mtu;
- if (set_mtu_cb)
- set_mtu_cb(priv);
+ if (preactivate)
+ preactivate(priv, NULL);
netdev->mtu = params->sw_mtu;
goto out;
}
- err = mlx5e_safe_switch_channels(priv, &new_channels, set_mtu_cb);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, preactivate, NULL);
if (err)
goto out;
@@ -3948,7 +3982,7 @@ out:
static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
{
- return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
+ return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
}
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
@@ -4409,7 +4443,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
mlx5e_set_rq_type(priv->mdev, &new_channels.params);
old_prog = priv->channels.params.xdp_prog;
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
if (err)
goto unlock;
} else {
@@ -4589,6 +4623,7 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
#endif
+ .ndo_get_devlink_port = mlx5e_get_devlink_port,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -4787,9 +4822,8 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv,
mlx5e_build_rq_params(mdev, params);
/* HW LRO */
-
- /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
- if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ if (MLX5_CAP_ETH(mdev, lro_cap) &&
+ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
/* No XSK params: checking the availability of striding RQ in general. */
if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
params->lro_en = !slow_pci_heuristic(mdev);
@@ -5230,6 +5264,9 @@ int mlx5e_netdev_init(struct net_device *netdev,
priv->max_nch = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
priv->max_opened_tc = 1;
+ if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
mutex_init(&priv->state_lock);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
@@ -5238,7 +5275,7 @@ int mlx5e_netdev_init(struct net_device *netdev,
priv->wq = create_singlethread_workqueue("mlx5e");
if (!priv->wq)
- return -ENOMEM;
+ goto err_free_cpumask;
/* netdev init */
netif_carrier_off(netdev);
@@ -5248,11 +5285,17 @@ int mlx5e_netdev_init(struct net_device *netdev,
#endif
return 0;
+
+err_free_cpumask:
+ free_cpumask_var(priv->scratchpad.cpumask);
+
+ return -ENOMEM;
}
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
{
destroy_workqueue(priv->wq);
+ free_cpumask_var(priv->scratchpad.cpumask);
}
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
@@ -5287,6 +5330,7 @@ err_free_netdev:
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
+ const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
const struct mlx5e_profile *profile;
int max_nch;
int err;
@@ -5298,10 +5342,25 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
max_nch = mlx5e_get_max_num_channels(priv->mdev);
if (priv->channels.params.num_channels > max_nch) {
mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
+ /* Reducing the number of channels - RXFH has to be reset, and
+ * mlx5e_num_channels_changed below will build the RQT.
+ */
+ priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
priv->channels.params.num_channels = max_nch;
- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, max_nch);
}
+ /* 1. Set the real number of queues in the kernel the first time.
+ * 2. Set our default XPS cpumask.
+ * 3. Build the RQT.
+ *
+ * rtnl_lock is required by netif_set_real_num_*_queues in case the
+ * netdev has been registered by this point (if this function was called
+ * in the reload or resume flow).
+ */
+ if (take_rtnl)
+ rtnl_lock();
+ mlx5e_num_channels_changed(priv);
+ if (take_rtnl)
+ rtnl_unlock();
err = profile->init_tx(priv);
if (err)
@@ -5425,17 +5484,27 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
goto err_destroy_netdev;
}
+ err = mlx5e_devlink_port_register(priv);
+ if (err) {
+ mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
+ goto err_detach;
+ }
+
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
- goto err_detach;
+ goto err_devlink_port_unregister;
}
+ mlx5e_devlink_port_type_eth_set(priv);
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_init_app(priv);
#endif
return priv;
+err_devlink_port_unregister:
+ mlx5e_devlink_port_unregister(priv);
err_detach:
mlx5e_detach(mdev, priv);
err_destroy_netdev:
@@ -5457,6 +5526,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_delete_app(priv);
#endif
+ mlx5e_devlink_port_unregister(priv);
unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 6ed307d7f191..2a0243e4af75 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -41,7 +41,7 @@
#include <net/ipv6_stubs.h>
#include "eswitch.h"
-#include "eswitch_offloads_chains.h"
+#include "esw/chains.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
@@ -192,7 +192,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
if (err) {
- pr_warn("vport %d error %d reading stats\n", rep->vport, err);
+ netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
+ rep->vport, err);
return;
}
@@ -252,25 +253,6 @@ static int mlx5e_rep_set_ringparam(struct net_device *dev,
return mlx5e_ethtool_set_ringparam(priv, param);
}
-static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
- struct mlx5_flow_destination *dest)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct mlx5_flow_handle *flow_rule;
-
- flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
- rep->vport,
- dest);
- if (IS_ERR(flow_rule))
- return PTR_ERR(flow_rule);
-
- mlx5_del_flow_rules(rpriv->vport_rx_rule);
- rpriv->vport_rx_rule = flow_rule;
- return 0;
-}
-
static void mlx5e_rep_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
@@ -283,33 +265,8 @@ static int mlx5e_rep_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- u16 curr_channels_amount = priv->channels.params.num_channels;
- u32 new_channels_amount = ch->combined_count;
- struct mlx5_flow_destination new_dest;
- int err = 0;
-
- err = mlx5e_ethtool_set_channels(priv, ch);
- if (err)
- return err;
- if (curr_channels_amount == 1 && new_channels_amount > 1) {
- new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- new_dest.ft = priv->fs.ttc.ft.t;
- } else if (new_channels_amount == 1 && curr_channels_amount > 1) {
- new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- new_dest.tir_num = priv->direct_tir[0].tirn;
- } else {
- return 0;
- }
-
- err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
- if (err) {
- netdev_warn(priv->netdev, "Failed to update vport rx rule, when going from (%d) channels to (%d) channels\n",
- curr_channels_amount, new_channels_amount);
- return err;
- }
-
- return 0;
+ return mlx5e_ethtool_set_channels(priv, ch);
}
static int mlx5e_rep_get_coalesce(struct net_device *netdev,
@@ -375,6 +332,9 @@ static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = mlx5e_rep_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_rep_get_strings,
@@ -391,6 +351,9 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
};
static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = mlx5e_uplink_rep_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_rep_get_strings,
@@ -406,6 +369,10 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
+ .get_rxfh = mlx5e_get_rxfh,
+ .set_rxfh = mlx5e_set_rxfh,
+ .get_rxnfc = mlx5e_get_rxnfc,
+ .set_rxnfc = mlx5e_set_rxnfc,
.get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
@@ -727,9 +694,9 @@ static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
static int
mlx5e_rep_indr_offload(struct net_device *netdev,
struct flow_cls_offload *flower,
- struct mlx5e_rep_indr_block_priv *indr_priv)
+ struct mlx5e_rep_indr_block_priv *indr_priv,
+ unsigned long flags)
{
- unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
int err = 0;
@@ -750,20 +717,68 @@ mlx5e_rep_indr_offload(struct net_device *netdev,
return err;
}
-static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
- void *type_data, void *indr_priv)
+static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
+ void *type_data, void *indr_priv)
{
+ unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
struct mlx5e_rep_indr_block_priv *priv = indr_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
+ return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
+ flags);
default:
return -EOPNOTSUPP;
}
}
-static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
+static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
+ void *type_data, void *indr_priv)
+{
+ struct mlx5e_rep_indr_block_priv *priv = indr_priv;
+ struct flow_cls_offload *f = type_data;
+ struct flow_cls_offload tmp;
+ struct mlx5e_priv *mpriv;
+ struct mlx5_eswitch *esw;
+ unsigned long flags;
+ int err;
+
+ mpriv = netdev_priv(priv->rpriv->netdev);
+ esw = mpriv->mdev->priv.eswitch;
+
+ flags = MLX5_TC_FLAG(EGRESS) |
+ MLX5_TC_FLAG(ESW_OFFLOAD) |
+ MLX5_TC_FLAG(FT_OFFLOAD);
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ memcpy(&tmp, f, sizeof(*f));
+
+ /* Re-use tc offload path by moving the ft flow to the
+ * reserved ft chain.
+ *
+ * FT offload can use prio range [0, INT_MAX], so we normalize
+ * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
+ * as with tc, where prio 0 isn't supported.
+ *
+ * We only support chain 0 of FT offload.
+ */
+ if (!mlx5_esw_chains_prios_supported(esw) ||
+ tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw) ||
+ tmp.common.chain_index)
+ return -EOPNOTSUPP;
+
+ tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.prio++;
+ err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
+ memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
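The normalization mirrors mlx5e_rep_setup_ft_cb: an FT rule on chain 0 at prio p is replayed on the reserved FT chain at prio p + 1, since tc does not support prio 0. For an FT rule at prio 0, the rewrite amounts to:

	tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
	tmp.common.prio = 0 + 1;	/* illustrative: original prio 0 */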
+
+static void mlx5e_rep_indr_block_unbind(void *cb_priv)
{
struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
@@ -774,9 +789,10 @@ static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
static LIST_HEAD(mlx5e_block_cb_list);
static int
-mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
- struct mlx5e_rep_priv *rpriv,
- struct flow_block_offload *f)
+mlx5e_rep_indr_setup_block(struct net_device *netdev,
+ struct mlx5e_rep_priv *rpriv,
+ struct flow_block_offload *f,
+ flow_setup_cb_t *setup_cb)
{
struct mlx5e_rep_indr_block_priv *indr_priv;
struct flow_block_cb *block_cb;
@@ -802,9 +818,8 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
list_add(&indr_priv->list,
&rpriv->uplink_priv.tc_indr_block_priv_list);
- block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
- indr_priv, indr_priv,
- mlx5e_rep_indr_tc_block_unbind);
+ block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv,
+ mlx5e_rep_indr_block_unbind);
if (IS_ERR(block_cb)) {
list_del(&indr_priv->list);
kfree(indr_priv);
@@ -819,9 +834,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
if (!indr_priv)
return -ENOENT;
- block_cb = flow_block_cb_lookup(f->block,
- mlx5e_rep_indr_setup_block_cb,
- indr_priv);
+ block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);
if (!block_cb)
return -ENOENT;
@@ -835,13 +848,16 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
}
static
-int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data)
+int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
{
switch (type) {
case TC_SETUP_BLOCK:
- return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
- type_data);
+ return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
+ mlx5e_rep_indr_setup_tc_cb);
+ case TC_SETUP_FT:
+ return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
+ mlx5e_rep_indr_setup_ft_cb);
default:
return -EOPNOTSUPP;
}
@@ -853,7 +869,7 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
int err;
err = __flow_indr_block_cb_register(netdev, rpriv,
- mlx5e_rep_indr_setup_tc_cb,
+ mlx5e_rep_indr_setup_cb,
rpriv);
if (err) {
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
@@ -867,7 +883,7 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
struct net_device *netdev)
{
- __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
+ __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_cb,
rpriv);
}
@@ -1279,8 +1295,7 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSFLOWER:
memcpy(&tmp, f, sizeof(*f));
- if (!mlx5_esw_chains_prios_supported(esw) ||
- tmp.common.chain_index)
+ if (!mlx5_esw_chains_prios_supported(esw))
return -EOPNOTSUPP;
/* Re-use tc offload path by moving the ft flow to the
@@ -1396,7 +1411,7 @@ static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
- return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
+ return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
}
static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
@@ -1422,7 +1437,7 @@ static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan
return 0;
}
-static struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
+static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1435,7 +1450,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_stop = mlx5e_rep_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_setup_tc = mlx5e_rep_setup_tc,
- .ndo_get_devlink_port = mlx5e_get_devlink_port,
+ .ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
.ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -1448,7 +1463,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_start_xmit = mlx5e_xmit,
.ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
.ndo_setup_tc = mlx5e_rep_setup_tc,
- .ndo_get_devlink_port = mlx5e_get_devlink_port,
+ .ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
.ndo_get_stats64 = mlx5e_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -1464,6 +1479,11 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_set_features = mlx5e_set_features,
};
+bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
+{
+ return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep;
+}
+
bool mlx5e_eswitch_rep(struct net_device *netdev)
{
if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
@@ -1584,6 +1604,8 @@ static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct ttc_params ttc_params = {};
int tt, err;
@@ -1593,6 +1615,11 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
/* The inner_ttc in the ttc params is intentionally not set */
ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
mlx5e_set_ttc_ft_params(&ttc_params);
+
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ /* To give the uplink rep TTC a lower level for chaining from the root ft */
+ ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
+
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
@@ -1604,6 +1631,52 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
return 0;
}
+static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ int err = 0;
+
+ if (rep->vport != MLX5_VPORT_UPLINK) {
+ /* non-uplink reps will skip any bypass tables and go directly to
+ * their own ttc
+ */
+ rpriv->root_ft = priv->fs.ttc.ft.t;
+ return 0;
+ }
+
+ /* uplink root ft will be used to auto-chain to the ethtool or ttc tables */
+ ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
+ if (!ns) {
+ netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
+ ft_attr.prio = 1;
+ ft_attr.level = 1;
+
+ rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(rpriv->root_ft)) {
+ err = PTR_ERR(rpriv->root_ft);
+ rpriv->root_ft = NULL;
+ }
+
+ return err;
+}
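Because the table is created with max_fte = 0 it can hold no rules, so every lookup misses and falls through to the next table in level order; that is what lets the ethtool steering and rep TTC tables be chained beneath it (the exact order is inferred from the surrounding code):

	/* root_ft (level 1, empty) --miss--> ethtool steering --miss--> rep TTC */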
+
+static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ return;
+ mlx5_destroy_flow_table(rpriv->root_ft);
+}
+
static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -1612,11 +1685,10 @@ static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_destination dest;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dest.tir_num = priv->direct_tir[0].tirn;
- flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
- rep->vport,
- &dest);
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rpriv->root_ft;
+
+ flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
if (IS_ERR(flow_rule))
return PTR_ERR(flow_rule);
rpriv->vport_rx_rule = flow_rule;
@@ -1656,12 +1728,20 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_direct_tirs;
- err = mlx5e_create_rep_vport_rx_rule(priv);
+ err = mlx5e_create_rep_root_ft(priv);
if (err)
goto err_destroy_ttc_table;
+ err = mlx5e_create_rep_vport_rx_rule(priv);
+ if (err)
+ goto err_destroy_root_ft;
+
+ mlx5e_ethtool_init_steering(priv);
+
return 0;
+err_destroy_root_ft:
+ mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
@@ -1682,6 +1762,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
mlx5_del_flow_rules(rpriv->vport_rx_rule);
+ mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
mlx5e_destroy_indirect_tirs(priv, false);
@@ -1920,7 +2001,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.update_rx = mlx5e_update_rep_rx,
.update_stats = mlx5e_update_ndo_stats,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
- .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
.max_tc = 1,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
@@ -1940,7 +2021,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.update_stats = mlx5e_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
- .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
.max_tc = MLX5E_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_ul_rep_stats_grps,
@@ -2026,8 +2107,9 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
&mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
if (!netdev) {
- pr_warn("Failed to create representor netdev for vport %d\n",
- rep->vport);
+ mlx5_core_warn(dev,
+ "Failed to create representor netdev for vport %d\n",
+ rep->vport);
kfree(rpriv);
return -EINVAL;
}
@@ -2045,29 +2127,32 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
err = mlx5e_attach_netdev(netdev_priv(netdev));
if (err) {
- pr_warn("Failed to attach representor netdev for vport %d\n",
- rep->vport);
+ netdev_warn(netdev,
+ "Failed to attach representor netdev for vport %d\n",
+ rep->vport);
goto err_destroy_mdev_resources;
}
err = mlx5e_rep_neigh_init(rpriv);
if (err) {
- pr_warn("Failed to initialized neighbours handling for vport %d\n",
- rep->vport);
+ netdev_warn(netdev,
+ "Failed to initialized neighbours handling for vport %d\n",
+ rep->vport);
goto err_detach_netdev;
}
err = register_devlink_port(dev, rpriv);
if (err) {
- esw_warn(dev, "Failed to register devlink port %d\n",
- rep->vport);
+ netdev_warn(netdev, "Failed to register devlink port %d\n",
+ rep->vport);
goto err_neigh_cleanup;
}
err = register_netdev(netdev);
if (err) {
- pr_warn("Failed to register representor netdev for vport %d\n",
- rep->vport);
+ netdev_warn(netdev,
+ "Failed to register representor netdev for vport %d\n",
+ rep->vport);
goto err_devlink_cleanup;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 3f756d51435f..6a2337900420 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -55,6 +55,7 @@ struct mlx5e_neigh_update_table {
unsigned long min_interval; /* jiffies */
};
+struct mlx5_tc_ct_priv;
struct mlx5_rep_uplink_priv {
/* Filters DB - instantiated by the uplink representor and shared by
* the uplink's VFs
@@ -81,12 +82,20 @@ struct mlx5_rep_uplink_priv {
struct mutex unready_flows_lock;
struct list_head unready_flows;
struct work_struct reoffload_flows_work;
+
+ /* maps tun_info to a unique id */
+ struct mapping_ctx *tunnel_mapping;
+ /* maps tun_enc_opts to a unique id */
+ struct mapping_ctx *tunnel_enc_opts_mapping;
+
+ struct mlx5_tc_ct_priv *ct_priv;
};
struct mlx5e_rep_priv {
struct mlx5_eswitch_rep *rep;
struct mlx5e_neigh_update_table neigh_update;
struct net_device *netdev;
+ struct mlx5_flow_table *root_ft;
struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
@@ -191,6 +200,8 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe);
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e);
@@ -200,6 +211,7 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
bool mlx5e_eswitch_rep(struct net_device *netdev);
+bool mlx5e_eswitch_uplink_rep(struct net_device *netdev);
#else /* CONFIG_MLX5_ESWITCH */
static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 312d4692425b..6173faf542b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -158,7 +158,8 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
- rq->handle_rx_cqe(rq, &cqd->title);
+ INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe, rq, &cqd->title);
}
mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
wq->cc = cqcc;
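INDIRECT_CALL_2() from <linux/indirect_call_wrapper.h> replaces the retpoline-penalized indirect call with direct calls when the pointer matches one of the listed handlers; roughly:

#define INDIRECT_CALL_1(f, f1, ...)				\
	(likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__))
#define INDIRECT_CALL_2(f, f2, f1, ...)				\
	(likely(f == f2) ? f2(__VA_ARGS__)			\
			 : INDIRECT_CALL_1(f, f1, __VA_ARGS__))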
@@ -178,7 +179,8 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
mlx5e_read_title_slot(rq, wq, cc);
mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
mlx5e_decompress_cqe(rq, wq, cc);
- rq->handle_rx_cqe(rq, &cqd->title);
+ INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe, rq, &cqd->title);
cqd->mini_arr_idx++;
return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
@@ -1192,6 +1194,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5e_tc_update_priv tc_priv = {};
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
struct sk_buff *skb;
@@ -1224,13 +1227,78 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
+ if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+ goto free_wqe;
+
napi_gro_receive(rq->cq.napi, skb);
+ mlx5_tc_rep_post_napi_receive(&tc_priv);
+
free_wqe:
mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
+
+void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe)
+{
+ u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
+ u16 wqe_id = be16_to_cpu(cqe->wqe_id);
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
+ u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
+ u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
+ u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
+ u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ struct mlx5e_tc_update_priv tc_priv = {};
+ struct mlx5e_rx_wqe_ll *wqe;
+ struct mlx5_wq_ll *wq;
+ struct sk_buff *skb;
+ u16 cqe_bcnt;
+
+ wi->consumed_strides += cstrides;
+
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ trigger_report(rq, cqe);
+ rq->stats->wqe_err++;
+ goto mpwrq_cqe_out;
+ }
+
+ if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+ struct mlx5e_rq_stats *stats = rq->stats;
+
+ stats->mpwqe_filler_cqes++;
+ stats->mpwqe_filler_strides += cstrides;
+ goto mpwrq_cqe_out;
+ }
+
+ cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+
+ skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
+ mlx5e_skb_from_cqe_mpwrq_linear,
+ mlx5e_skb_from_cqe_mpwrq_nonlinear,
+ rq, wi, cqe_bcnt, head_offset, page_idx);
+ if (!skb)
+ goto mpwrq_cqe_out;
+
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+ if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv))
+ goto mpwrq_cqe_out;
+
+ napi_gro_receive(rq->cq.napi, skb);
+
+ mlx5_tc_rep_post_napi_receive(&tc_priv);
+
+mpwrq_cqe_out:
+ if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
+ return;
+
+ wq = &rq->mpwqe.wq;
+ wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
+ mlx5e_free_rx_mpwqe(rq, wi, true);
+ mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+}
#endif
struct sk_buff *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ec5fc52bf572..438128dde187 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -51,14 +51,18 @@
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
-#include "eswitch_offloads_chains.h"
+#include "esw/chains.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
+#include "en/mapping.h"
+#include "en/tc_ct.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "diag/en_tc_tracepoint.h"
+#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
+
struct mlx5_nic_flow_attr {
u32 action;
u32 flow_tag;
@@ -84,6 +88,7 @@ enum {
MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
+ MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7,
};
#define MLX5E_TC_MAX_SPLITS 1
@@ -134,6 +139,8 @@ struct mlx5e_tc_flow {
refcount_t refcnt;
struct rcu_head rcu_head;
struct completion init_done;
+ int tunnel_id; /* the mapped tunnel id of this flow */
+
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -144,15 +151,118 @@ struct mlx5e_tc_flow_parse_attr {
const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
struct net_device *filter_dev;
struct mlx5_flow_spec spec;
- int num_mod_hdr_actions;
- int max_mod_hdr_actions;
- void *mod_hdr_actions;
+ struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
+struct tunnel_match_key {
+ struct flow_dissector_key_control enc_control;
+ struct flow_dissector_key_keyid enc_key_id;
+ struct flow_dissector_key_ports enc_tp;
+ struct flow_dissector_key_ip enc_ip;
+ union {
+ struct flow_dissector_key_ipv4_addrs enc_ipv4;
+ struct flow_dissector_key_ipv6_addrs enc_ipv6;
+ };
+
+ int filter_ifindex;
+};
+
+/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS.
+ * Upper TUNNEL_INFO_BITS for general tunnel info.
+ * Lower ENC_OPTS_BITS bits for enc_opts.
+ */
+#define TUNNEL_INFO_BITS 6
+#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
+#define ENC_OPTS_BITS 2
+#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
+#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
+#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
+
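The mapped id packs a 6-bit tunnel-info id above a 2-bit enc-opts id, per the masks above; for example, tun_id 5 with enc_opts_id 2:

	u32 value = tun_id << ENC_OPTS_BITS | enc_opts_id; /* 5 << 2 | 2 == 0x16 */
	u32 tun_id_back = value >> ENC_OPTS_BITS;	   /* 5 */
	u32 enc_opts_back = value & ENC_OPTS_BITS_MASK;	   /* 2 */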
+struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
+ [CHAIN_TO_REG] = {
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
+ .moffset = 0,
+ .mlen = 2,
+ },
+ [TUNNEL_TO_REG] = {
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
+ .moffset = 3,
+ .mlen = 1,
+ .soffset = MLX5_BYTE_OFF(fte_match_param,
+ misc_parameters_2.metadata_reg_c_1),
+ },
+ [ZONE_TO_REG] = zone_to_reg_ct,
+ [CTSTATE_TO_REG] = ctstate_to_reg_ct,
+ [MARK_TO_REG] = mark_to_reg_ct,
+ [LABELS_TO_REG] = labels_to_reg_ct,
+ [FTEID_TO_REG] = fteid_to_reg_ct,
+ [TUPLEID_TO_REG] = tupleid_to_reg_ct,
+};
+
+static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
+
+void
+mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data,
+ u32 mask)
+{
+ int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
+ int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
+ void *headers_c = spec->match_criteria;
+ void *headers_v = spec->match_value;
+ void *fmask, *fval;
+
+ fmask = headers_c + soffset;
+ fval = headers_v + soffset;
+
+ mask = cpu_to_be32(mask) >> (32 - (match_len * 8));
+ data = cpu_to_be32(data) >> (32 - (match_len * 8));
+
+ memcpy(fmask, &mask, match_len);
+ memcpy(fval, &data, match_len);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+}
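The cpu_to_be32()-plus-shift sequence writes an mlen-byte value into the match buffer in network byte order. Worked through for match_len = 2 and data = 0x1234 on a little-endian host:

	/* cpu_to_be32(0x1234)  -> bytes 00 00 12 34 (host value 0x34120000)
	 * >> (32 - 16)         -> 0x3412, stored in memory as bytes 12 34
	 * memcpy(fval, &data, 2) copies those two bytes: big-endian 0x1234
	 */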
+
+int
+mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data)
+{
+ int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
+ int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
+ int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
+ char *modact;
+ int err;
+
+ err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
+ mod_hdr_acts);
+ if (err)
+ return err;
+
+ modact = mod_hdr_acts->actions +
+ (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
+
+ /* Firmware has a 5-bit length field, and 0 means 32 bits */
+ if (mlen == 4)
+ mlen = 0;
+
+ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, modact, field, mfield);
+ MLX5_SET(set_action_in, modact, offset, moffset * 8);
+ MLX5_SET(set_action_in, modact, length, mlen * 8);
+ MLX5_SET(set_action_in, modact, data, data);
+ mod_hdr_acts->num_actions++;
+
+ return 0;
+}
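Each call appends one set_action_in entry to the mod-header array; the tunnel-id code later in this patch invokes it as:

	err = mlx5e_tc_match_to_reg_set(priv->mdev, mod_hdr_acts,
					TUNNEL_TO_REG, value);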
+
struct mlx5e_hairpin {
struct mlx5_hairpin *pair;
@@ -210,8 +320,6 @@ struct mlx5e_mod_hdr_entry {
int compl_result;
};
-#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
-
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow);
@@ -361,10 +469,10 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
struct mod_hdr_key key;
u32 hash_key;
- num_actions = parse_attr->num_mod_hdr_actions;
+ num_actions = parse_attr->mod_hdr_acts.num_actions;
actions_size = MLX5_MH_ACT_SZ * num_actions;
- key.actions = parse_attr->mod_hdr_actions;
+ key.actions = parse_attr->mod_hdr_acts.actions;
key.num_actions = num_actions;
hash_key = hash_mod_hdr_info(&key);
@@ -954,7 +1062,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
flow_act.modify_hdr = attr->modify_hdr;
- kfree(parse_attr->mod_hdr_actions);
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
@@ -1043,8 +1151,16 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr)
{
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
+ if (flow_flag_test(flow, CT)) {
+ mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
+
+ return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr,
+ mod_hdr_acts);
+ }
+
rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
if (IS_ERR(rule))
return rule;
@@ -1063,10 +1179,15 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_esw_flow_attr *attr)
{
flow_flag_clear(flow, OFFLOADED);
+ if (flow_flag_test(flow, CT)) {
+ mlx5_tc_ct_delete_flow(flow->priv, flow, attr);
+ return;
+ }
+
if (attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
@@ -1076,17 +1197,17 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
- struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *slow_attr)
+ struct mlx5_flow_spec *spec)
{
+ struct mlx5_esw_flow_attr slow_attr;
struct mlx5_flow_handle *rule;
- memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
- slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr->split_count = 0;
- slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
+ slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr.split_count = 0;
+ slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr);
if (!IS_ERR(rule))
flow_flag_set(flow, SLOW);
@@ -1095,14 +1216,15 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
- struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *slow_attr)
+ struct mlx5e_tc_flow *flow)
{
- memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
- slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr->split_count = 0;
- slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
- mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
+ struct mlx5_esw_flow_attr slow_attr;
+
+ memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
+ slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr.split_count = 0;
+ slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr);
flow_flag_clear(flow, SLOW);
}
@@ -1173,7 +1295,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
int out_index;
if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
- NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-switch priorities unsupported, upgrade FW");
return -EOPNOTSUPP;
}
@@ -1184,13 +1307,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
*/
max_chain = mlx5_esw_chains_get_chain_range(esw);
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
- NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested chain is out of supported range");
return -EOPNOTSUPP;
}
max_prio = mlx5_esw_chains_get_prio_range(esw);
if (attr->prio > max_prio) {
- NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested priority is out of supported range");
return -EOPNOTSUPP;
}
@@ -1220,7 +1345,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- kfree(parse_attr->mod_hdr_actions);
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
@@ -1237,14 +1362,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
* (1) there's no error
* (2) there's an encap action and we don't have valid neigh
*/
- if (!encap_valid) {
- /* continue with goto slow path rule instead */
- struct mlx5_esw_flow_attr slow_attr;
-
- flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
- } else {
+ if (!encap_valid)
+ flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
+ else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
- }
if (IS_ERR(flow->rule[0]))
return PTR_ERR(flow->rule[0]);
@@ -1272,9 +1393,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- struct mlx5_esw_flow_attr slow_attr;
int out_index;
+ mlx5e_put_flow_tunnel_id(flow);
+
if (flow_flag_test(flow, NOT_READY)) {
remove_unready_flow(flow);
kvfree(attr->parse_attr);
@@ -1283,7 +1405,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (mlx5e_is_offloaded_flow(flow)) {
if (flow_flag_test(flow, SLOW))
- mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
+ mlx5e_tc_unoffload_from_slow_path(esw, flow);
else
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
}
@@ -1312,7 +1434,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr slow_attr, *esw_attr;
+ struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
@@ -1365,7 +1487,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
continue;
}
- mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
+ mlx5e_tc_unoffload_from_slow_path(esw, flow);
flow->rule[0] = rule;
/* was unset when slow path rule removed */
flow_flag_set(flow, OFFLOADED);
@@ -1377,7 +1499,6 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr slow_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
@@ -1389,7 +1510,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
spec = &flow->esw_attr->parse_attr->spec;
/* update from encap rule to slow path rule */
- rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
+ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
/* mark the flow's encap dest as non-valid */
flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
@@ -1664,150 +1785,272 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
}
}
+static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_action *flow_action = &rule->action;
+ const struct flow_action_entry *act;
+ int i;
-static int parse_tunnel_attr(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct flow_cls_offload *f,
- struct net_device *filter_dev, u8 *match_level)
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_GOTO:
+ return true;
+ default:
+ continue;
+ }
+ }
+
+ return false;
+}
+
+static int
+enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
+ struct flow_dissector_key_enc_opts *opts,
+ struct netlink_ext_ack *extack,
+ bool *dont_care)
+{
+ struct geneve_opt *opt;
+ int off = 0;
+
+ *dont_care = true;
+
+ while (opts->len > off) {
+ opt = (struct geneve_opt *)&opts->data[off];
+
+ if (!(*dont_care) || opt->opt_class || opt->type ||
+ memchr_inv(opt->opt_data, 0, opt->length * 4)) {
+ *dont_care = false;
+
+ if (opt->opt_class != U16_MAX ||
+ opt->type != U8_MAX ||
+ memchr_inv(opt->opt_data, 0xFF,
+ opt->length * 4)) {
+ NL_SET_ERR_MSG(extack,
+ "Partial match of tunnel options in chain > 0 isn't supported");
+ netdev_warn(priv->netdev,
+ "Partial match of tunnel options in chain > 0 isn't supported");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ off += sizeof(struct geneve_opt) + opt->length * 4;
+ }
+
+ return 0;
+}
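The walk treats opts->data as a chain of geneve TLVs whose length field counts 4-byte words, hence the sizeof(struct geneve_opt) + opt->length * 4 stride; an all-zero mask means don't care, an all-ones mask a full match, and anything in between is rejected. For example:

	/* one option with opt->length == 2 occupies
	 * sizeof(struct geneve_opt) + 2 * 4 == 4 + 8 == 12 bytes of opts->data
	 */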
+
+#define COPY_DISSECTOR(rule, diss_key, dst)\
+({ \
+ struct flow_rule *__rule = (rule);\
+ typeof(dst) __dst = dst;\
+\
+ memcpy(__dst,\
+ skb_flow_dissector_target(__rule->match.dissector,\
+ diss_key,\
+ __rule->match.key),\
+ sizeof(*__dst));\
+})
+
+static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct flow_cls_offload *f,
+ struct net_device *filter_dev)
{
- struct netlink_ext_ack *extack = f->common.extack;
- void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers);
- void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
+ struct flow_match_enc_opts enc_opts_match;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct tunnel_match_key tunnel_key;
+ bool enc_opts_is_dont_care = true;
+ u32 tun_id, enc_opts_id = 0;
+ struct mlx5_eswitch *esw;
+ u32 value, mask;
int err;
- err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
- headers_c, headers_v, match_level);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack,
- "failed to parse tunnel attributes");
+ esw = priv->mdev->priv.eswitch;
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ memset(&tunnel_key, 0, sizeof(tunnel_key));
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ &tunnel_key.enc_control);
+ if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ &tunnel_key.enc_ipv4);
+ else
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
+ &tunnel_key.enc_ipv6);
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
+ &tunnel_key.enc_tp);
+ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
+ &tunnel_key.enc_key_id);
+ tunnel_key.filter_ifindex = filter_dev->ifindex;
+
+ err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
+ if (err)
return err;
- }
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
- struct flow_match_control match;
- u16 addr_type;
+ flow_rule_match_enc_opts(rule, &enc_opts_match);
+ err = enc_opts_is_dont_care_or_full_match(priv,
+ enc_opts_match.mask,
+ extack,
+ &enc_opts_is_dont_care);
+ if (err)
+ goto err_enc_opts;
- flow_rule_match_enc_control(rule, &match);
- addr_type = match.key->addr_type;
+ if (!enc_opts_is_dont_care) {
+ err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_match.key, &enc_opts_id);
+ if (err)
+ goto err_enc_opts;
+ }
- /* For tunnel addr_type used same key id`s as for non-tunnel */
- if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- struct flow_match_ipv4_addrs match;
+ value = tun_id << ENC_OPTS_BITS | enc_opts_id;
+ mask = enc_opts_id ? TUNNEL_ID_MASK :
+ (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
- flow_rule_match_enc_ipv4_addrs(rule, &match);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- src_ipv4_src_ipv6.ipv4_layout.ipv4,
- ntohl(match.mask->src));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- src_ipv4_src_ipv6.ipv4_layout.ipv4,
- ntohl(match.key->src));
+ if (attr->chain) {
+ mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
+ TUNNEL_TO_REG, value, mask);
+ } else {
+ mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
+ err = mlx5e_tc_match_to_reg_set(priv->mdev,
+ mod_hdr_acts,
+ TUNNEL_TO_REG, value);
+ if (err)
+ goto err_set;
- MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
- ntohl(match.mask->dst));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
- ntohl(match.key->dst));
-
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
- ethertype);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ETH_P_IP);
- } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- struct flow_match_ipv6_addrs match;
-
- flow_rule_match_enc_ipv6_addrs(rule, &match);
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
- src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6));
-
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6));
- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
- ipv6));
-
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
- ethertype);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ETH_P_IPV6);
- }
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
- struct flow_match_ip match;
+ flow->tunnel_id = value;
+ return 0;
- flow_rule_match_enc_ip(rule, &match);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
- match.mask->tos & 0x3);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
- match.key->tos & 0x3);
+err_set:
+ if (enc_opts_id)
+ mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_id);
+err_enc_opts:
+ mapping_remove(uplink_priv->tunnel_mapping, tun_id);
+ return err;
+}
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
- match.mask->tos >> 2);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
- match.key->tos >> 2);
+static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
+{
+ u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
+ u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5_eswitch *esw;
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
- match.mask->ttl);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
- match.key->ttl);
+ esw = flow->priv->mdev->priv.eswitch;
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ if (tun_id)
+ mapping_remove(uplink_priv->tunnel_mapping, tun_id);
+ if (enc_opts_id)
+ mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_id);
+}
- if (match.mask->ttl &&
- !MLX5_CAP_ESW_FLOWTABLE_FDB
- (priv->mdev,
- ft_field_support.outer_ipv4_ttl)) {
+u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
+{
+ return flow->tunnel_id;
+}
+
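As a standalone sketch of the tunnel_id packing computed above (tun_id in the high bits, the enc_opts mapping id in the low ENC_OPTS_BITS), where the 12-bit width is an illustrative assumption rather than the driver's authoritative value, which comes from mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG]:

/* Pack and unpack a flow tunnel_id the way mlx5e_get_flow_tunnel_id()
 * and mlx5e_put_flow_tunnel_id() do. ENC_OPTS_BITS = 12 is assumed
 * here for illustration only.
 */
#include <assert.h>
#include <stdint.h>

#define ENC_OPTS_BITS      12
#define ENC_OPTS_BITS_MASK ((1u << ENC_OPTS_BITS) - 1)

static uint32_t pack_tunnel_id(uint32_t tun_id, uint32_t enc_opts_id)
{
	return tun_id << ENC_OPTS_BITS | enc_opts_id;
}

int main(void)
{
	uint32_t value = pack_tunnel_id(5, 3);

	assert((value & ENC_OPTS_BITS_MASK) == 3); /* enc_opts_id back out */
	assert((value >> ENC_OPTS_BITS) == 5);     /* tun_id back out */
	return 0;
}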
+static int parse_tunnel_attr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct net_device *filter_dev,
+ u8 *match_level,
+ bool *match_inner)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct netlink_ext_ack *extack = f->common.extack;
+ bool needs_mapping, sets_mapping;
+ int err;
+
+ if (!mlx5e_is_eswitch_flow(flow))
+ return -EOPNOTSUPP;
+
+ needs_mapping = !!flow->esw_attr->chain;
+ sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
+ *match_inner = !needs_mapping;
+
+ if ((needs_mapping || sets_mapping) &&
+ !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
+ NL_SET_ERR_MSG(extack,
+ "Chains on tunnel devices isn't supported without register loopback support");
+ netdev_warn(priv->netdev,
+ "Chains on tunnel devices isn't supported without register loopback support");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow->esw_attr->chain) {
+ err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
+ match_level);
+ if (err) {
NL_SET_ERR_MSG_MOD(extack,
- "Matching on TTL is not supported");
- return -EOPNOTSUPP;
+ "Failed to parse tunnel attributes");
+ netdev_warn(priv->netdev,
+ "Failed to parse tunnel attributes");
+ return err;
}
+ flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
}
- /* Enforce DMAC when offloading incoming tunneled flows.
- * Flow counters require a match on the DMAC.
- */
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- dmac_47_16), priv->netdev->dev_addr);
+ if (!needs_mapping && !sets_mapping)
+ return 0;
- /* let software handle IP fragments */
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
+ return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
+}
- return 0;
+static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
+{
+ return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ inner_headers);
}
-static void *get_match_headers_criteria(u32 flags,
- struct mlx5_flow_spec *spec)
+static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
- return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
- MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- inner_headers) :
- MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers);
+ return MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ inner_headers);
+}
+
+static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
+{
+ return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+}
+
+static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
+{
+ return MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
}
static void *get_match_headers_value(u32 flags,
struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
- MLX5_ADDR_OF(fte_match_param, spec->match_value,
- inner_headers) :
- MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers);
+ get_match_inner_headers_value(spec) :
+ get_match_outer_headers_value(spec);
+}
+
+static void *get_match_headers_criteria(u32 flags,
+ struct mlx5_flow_spec *spec)
+{
+ return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
+ get_match_inner_headers_criteria(spec) :
+ get_match_outer_headers_criteria(spec);
}
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
@@ -1845,6 +2088,7 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
}
static int __parse_cls_flower(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct net_device *filter_dev,
@@ -1885,6 +2129,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_TCP) |
BIT(FLOW_DISSECTOR_KEY_IP) |
+ BIT(FLOW_DISSECTOR_KEY_CT) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
@@ -1894,18 +2139,22 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
if (mlx5e_get_tc_tun(filter_dev)) {
- if (parse_tunnel_attr(priv, spec, f, filter_dev,
- outer_match_level))
- return -EOPNOTSUPP;
+ bool match_inner = false;
- /* At this point, header pointers should point to the inner
- * headers, outer header were already set by parse_tunnel_attr
- */
- match_level = inner_match_level;
- headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
- spec);
- headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
- spec);
+ err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
+ outer_match_level, &match_inner);
+ if (err)
+ return err;
+
+ if (match_inner) {
+ /* Header pointers should point to the inner headers
+ * if the packet was already decapsulated.
+ * Outer headers are set by parse_tunnel_attr.
+ */
+ match_level = inner_match_level;
+ headers_c = get_match_inner_headers_criteria(spec);
+ headers_v = get_match_inner_headers_value(spec);
+ }
}
err = mlx5e_flower_parse_meta(filter_dev, f);
@@ -2222,8 +2471,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
inner_match_level = MLX5_MATCH_NONE;
outer_match_level = MLX5_MATCH_NONE;
- err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level,
- &outer_match_level);
+ err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
+ &inner_match_level, &outer_match_level);
non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
outer_match_level : inner_match_level;
@@ -2383,25 +2632,26 @@ static struct mlx5_fields fields[] = {
OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
};
-/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
- * max from the SW pedit action. On success, attr->num_mod_hdr_actions
- * says how many HW actions were actually parsed.
- */
-static int offload_pedit_fields(struct pedit_headers_action *hdrs,
+static int offload_pedit_fields(struct mlx5e_priv *priv,
+ int namespace,
+ struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow_parse_attr *parse_attr,
u32 *action_flags,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
- int i, action_size, nactions, max_actions, first, last, next_z;
+ int i, action_size, first, last, next_z;
void *headers_c, *headers_v, *action, *vals_p;
u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
+ struct mlx5e_tc_mod_hdr_acts *mod_acts;
struct mlx5_fields *f;
unsigned long mask;
__be32 mask_be32;
__be16 mask_be16;
+ int err;
u8 cmd;
+ mod_acts = &parse_attr->mod_hdr_acts;
headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
@@ -2411,11 +2661,6 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
add_vals = &hdrs[1].vals;
action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
- action = parse_attr->mod_hdr_actions +
- parse_attr->num_mod_hdr_actions * action_size;
-
- max_actions = parse_attr->max_mod_hdr_actions;
- nactions = parse_attr->num_mod_hdr_actions;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
bool skip;
@@ -2441,13 +2686,6 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
return -EOPNOTSUPP;
}
- if (nactions == max_actions) {
- NL_SET_ERR_MSG_MOD(extack,
- "too many pedit actions, can't offload");
- printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
- return -EOPNOTSUPP;
- }
-
skip = false;
if (s_mask) {
void *match_mask = headers_c + f->match_offset;
@@ -2495,6 +2733,18 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
return -EOPNOTSUPP;
}
+ err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "too many pedit actions, can't offload");
+ mlx5_core_warn(priv->mdev,
+ "mlx5: parsed %d pedit actions, can't do more\n",
+ mod_acts->num_actions);
+ return err;
+ }
+
+ action = mod_acts->actions +
+ (mod_acts->num_actions * action_size);
MLX5_SET(set_action_in, action, action_type, cmd);
MLX5_SET(set_action_in, action, field, f->field);
@@ -2517,11 +2767,9 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
else if (f->field_bsize == 8)
MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
- action += action_size;
- nactions++;
+ ++mod_acts->num_actions;
}
- parse_attr->num_mod_hdr_actions = nactions;
return 0;
}
@@ -2534,34 +2782,52 @@ static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
}
-static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
- struct pedit_headers_action *hdrs,
- int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr)
+int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
+ int namespace,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
- int nkeys, action_size, max_actions;
+ int action_size, new_num_actions, max_hw_actions;
+ size_t new_sz, old_sz;
+ void *ret;
- nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits +
- hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits;
- action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
+ return 0;
- max_actions = mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace);
- /* can get up to crazingly 16 HW actions in 32 bits pedit SW key */
- max_actions = min(max_actions, nkeys * 16);
+ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
- parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
- if (!parse_attr->mod_hdr_actions)
+ max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
+ namespace);
+ new_num_actions = min(max_hw_actions,
+ mod_hdr_acts->actions ?
+ mod_hdr_acts->max_actions * 2 : 1);
+ if (mod_hdr_acts->max_actions == new_num_actions)
+ return -ENOSPC;
+
+ new_sz = action_size * new_num_actions;
+ old_sz = mod_hdr_acts->max_actions * action_size;
+ ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
+ if (!ret)
return -ENOMEM;
- parse_attr->max_mod_hdr_actions = max_actions;
+ memset(ret + old_sz, 0, new_sz - old_sz);
+ mod_hdr_acts->actions = ret;
+ mod_hdr_acts->max_actions = new_num_actions;
+
return 0;
}
+void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ kfree(mod_hdr_acts->actions);
+ mod_hdr_acts->actions = NULL;
+ mod_hdr_acts->num_actions = 0;
+ mod_hdr_acts->max_actions = 0;
+}
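alloc_mod_hdr_actions() above grows the action array on demand, doubling its capacity 1, 2, 4, ... up to the firmware's max_modify_header_actions and returning -ENOSPC once capped. A user-space analogue of that growth policy, with ACTION_SIZE and MAX_HW_ACTIONS as assumed stand-ins for the MLX5 values:

/* Grow-on-demand analogue of alloc_mod_hdr_actions(). */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define ACTION_SIZE    8   /* assumed stand-in for MLX5_UN_SZ_BYTES(...) */
#define MAX_HW_ACTIONS 16  /* assumed stand-in for the FW capability */

struct mod_hdr_acts {
	int num_actions;
	int max_actions;
	void *actions;
};

static int grow_mod_hdr_actions(struct mod_hdr_acts *acts)
{
	int new_max;
	void *ret;

	if (acts->num_actions < acts->max_actions)
		return 0;                    /* still room, nothing to do */

	new_max = acts->actions ? acts->max_actions * 2 : 1;
	if (new_max > MAX_HW_ACTIONS)
		new_max = MAX_HW_ACTIONS;
	if (acts->max_actions == new_max)
		return -ENOSPC;              /* already at the HW cap */

	ret = realloc(acts->actions, (size_t)new_max * ACTION_SIZE);
	if (!ret)
		return -ENOMEM;
	/* zero only the newly added tail, as the driver does */
	memset((char *)ret + acts->max_actions * ACTION_SIZE, 0,
	       (size_t)(new_max - acts->max_actions) * ACTION_SIZE);
	acts->actions = ret;
	acts->max_actions = new_max;
	return 0;
}

int main(void)
{
	struct mod_hdr_acts acts = { 0 };

	while (grow_mod_hdr_actions(&acts) == 0)
		acts.num_actions++;          /* fill to capacity each round */

	free(acts.actions);
	/* capacity doubled 1, 2, 4, 8, 16 and then hit the HW cap */
	return acts.max_actions == MAX_HW_ACTIONS ? 0 : 1;
}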
+
static const struct pedit_headers zero_masks = {};
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
@@ -2609,13 +2875,8 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
int err;
u8 cmd;
- if (!parse_attr->mod_hdr_actions) {
- err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr);
- if (err)
- goto out_err;
- }
-
- err = offload_pedit_fields(hdrs, parse_attr, action_flags, extack);
+ err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
+ action_flags, extack);
if (err < 0)
goto out_dealloc_parsed_actions;
@@ -2635,8 +2896,7 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
return 0;
out_dealloc_parsed_actions:
- kfree(parse_attr->mod_hdr_actions);
-out_err:
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
return err;
}
@@ -2681,7 +2941,9 @@ struct ipv6_hoplimit_word {
__u8 hop_limit;
};
-static bool is_action_keys_supported(const struct flow_action_entry *act)
+static int is_action_keys_supported(const struct flow_action_entry *act,
+ bool ct_flow, bool *modify_ip_header,
+ struct netlink_ext_ack *extack)
{
u32 mask, offset;
u8 htype;
@@ -2700,7 +2962,13 @@ static bool is_action_keys_supported(const struct flow_action_entry *act)
if (offset != offsetof(struct iphdr, ttl) ||
ttl_word->protocol ||
ttl_word->check) {
- return true;
+ *modify_ip_header = true;
+ }
+
+ if (ct_flow && offset >= offsetof(struct iphdr, saddr)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload re-write of ipv4 address with action ct");
+ return -EOPNOTSUPP;
}
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
struct ipv6_hoplimit_word *hoplimit_word =
@@ -2709,15 +2977,27 @@ static bool is_action_keys_supported(const struct flow_action_entry *act)
if (offset != offsetof(struct ipv6hdr, payload_len) ||
hoplimit_word->payload_len ||
hoplimit_word->nexthdr) {
- return true;
+ *modify_ip_header = true;
}
+
+ if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload re-write of ipv6 address with action ct");
+ return -EOPNOTSUPP;
+ }
+ } else if (ct_flow && (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
+ htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload re-write of transport header ports with action ct");
+ return -EOPNOTSUPP;
}
- return false;
+
+ return 0;
}
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
struct flow_action *flow_action,
- u32 actions,
+ u32 actions, bool ct_flow,
struct netlink_ext_ack *extack)
{
const struct flow_action_entry *act;
@@ -2725,7 +3005,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
void *headers_v;
u16 ethertype;
u8 ip_proto;
- int i;
+ int i, err;
headers_v = get_match_headers_value(actions, spec);
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
@@ -2740,10 +3020,10 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
act->id != FLOW_ACTION_ADD)
continue;
- if (is_action_keys_supported(act)) {
- modify_ip_header = true;
- break;
- }
+ err = is_action_keys_supported(act, ct_flow,
+ &modify_ip_header, extack);
+ if (err)
+ return err;
}
ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
@@ -2765,23 +3045,29 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
+ bool ct_flow;
u32 actions;
- if (mlx5e_is_eswitch_flow(flow))
+ ct_flow = flow_flag_test(flow, CT);
+ if (mlx5e_is_eswitch_flow(flow)) {
actions = flow->esw_attr->action;
- else
- actions = flow->nic_attr->action;
- if (flow_flag_test(flow, EGRESS) &&
- !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) ||
- (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
- (actions & MLX5_FLOW_CONTEXT_ACTION_DROP)))
- return false;
+ if (flow->esw_attr->split_count && ct_flow) {
+ /* All registers used by ct are cleared when using
+ * split rules.
+ */
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't offload mirroring with action ct");
+ return false;
+ }
+ } else {
+ actions = flow->nic_attr->action;
+ }
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
return modify_header_match_supported(&parse_attr->spec,
flow_action, actions,
- extack);
+ ct_flow, extack);
return true;
}
@@ -2837,8 +3123,7 @@ static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
return -EOPNOTSUPP;
}
- err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
- hdrs, NULL);
+ err = parse_tc_pedit_action(priv, &pedit_act, namespace, hdrs, NULL);
*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
return err;
@@ -2883,6 +3168,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
if (!flow_action_has_entries(flow_action))
return -EINVAL;
+ if (!flow_action_hw_stats_check(flow_action, extack,
+ FLOW_ACTION_HW_STATS_DELAYED_BIT))
+ return -EOPNOTSUPP;
+
attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
flow_action_for_each(i, act, flow_action) {
@@ -2900,7 +3189,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, hdrs, extack);
+ hdrs, extack);
if (err)
return err;
@@ -2969,9 +3258,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
/* in case all pedit actions are skipped, remove the MOD_HDR
* flag.
*/
- if (parse_attr->num_mod_hdr_actions == 0) {
+ if (parse_attr->mod_hdr_acts.num_actions == 0) {
action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- kfree(parse_attr->mod_hdr_actions);
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
}
}
@@ -3314,6 +3603,85 @@ static bool is_duplicated_output_device(struct net_device *dev,
return false;
}
+static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ const struct flow_action_entry *act,
+ u32 actions,
+ struct netlink_ext_ack *extack)
+{
+ u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ bool ft_flow = mlx5e_is_ft_flow(flow);
+ u32 dest_chain = act->chain_index;
+
+ if (ft_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_esw_chains_backwards_supported(esw) &&
+ dest_chain <= attr->chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto lower numbered chain isn't supported");
+ return -EOPNOTSUPP;
+ }
+ if (dest_chain > max_chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested destination chain is out of supported range");
+ return -EOPNOTSUPP;
+ }
+
+ if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto chain is not allowed if action has reformat or decap");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int verify_uplink_forwarding(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5e_rep_priv *rep_priv;
+
+ /* Forwarding non-encapsulated traffic between
+ * uplink ports is allowed only if the
+ * termination_table_raw_traffic cap is set.
+ *
+ * The input vport was stored in esw_attr->in_rep.
+ * In the LAG case, *priv* is the private data of
+ * the uplink, which may not be the input vport.
+ */
+ rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
+
+ if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
+ mlx5e_eswitch_uplink_rep(out_dev)))
+ return 0;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
+ termination_table_raw_traffic)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are both uplink, can't offload forwarding");
+ pr_err("devices %s %s are both uplink, can't offload forwarding\n",
+ priv->netdev->name, out_dev->name);
+ return -EOPNOTSUPP;
+ } else if (out_dev != rep_priv->netdev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not the same uplink, can't offload forwarding");
+ pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
+ priv->netdev->name, out_dev->name);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
@@ -3328,13 +3696,17 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
+ bool encap = false, decap = false;
+ u32 action = attr->action;
int err, i, if_count = 0;
- bool encap = false;
- u32 action = 0;
if (!flow_action_has_entries(flow_action))
return -EINVAL;
+ if (!flow_action_hw_stats_check(flow_action, extack,
+ FLOW_ACTION_HW_STATS_DELAYED_BIT))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_DROP:
@@ -3344,7 +3716,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
case FLOW_ACTION_MANGLE:
case FLOW_ACTION_ADD:
err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr, hdrs, extack);
+ hdrs, extack);
if (err)
return err;
@@ -3382,8 +3754,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
"can't support more output ports, can't offload forwarding");
- pr_err("can't support more than %d output ports, can't offload forwarding\n",
- attr->out_count);
+ netdev_warn(priv->netdev,
+ "can't support more than %d output ports, can't offload forwarding\n",
+ attr->out_count);
return -EOPNOTSUPP;
}
@@ -3441,11 +3814,17 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return err;
}
+ err = verify_uplink_forwarding(priv, flow, out_dev, extack);
+ if (err)
+ return err;
+
if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
- pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
- priv->netdev->name, out_dev->name);
+ netdev_warn(priv->netdev,
+ "devices %s %s not on same switch HW, can't offload forwarding\n",
+ priv->netdev->name,
+ out_dev->name);
return -EOPNOTSUPP;
}
@@ -3464,8 +3843,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
} else {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
- pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
- priv->netdev->name, out_dev->name);
+ netdev_warn(priv->netdev,
+ "devices %s %s not on same switch HW, can't offload forwarding\n",
+ priv->netdev->name,
+ out_dev->name);
return -EINVAL;
}
}
@@ -3507,28 +3888,24 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
attr->split_count = attr->out_count;
break;
case FLOW_ACTION_TUNNEL_DECAP:
- action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ decap = true;
break;
- case FLOW_ACTION_GOTO: {
- u32 dest_chain = act->chain_index;
- u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
+ case FLOW_ACTION_GOTO:
+ err = mlx5_validate_goto_chain(esw, flow, act, action,
+ extack);
+ if (err)
+ return err;
- if (ft_flow) {
- NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
- return -EOPNOTSUPP;
- }
- if (dest_chain <= attr->chain) {
- NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
- return -EOPNOTSUPP;
- }
- if (dest_chain > max_chain) {
- NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
- return -EOPNOTSUPP;
- }
action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->dest_chain = dest_chain;
+ attr->dest_chain = act->chain_index;
+ break;
+ case FLOW_ACTION_CT:
+ err = mlx5_tc_ct_parse_action(priv, attr, act, extack);
+ if (err)
+ return err;
+
+ flow_flag_set(flow, CT);
break;
- }
default:
NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
return -EOPNOTSUPP;
@@ -3557,9 +3934,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
* flag. we might have set split_count either by pedit or
* pop/push. if there is no pop/push either, reset it too.
*/
- if (parse_attr->num_mod_hdr_actions == 0) {
+ if (parse_attr->mod_hdr_acts.num_actions == 0) {
action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- kfree(parse_attr->mod_hdr_actions);
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
(action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
attr->split_count = 0;
@@ -3571,8 +3948,25 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
if (attr->dest_chain) {
+ if (decap) {
+ /* This could be supported by creating a mapping for
+ * the tunnel device only (without the tunnel), and
+ * associating that tunnel id with this decap flow.
+ *
+ * On restore (miss), we would then just set the saved
+ * tunnel device.
+ */
+
+ NL_SET_ERR_MSG(extack,
+ "Decap with goto isn't supported");
+ netdev_warn(priv->netdev,
+ "Decap with goto isn't supported");
+ return -EOPNOTSUPP;
+ }
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirroring goto chain rules isn't supported");
return -EOPNOTSUPP;
}
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
@@ -3580,7 +3974,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (!(attr->action &
(MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
- NL_SET_ERR_MSG(extack, "Rule must have at least one forward/drop action");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rule must have at least one forward/drop action");
return -EOPNOTSUPP;
}
@@ -3751,6 +4146,10 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
+ err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack);
+ if (err)
+ goto err_free;
+
err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
complete_all(&flow->init_done);
if (err) {
@@ -4035,7 +4434,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
goto errout;
}
- if (mlx5e_is_offloaded_flow(flow)) {
+ if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
counter = mlx5e_tc_get_counter(flow);
if (!counter)
goto errout;
@@ -4069,7 +4468,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
no_peer_counter:
mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
- flow_stats_update(&f->stats, bytes, packets, lastuse);
+ flow_stats_update(&f->stats, bytes, packets, lastuse,
+ FLOW_ACTION_HW_STATS_DELAYED);
trace_mlx5e_stats_flower(f);
errout:
mlx5e_flow_put(priv, flow);
@@ -4126,6 +4526,9 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ if (!flow_action_basic_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
@@ -4147,8 +4550,14 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *ma)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct netlink_ext_ack *extack = ma->common.extack;
+ if (!mlx5_esw_qos_enabled(esw)) {
+ NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
+ return -EOPNOTSUPP;
+ }
+
if (ma->common.prio != 1) {
NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
return -EINVAL;
@@ -4177,7 +4586,8 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
rpriv->prev_vf_vport_stats = cur_stats;
- flow_stats_update(&ma->stats, dpkts, dbytes, jiffies);
+ flow_stats_update(&ma->stats, dpkts, dbytes, jiffies,
+ FLOW_ACTION_HW_STATS_DELAYED);
}
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
@@ -4295,12 +4705,63 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
- return rhashtable_init(tc_ht, &tc_ht_params);
+ const size_t sz_enc_opts = sizeof(struct flow_dissector_key_enc_opts);
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *priv;
+ struct mapping_ctx *mapping;
+ int err;
+
+ uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
+ priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+
+ err = mlx5_tc_ct_init(uplink_priv);
+ if (err)
+ goto err_ct;
+
+ mapping = mapping_create(sizeof(struct tunnel_match_key),
+ TUNNEL_INFO_BITS_MASK, true);
+ if (IS_ERR(mapping)) {
+ err = PTR_ERR(mapping);
+ goto err_tun_mapping;
+ }
+ uplink_priv->tunnel_mapping = mapping;
+
+ mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
+ if (IS_ERR(mapping)) {
+ err = PTR_ERR(mapping);
+ goto err_enc_opts_mapping;
+ }
+ uplink_priv->tunnel_enc_opts_mapping = mapping;
+
+ err = rhashtable_init(tc_ht, &tc_ht_params);
+ if (err)
+ goto err_ht_init;
+
+ return err;
+
+err_ht_init:
+ mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
+err_enc_opts_mapping:
+ mapping_destroy(uplink_priv->tunnel_mapping);
+err_tun_mapping:
+ mlx5_tc_ct_clean(uplink_priv);
+err_ct:
+ netdev_warn(priv->netdev,
+ "Failed to initialize tc (eswitch), err: %d", err);
+ return err;
}
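mlx5e_tc_esw_init() above wires up two mapping contexts (full tunnel keys and enc_opts) so match data can be compressed into register-sized ids and recovered on miss. A rough round-trip sketch of that en/mapping API as introduced by this series (kernel context, not standalone; tunnel_mapping_demo is a hypothetical helper):

#include "en/mapping.h"

static int tunnel_mapping_demo(struct mapping_ctx *ctx,
			       struct tunnel_match_key *key)
{
	struct tunnel_match_key found;
	u32 id;
	int err;

	err = mapping_add(ctx, key, &id);    /* compress key -> small id */
	if (err)
		return err;

	err = mapping_find(ctx, id, &found); /* recover key on the miss path */

	mapping_remove(ctx, id);             /* drop the reference */
	return err;
}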
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
+ struct mlx5_rep_uplink_priv *uplink_priv;
+
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
+
+ uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
+ mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
+ mapping_destroy(uplink_priv->tunnel_mapping);
+
+ mlx5_tc_ct_clean(uplink_priv);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
@@ -4332,3 +4793,147 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
}
mutex_unlock(&rpriv->unready_flows_lock);
}
+
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv,
+ u32 tunnel_id)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct flow_dissector_key_enc_opts enc_opts = {};
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct metadata_dst *tun_dst;
+ struct tunnel_match_key key;
+ u32 tun_id, enc_opts_id;
+ struct net_device *dev;
+ int err;
+
+ enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
+ tun_id = tunnel_id >> ENC_OPTS_BITS;
+
+ if (!tun_id)
+ return true;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
+ if (err) {
+ WARN_ON_ONCE(true);
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel for tun_id: %d, err: %d\n",
+ tun_id, err);
+ return false;
+ }
+
+ if (enc_opts_id) {
+ err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_id, &enc_opts);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
+ enc_opts_id, err);
+ return false;
+ }
+ }
+
+ tun_dst = tun_rx_dst(enc_opts.len);
+ if (!tun_dst) {
+ WARN_ON_ONCE(true);
+ return false;
+ }
+
+ ip_tunnel_key_init(&tun_dst->u.tun_info.key,
+ key.enc_ipv4.src, key.enc_ipv4.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ 0, /* label */
+ key.enc_tp.src, key.enc_tp.dst,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ TUNNEL_KEY);
+
+ if (enc_opts.len)
+ ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.data,
+ enc_opts.len, enc_opts.dst_opt_type);
+
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+ dev = dev_get_by_index(&init_net, key.filter_ifindex);
+ if (!dev) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel device with ifindex: %d\n",
+ key.filter_ifindex);
+ return false;
+ }
+
+ /* Set tun_dev so we do dev_put() after datapath */
+ tc_priv->tun_dev = dev;
+
+ skb->dev = dev;
+
+ return true;
+}
+#endif /* CONFIG_NET_TC_SKB_EXT */
+
+bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv)
+{
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct tc_skb_ext *tc_skb_ext;
+ struct mlx5_eswitch *esw;
+ struct mlx5e_priv *priv;
+ int tunnel_moffset;
+ int err;
+
+ reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
+ if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
+ reg_c0 = 0;
+ reg_c1 = be32_to_cpu(cqe->imm_inval_pkey);
+
+ if (!reg_c0)
+ return true;
+
+ priv = netdev_priv(skb->dev);
+ esw = priv->mdev->priv.eswitch;
+
+ err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find chain for chain tag: %d, err: %d\n",
+ reg_c0, err);
+ return false;
+ }
+
+ if (chain) {
+ tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ if (!tc_skb_ext) {
+ WARN_ON(1);
+ return false;
+ }
+
+ tc_skb_ext->chain = chain;
+
+ tuple_id = reg_c1 & TUPLE_ID_MAX;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
+ return false;
+ }
+
+ tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
+ tunnel_id = reg_c1 >> (8 * tunnel_moffset);
+ return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+#endif /* CONFIG_NET_TC_SKB_EXT */
+
+ return true;
+}
+
+void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
+{
+ if (tc_priv->tun_dev)
+ dev_put(tc_priv->tun_dev);
+}
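A standalone sketch of the register decoding done by mlx5e_tc_rep_update_skb() above: reg_c0 carries the chain tag (with the default flow tag meaning "no metadata"), and the tunnel mapping id sits above the first moffset bytes of reg_c1. Both constants below are assumed stand-ins for MLX5_FS_DEFAULT_FLOW_TAG and the TUNNEL_TO_REG offset:

#include <assert.h>
#include <stdint.h>

#define DEFAULT_FLOW_TAG 0xFFFFFFu /* assumed stand-in for MLX5_FS_DEFAULT_FLOW_TAG */
#define TUNNEL_MOFFSET   1         /* assumed byte offset of the tunnel id in reg_c1 */

int main(void)
{
	uint32_t reg_c0 = DEFAULT_FLOW_TAG;
	uint32_t reg_c1 = 0x00abcd00u;

	if (reg_c0 == DEFAULT_FLOW_TAG)  /* default tag means "no chain metadata" */
		reg_c0 = 0;
	assert(reg_c0 == 0);

	/* tunnel mapping id lives above the first TUNNEL_MOFFSET bytes */
	assert((reg_c1 >> (8 * TUNNEL_MOFFSET)) == 0x00abcdu);
	return 0;
}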
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 262cdb7b69b1..abdcfa4c4e0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -91,9 +91,63 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
+enum mlx5e_tc_attr_to_reg {
+ CHAIN_TO_REG,
+ TUNNEL_TO_REG,
+ CTSTATE_TO_REG,
+ ZONE_TO_REG,
+ MARK_TO_REG,
+ LABELS_TO_REG,
+ FTEID_TO_REG,
+ TUPLEID_TO_REG,
+};
+
+struct mlx5e_tc_attr_to_reg_mapping {
+ int mfield; /* rewrite field */
+ int moffset; /* offset of mfield */
+ int mlen; /* bytes to rewrite/match */
+
+ int soffset; /* offset of spec for match */
+};
+
+extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
+
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
struct net_device *out_dev);
+struct mlx5e_tc_update_priv {
+ struct net_device *tun_dev;
+};
+
+bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv);
+
+void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv);
+
+struct mlx5e_tc_mod_hdr_acts {
+ int num_actions;
+ int max_actions;
+ void *actions;
+};
+
+int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data);
+
+void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data,
+ u32 mask);
+
+int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
+ int namespace,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+
+struct mlx5e_tc_flow;
+u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow);
+
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index ee60383adc5b..fd6b2a1898c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -72,8 +72,8 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
{
int txq_ix = netdev_pick_tx(dev, skb, NULL);
struct mlx5e_priv *priv = netdev_priv(dev);
- u16 num_channels;
int up = 0;
+ int ch_ix;
if (!netdev_get_num_tc(dev))
return txq_ix;
@@ -86,14 +86,13 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
if (skb_vlan_tag_present(skb))
up = skb_vlan_tag_get_prio(skb);
- /* txq_ix can be larger than num_channels since
- * dev->num_real_tx_queues = num_channels * num_tc
+ /* Normalize any picked txq_ix to [0, num_channels),
+ * so we can return a txq_ix that matches the channel and
+ * packet UP.
*/
- num_channels = priv->channels.params.num_channels;
- if (txq_ix >= num_channels)
- txq_ix = priv->txq2sq[txq_ix]->ch_ix;
+ ch_ix = priv->txq2sq[txq_ix]->ch_ix;
- return priv->channel_tc2realtxq[txq_ix][up];
+ return priv->channel_tc2realtxq[ch_ix][up];
}
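A standalone sketch of the normalization above, using a made-up 2-channel, 2-TC queue layout: any stack-picked txq_ix is first mapped to its owning channel (what priv->txq2sq[txq_ix]->ch_ix encodes) and then re-indexed by the packet's priority:

#include <assert.h>

#define NUM_CHANNELS 2
#define NUM_TC       2

static const int channel_tc2realtxq[NUM_CHANNELS][NUM_TC] = {
	{ 0, 2 }, /* channel 0: tc0 -> txq 0, tc1 -> txq 2 */
	{ 1, 3 }, /* channel 1: tc0 -> txq 1, tc1 -> txq 3 */
};

/* txq -> owning channel, i.e. what priv->txq2sq[txq_ix]->ch_ix encodes */
static const int txq2channel[NUM_CHANNELS * NUM_TC] = { 0, 1, 0, 1 };

int main(void)
{
	int txq_ix = 3; /* stack-picked queue, may exceed num_channels */
	int up = 1;     /* VLAN priority */
	int ch_ix = txq2channel[txq_ix];

	assert(channel_tc2realtxq[ch_ix][up] == 3);
	return 0;
}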
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 800d34ed8a96..87c49e7a164c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -31,6 +31,7 @@
*/
#include <linux/irq.h>
+#include <linux/indirect_call_wrapper.h>
#include "en.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
@@ -100,7 +101,10 @@ static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskr
busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
mlx5e_xsk_update_tx_wakeup(xsksq);
- xsk_rx_alloc_err = xskrq->post_wqes(xskrq);
+ xsk_rx_alloc_err = INDIRECT_CALL_2(xskrq->post_wqes,
+ mlx5e_post_rx_mpwqes,
+ mlx5e_post_rx_wqes,
+ xskrq);
busy_xsk |= mlx5e_xsk_update_rx_wakeup(xskrq, xsk_rx_alloc_err);
return busy_xsk;
@@ -143,7 +147,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_poll_ico_cq(&c->icosq.cq);
- busy |= rq->post_wqes(rq);
+ busy |= INDIRECT_CALL_2(rq->post_wqes,
+ mlx5e_post_rx_mpwqes,
+ mlx5e_post_rx_wqes,
+ rq);
if (xsk_open) {
mlx5e_poll_ico_cq(&c->xskicosq.cq);
busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
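The INDIRECT_CALL_2() wrapper used above compares the function pointer against the two likely targets so a retpoline-built kernel can take a direct call on the hot path. A simplified, standalone rendering of that idea (without the kernel's likely() annotations):

#include <assert.h>

#define INDIRECT_CALL_1(f, f1, ...)					\
	((f) == (f1) ? (f1)(__VA_ARGS__) : (f)(__VA_ARGS__))
#define INDIRECT_CALL_2(f, f2, f1, ...)					\
	((f) == (f2) ? (f2)(__VA_ARGS__) :				\
		       INDIRECT_CALL_1(f, f1, __VA_ARGS__))

static int post_mpwqes(int rq) { return rq + 1; }
static int post_wqes(int rq)   { return rq + 2; }

int main(void)
{
	int (*post)(int) = post_wqes;

	/* resolves to a direct call to post_wqes() at run time */
	assert(INDIRECT_CALL_2(post, post_mpwqes, post_wqes, 40) == 42);
	return 0;
}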
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/esw/Makefile
new file mode 100644
index 000000000000..c78512eed8d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+subdir-ccflags-y += -I$(src)/..
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
index 4276194b633f..029001040737 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
@@ -5,23 +5,26 @@
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/fs.h>
-#include "eswitch_offloads_chains.h"
+#include "esw/chains.h"
+#include "en/mapping.h"
#include "mlx5_core.h"
#include "fs_core.h"
#include "eswitch.h"
#include "en.h"
+#include "en_tc.h"
#define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv)
#define esw_chains_lock(esw) (esw_chains_priv(esw)->lock)
#define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht)
+#define esw_chains_mapping(esw) (esw_chains_priv(esw)->chains_mapping)
#define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht)
#define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left)
#define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb)
#define tc_end_fdb(esw) (esw_chains_priv(esw)->tc_end_fdb)
#define fdb_ignore_flow_level_supported(esw) \
(MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
-
-#define ESW_OFFLOADS_NUM_GROUPS 4
+#define fdb_modify_header_fwd_to_table_supported(esw) \
+ (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
* and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
@@ -36,6 +39,7 @@ static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
1 * 1024 * 1024,
64 * 1024,
128 };
+#define ESW_FT_TBL_SZ (64 * 1024)
struct mlx5_esw_chains_priv {
struct rhashtable chains_ht;
@@ -44,6 +48,7 @@ struct mlx5_esw_chains_priv {
struct mutex lock;
struct mlx5_flow_table *tc_end_fdb;
+ struct mapping_ctx *chains_mapping;
int fdb_left[ARRAY_SIZE(ESW_POOLS)];
};
@@ -54,9 +59,12 @@ struct fdb_chain {
u32 chain;
int ref;
+ int id;
struct mlx5_eswitch *esw;
struct list_head prios_list;
+ struct mlx5_flow_handle *restore_rule;
+ struct mlx5_modify_hdr *miss_modify_hdr;
};
struct fdb_prio_key {
@@ -99,6 +107,12 @@ bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw)
return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
}
+bool mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw)
+{
+ return mlx5_esw_chains_prios_supported(esw) &&
+ fdb_ignore_flow_level_supported(esw);
+}
+
u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_chains_prios_supported(esw))
@@ -198,7 +212,9 @@ mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- sz = mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
+ sz = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
+ mlx5_esw_chains_get_avail_sz_from_pool(esw, ESW_FT_TBL_SZ) :
+ mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
if (!sz)
return ERR_PTR(-ENOSPC);
ft_attr.max_fte = sz;
@@ -234,7 +250,7 @@ mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
}
ft_attr.autogroup.num_reserved_entries = 2;
- ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
+ ft_attr.autogroup.max_num_groups = esw->params.large_group_num;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(fdb)) {
esw_warn(esw->dev,
@@ -255,6 +271,83 @@ mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw,
mlx5_destroy_flow_table(fdb);
}
+static int
+create_fdb_chain_restore(struct fdb_chain *fdb_chain)
+{
+ char modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)];
+ struct mlx5_eswitch *esw = fdb_chain->esw;
+ struct mlx5_modify_hdr *mod_hdr;
+ u32 index;
+ int err;
+
+ if (fdb_chain->chain == mlx5_esw_chains_get_ft_chain(esw) ||
+ !mlx5_esw_chains_prios_supported(esw))
+ return 0;
+
+ err = mapping_add(esw_chains_mapping(esw), &fdb_chain->chain, &index);
+ if (err)
+ return err;
+ if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
+ /* we got the special default flow tag id, so we won't know
+ * if we actually marked the packet with the restore rule
+ * we create.
+ *
+ * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
+ */
+ err = mapping_add(esw_chains_mapping(esw),
+ &fdb_chain->chain, &index);
+ mapping_remove(esw_chains_mapping(esw),
+ MLX5_FS_DEFAULT_FLOW_TAG);
+ if (err)
+ return err;
+ }
+
+ fdb_chain->id = index;
+
+ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, modact, field,
+ mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mfield);
+ MLX5_SET(set_action_in, modact, offset,
+ mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].moffset * 8);
+ MLX5_SET(set_action_in, modact, length,
+ mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mlen * 8);
+ MLX5_SET(set_action_in, modact, data, fdb_chain->id);
+ mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
+ 1, modact);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ goto err_mod_hdr;
+ }
+ fdb_chain->miss_modify_hdr = mod_hdr;
+
+ fdb_chain->restore_rule = esw_add_restore_rule(esw, fdb_chain->id);
+ if (IS_ERR(fdb_chain->restore_rule)) {
+ err = PTR_ERR(fdb_chain->restore_rule);
+ goto err_rule;
+ }
+
+ return 0;
+
+err_rule:
+ mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr);
+err_mod_hdr:
+ /* Datapath can't find this mapping, so we can safely remove it */
+ mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
+ return err;
+}
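A standalone sketch of the default-flow-tag dodge in create_fdb_chain_restore() above, with a toy counting allocator standing in for mapping_add()/mapping_remove() and a small DEFAULT_FLOW_TAG chosen for illustration (upstream reserves a larger value):

#include <assert.h>
#include <stdint.h>

#define DEFAULT_FLOW_TAG 2u /* small illustrative value */

static uint32_t next_id = 1; /* toy allocator standing in for mapping_add() */

static int alloc_id(uint32_t *index)
{
	*index = next_id++;
	return 0;
}

static void free_id(uint32_t index) { (void)index; }

static int get_restore_id(uint32_t *index)
{
	int err = alloc_id(index);

	if (err)
		return err;
	if (*index == DEFAULT_FLOW_TAG) {
		/* same dodge as create_fdb_chain_restore(): take another
		 * id before releasing the reserved one
		 */
		err = alloc_id(index);
		free_id(DEFAULT_FLOW_TAG);
	}
	return err;
}

int main(void)
{
	uint32_t id;

	assert(get_restore_id(&id) == 0 && id == 1);
	assert(get_restore_id(&id) == 0 && id == 3); /* skipped the tag, 2 */
	return 0;
}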
+
+static void destroy_fdb_chain_restore(struct fdb_chain *fdb_chain)
+{
+ struct mlx5_eswitch *esw = fdb_chain->esw;
+
+ if (!fdb_chain->miss_modify_hdr)
+ return;
+
+ mlx5_del_flow_rules(fdb_chain->restore_rule);
+ mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr);
+ mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
+}
+
static struct fdb_chain *
mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
{
@@ -269,6 +362,10 @@ mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
fdb_chain->chain = chain;
INIT_LIST_HEAD(&fdb_chain->prios_list);
+ err = create_fdb_chain_restore(fdb_chain);
+ if (err)
+ goto err_restore;
+
err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node,
chain_params);
if (err)
@@ -277,6 +374,8 @@ mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
return fdb_chain;
err_insert:
+ destroy_fdb_chain_restore(fdb_chain);
+err_restore:
kvfree(fdb_chain);
return ERR_PTR(err);
}
@@ -288,6 +387,8 @@ mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain)
rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node,
chain_params);
+
+ destroy_fdb_chain_restore(fdb_chain);
kvfree(fdb_chain);
}
@@ -310,10 +411,11 @@ mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
}
static struct mlx5_flow_handle *
-mlx5_esw_chains_add_miss_rule(struct mlx5_flow_table *fdb,
+mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain,
+ struct mlx5_flow_table *fdb,
struct mlx5_flow_table *next_fdb)
{
- static const struct mlx5_flow_spec spec = {};
+ struct mlx5_eswitch *esw = fdb_chain->esw;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act act = {};
@@ -322,7 +424,13 @@ mlx5_esw_chains_add_miss_rule(struct mlx5_flow_table *fdb,
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = next_fdb;
- return mlx5_add_flow_rules(fdb, &spec, &act, &dest, 1);
+ if (next_fdb == tc_end_fdb(esw) &&
+ mlx5_esw_chains_prios_supported(esw)) {
+ act.modify_hdr = fdb_chain->miss_modify_hdr;
+ act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ }
+
+ return mlx5_add_flow_rules(fdb, NULL, &act, &dest, 1);
}
static int
@@ -345,7 +453,8 @@ mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio,
list_for_each_entry_continue_reverse(pos,
&fdb_chain->prios_list,
list) {
- miss_rules[n] = mlx5_esw_chains_add_miss_rule(pos->fdb,
+ miss_rules[n] = mlx5_esw_chains_add_miss_rule(fdb_chain,
+ pos->fdb,
next_fdb);
if (IS_ERR(miss_rules[n])) {
err = PTR_ERR(miss_rules[n]);
@@ -459,7 +568,7 @@ mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw,
}
/* Add miss rule to next_fdb */
- miss_rule = mlx5_esw_chains_add_miss_rule(fdb, next_fdb);
+ miss_rule = mlx5_esw_chains_add_miss_rule(fdb_chain, fdb, next_fdb);
if (IS_ERR(miss_rule)) {
err = PTR_ERR(miss_rule);
goto err_miss_rule;
@@ -618,12 +727,44 @@ mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw)
return tc_end_fdb(esw);
}
+struct mlx5_flow_table *
+mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw)
+{
+ u32 chain, prio, level;
+ int err;
+
+ if (!fdb_ignore_flow_level_supported(esw)) {
+ err = -EOPNOTSUPP;
+
+ esw_warn(esw->dev,
+ "Couldn't create global flow table, ignore_flow_level not supported.");
+ goto err_ignore;
+ }
+
+ chain = mlx5_esw_chains_get_chain_range(esw);
+ prio = mlx5_esw_chains_get_prio_range(esw);
+ level = mlx5_esw_chains_get_level_range(esw);
+
+ return mlx5_esw_chains_create_fdb_table(esw, chain, prio, level);
+
+err_ignore:
+ return ERR_PTR(err);
+}
+
+void
+mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ft)
+{
+ mlx5_esw_chains_destroy_fdb_table(esw, ft);
+}
+
static int
mlx5_esw_chains_init(struct mlx5_eswitch *esw)
{
struct mlx5_esw_chains_priv *chains_priv;
struct mlx5_core_dev *dev = esw->dev;
u32 max_flow_counter, fdb_max;
+ struct mapping_ctx *mapping;
int err;
chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
@@ -637,7 +778,7 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw)
esw_debug(dev,
"Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
- max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, fdb_max);
+ max_flow_counter, esw->params.large_group_num, fdb_max);
mlx5_esw_chains_init_sz_pool(esw);
@@ -645,6 +786,16 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw)
esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
+ } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
+ esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
+ } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
+ /* Disabled when the ttl workaround is needed, e.g.
+ * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
+ */
+ esw_warn(dev,
+ "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
+ esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
} else {
esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
esw_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
@@ -660,10 +811,20 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw)
if (err)
goto init_prios_ht_err;
+ mapping = mapping_create(sizeof(u32), esw_get_max_restore_tag(esw),
+ true);
+ if (IS_ERR(mapping)) {
+ err = PTR_ERR(mapping);
+ goto mapping_err;
+ }
+ esw_chains_mapping(esw) = mapping;
+
mutex_init(&esw_chains_lock(esw));
return 0;
+mapping_err:
+ rhashtable_destroy(&esw_prios_ht(esw));
init_prios_ht_err:
rhashtable_destroy(&esw_chains_ht(esw));
init_chains_ht_err:
@@ -675,6 +836,7 @@ static void
mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw)
{
mutex_destroy(&esw_chains_lock(esw));
+ mapping_destroy(esw_chains_mapping(esw));
rhashtable_destroy(&esw_prios_ht(esw));
rhashtable_destroy(&esw_chains_ht(esw));
@@ -704,12 +866,9 @@ mlx5_esw_chains_open(struct mlx5_eswitch *esw)
/* Open level 1 for split rules now if prios isn't supported */
if (!mlx5_esw_chains_prios_supported(esw)) {
- ft = mlx5_esw_chains_get_table(esw, 0, 1, 1);
-
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
+ err = mlx5_esw_vport_tbl_get(esw);
+ if (err)
goto level_1_err;
- }
}
return 0;
@@ -725,7 +884,7 @@ static void
mlx5_esw_chains_close(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_chains_prios_supported(esw))
- mlx5_esw_chains_put_table(esw, 0, 1, 1);
+ mlx5_esw_vport_tbl_put(esw);
mlx5_esw_chains_put_table(esw, 0, 1, 0);
mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
}
@@ -756,3 +915,30 @@ mlx5_esw_chains_destroy(struct mlx5_eswitch *esw)
mlx5_esw_chains_close(esw);
mlx5_esw_chains_cleanup(esw);
}
+
+int
+mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain,
+ u32 *chain_mapping)
+{
+ return mapping_add(esw_chains_mapping(esw), &chain, chain_mapping);
+}
+
+int
+mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw, u32 chain_mapping)
+{
+ return mapping_remove(esw_chains_mapping(esw), chain_mapping);
+}
+
+int mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag,
+ u32 *chain)
+{
+ int err;
+
+ err = mapping_find(esw_chains_mapping(esw), tag, chain);
+ if (err) {
+ esw_warn(esw->dev, "Can't find chain for tag: %d\n", tag);
+ return -ENOENT;
+ }
+
+ return 0;
+}
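Taken together, the three helpers above give rules a compact tag for a chain and let the miss path recover the chain from that tag. A round-trip sketch in kernel context (not standalone; chain_tag_roundtrip is a hypothetical helper):

#include "esw/chains.h"

static int chain_tag_roundtrip(struct mlx5_eswitch *esw, u32 chain)
{
	u32 tag, restored;
	int err;

	err = mlx5_esw_chains_get_chain_mapping(esw, chain, &tag);
	if (err)
		return err;

	err = mlx5_eswitch_get_chain_for_tag(esw, tag, &restored);
	if (!err && restored != chain)
		err = -EINVAL;

	mlx5_esw_chains_put_chain_mapping(esw, tag);
	return err;
}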
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
index 2e13097fe348..f8c4239846ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
@@ -4,8 +4,12 @@
#ifndef __ML5_ESW_CHAINS_H__
#define __ML5_ESW_CHAINS_H__
+#include "eswitch.h"
+
bool
mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
+bool
+mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw);
u32
mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
u32
@@ -23,8 +27,23 @@ mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
struct mlx5_flow_table *
mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw);
+struct mlx5_flow_table *
+mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw);
+void
+mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ft);
+
+int
+mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain,
+ u32 *chain_mapping);
+int
+mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw,
+ u32 chain_mapping);
+
int mlx5_esw_chains_create(struct mlx5_eswitch *esw);
void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
-#endif /* __ML5_ESW_CHAINS_H__ */
+int
+mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, u32 *chain);
+#endif /* __ML5_ESW_CHAINS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index e49acd0c5da5..7f618a443bfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -39,6 +39,7 @@
#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
+#include "devlink.h"
#include "ecpf.h"
enum {
@@ -1333,7 +1334,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
goto out;
}
- memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach drop flow counter */
@@ -1345,7 +1345,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
dest_num++;
}
vport->ingress.legacy.drop_rule =
- mlx5_add_flow_rules(vport->ingress.acl, spec,
+ mlx5_add_flow_rules(vport->ingress.acl, NULL,
&flow_act, dst, dest_num);
if (IS_ERR(vport->ingress.legacy.drop_rule)) {
err = PTR_ERR(vport->ingress.legacy.drop_rule);
@@ -1408,7 +1408,6 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_flow_destination drop_ctr_dst = {0};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_spec *spec;
int dest_num = 0;
int err = 0;
@@ -1437,11 +1436,6 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
if (err)
return err;
- /* Drop others rule (star rule) */
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec)
- goto out;
-
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach egress drop flow counter */
@@ -1453,7 +1447,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
dest_num++;
}
vport->egress.legacy.drop_rule =
- mlx5_add_flow_rules(vport->egress.acl, spec,
+ mlx5_add_flow_rules(vport->egress.acl, NULL,
&flow_act, dst, dest_num);
if (IS_ERR(vport->egress.legacy.drop_rule)) {
err = PTR_ERR(vport->egress.legacy.drop_rule);
@@ -1462,8 +1456,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
vport->vport, err);
vport->egress.legacy.drop_rule = NULL;
}
-out:
- kvfree(spec);
+
return err;
}
@@ -1669,34 +1662,6 @@ static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
((u8 *)node_guid)[0] = mac[5];
}
-static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
-{
- u16 vport_num = vport->vport;
- int flags;
-
- if (mlx5_esw_is_manager_vport(esw, vport_num))
- return;
-
- mlx5_modify_vport_admin_state(esw->dev,
- MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
- vport_num, 1,
- vport->info.link_state);
-
- /* Host PF has its own mac/guid. */
- if (vport_num) {
- mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
- vport->info.mac);
- mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
- vport->info.node_guid);
- }
-
- flags = (vport->info.vlan || vport->info.qos) ?
- SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
- modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
- flags);
-}
-
static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
@@ -1706,8 +1671,7 @@ static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
if (mlx5_esw_is_manager_vport(esw, vport->vport))
return 0;
- if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
- MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
+ if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
if (IS_ERR(vport->ingress.legacy.drop_counter)) {
esw_warn(esw->dev,
@@ -1721,8 +1685,7 @@ static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
if (ret)
goto ingress_err;
- if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
- MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
+ if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
if (IS_ERR(vport->egress.legacy.drop_counter)) {
esw_warn(esw->dev,
@@ -1783,29 +1746,75 @@ static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
-static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- enum mlx5_eswitch_vport_event enabled_events)
+static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ u16 vport_num = vport->vport;
+ int flags;
+ int err;
+
+ err = esw_vport_setup_acl(esw, vport);
+ if (err)
+ return err;
+
+ /* Attach vport to the eswitch rate limiter */
+ esw_vport_enable_qos(esw, vport, vport->info.max_rate, vport->qos.bw_share);
+
+ if (mlx5_esw_is_manager_vport(esw, vport_num))
+ return 0;
+
+ mlx5_modify_vport_admin_state(esw->dev,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+ vport_num, 1,
+ vport->info.link_state);
+
+ /* Host PF has its own mac/guid. */
+ if (vport_num) {
+ mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
+ vport->info.mac);
+ mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
+ vport->info.node_guid);
+ }
+
+ flags = (vport->info.vlan || vport->info.qos) ?
+ SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
+ modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
+ vport->info.qos, flags);
+
+ return 0;
+}
+
+/* Don't clean up vport->info; it's needed to restore the vport configuration */
+static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
u16 vport_num = vport->vport;
+
+ if (!mlx5_esw_is_manager_vport(esw, vport_num))
+ mlx5_modify_vport_admin_state(esw->dev,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+ vport_num, 1,
+ MLX5_VPORT_ADMIN_STATE_DOWN);
+
+ esw_vport_disable_qos(esw, vport);
+ esw_vport_cleanup_acl(esw, vport);
+}
+
+static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events)
+{
+ struct mlx5_vport *vport;
int ret;
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+
mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
- /* Restore old vport configuration */
- esw_apply_vport_conf(esw, vport);
-
- ret = esw_vport_setup_acl(esw, vport);
+ ret = esw_vport_setup(esw, vport);
if (ret)
goto done;
- /* Attach vport to the eswitch rate limiter */
- if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
- vport->qos.bw_share))
- esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
-
/* Sync with current vport context */
vport->enabled_events = enabled_events;
vport->enabled = true;
@@ -1826,10 +1835,11 @@ done:
return ret;
}
-static void esw_disable_vport(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
- u16 vport_num = vport->vport;
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
mutex_lock(&esw->state_lock);
if (!vport->enabled)
@@ -1847,16 +1857,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
- esw_vport_disable_qos(esw, vport);
-
- if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
- esw->mode == MLX5_ESWITCH_LEGACY)
- mlx5_modify_vport_admin_state(esw->dev,
- MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
- vport_num, 1,
- MLX5_VPORT_ADMIN_STATE_DOWN);
-
- esw_vport_cleanup_acl(esw, vport);
+ esw_vport_cleanup(esw, vport);
esw->enabled_vports--;
done:
@@ -1944,6 +1945,59 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
+int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events)
+{
+ int err;
+
+ err = esw_enable_vport(esw, vport_num, enabled_events);
+ if (err)
+ return err;
+
+ err = esw_offloads_load_rep(esw, vport_num);
+ if (err)
+ goto err_rep;
+
+ return err;
+
+err_rep:
+ esw_disable_vport(esw, vport_num);
+ return err;
+}
+
+void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ esw_offloads_unload_rep(esw, vport_num);
+ esw_disable_vport(esw, vport_num);
+}
+
+void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
+{
+ int i;
+
+ mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs)
+ mlx5_eswitch_unload_vport(esw, i);
+}
+
+int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
+ enum mlx5_eswitch_vport_event enabled_events)
+{
+ int err;
+ int i;
+
+ mlx5_esw_for_each_vf_vport_num(esw, i, num_vfs) {
+ err = mlx5_eswitch_load_vport(esw, i, enabled_events);
+ if (err)
+ goto vf_err;
+ }
+
+ return 0;
+
+vf_err:
+ mlx5_eswitch_unload_vf_vports(esw, i - 1);
+ return err;
+}
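
The VF loop above uses the usual load-then-unwind idiom: on a failure at vport i, everything loaded so far is torn down in reverse order, and the count passed to the unload helper is i - 1 because vport i itself never finished loading. A minimal user-space sketch of that idiom, with hypothetical load_one()/unload_one() helpers standing in for mlx5_eswitch_load_vport()/mlx5_eswitch_unload_vport():

    #include <stdio.h>

    static int load_one(int i)
    {
            printf("load %d\n", i);
            return i == 3 ? -1 : 0;         /* simulate a failure at vport 3 */
    }

    static void unload_one(int i)
    {
            printf("unload %d\n", i);
    }

    static int load_all(int n)
    {
            int err = 0;
            int i;

            for (i = 1; i <= n; i++) {      /* VF vport numbers start at 1 */
                    err = load_one(i);
                    if (err)
                            goto unwind;
            }
            return 0;

    unwind:
            while (--i >= 1)                /* unload i-1 .. 1, newest first */
                    unload_one(i);
            return err;
    }

    int main(void)
    {
            return load_all(5) ? 1 : 0;
    }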
+
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
* whichever are present on the eswitch.
*/
@@ -1951,46 +2005,33 @@ int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events)
{
- struct mlx5_vport *vport;
- int num_vfs;
int ret;
- int i;
/* Enable PF vport */
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- ret = esw_enable_vport(esw, vport, enabled_events);
+ ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
if (ret)
return ret;
/* Enable ECPF vport */
if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- ret = esw_enable_vport(esw, vport, enabled_events);
+ ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
if (ret)
goto ecpf_err;
}
/* Enable VF vports */
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
- ret = esw_enable_vport(esw, vport, enabled_events);
- if (ret)
- goto vf_err;
- }
+ ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
+ enabled_events);
+ if (ret)
+ goto vf_err;
return 0;
vf_err:
- num_vfs = i - 1;
- mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, num_vfs)
- esw_disable_vport(esw, vport);
-
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- esw_disable_vport(esw, vport);
- }
+ if (mlx5_ecpf_vport_exists(esw->dev))
+ mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- esw_disable_vport(esw, vport);
+ mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
return ret;
}
@@ -1999,19 +2040,81 @@ ecpf_err:
*/
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
- struct mlx5_vport *vport;
- int i;
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
- mlx5_esw_for_all_vports_reverse(esw, i, vport)
- esw_disable_vport(esw, vport);
+ if (mlx5_ecpf_vport_exists(esw->dev))
+ mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
+
+ mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
}
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
{
+ struct devlink *devlink = priv_to_devlink(esw->dev);
+ union devlink_param_value val;
int err;
- if (!ESW_ALLOWED(esw) ||
- !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
+ err = devlink_param_driverinit_value_get(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ &val);
+ if (!err) {
+ esw->params.large_group_num = val.vu32;
+ } else {
+ esw_warn(esw->dev,
+			 "Devlink can't get param fdb_large_groups, using default (%d).\n",
+ ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
+ esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
+ }
+}
+
+static void
+mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
+{
+ const u32 *out;
+
+ WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
+
+ if (num_vfs < 0)
+ return;
+
+ if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
+ esw->esw_funcs.num_vfs = num_vfs;
+ return;
+ }
+
+ out = mlx5_esw_query_functions(esw->dev);
+ if (IS_ERR(out))
+ return;
+
+ esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
+ host_params_context.host_num_of_vfs);
+ kvfree(out);
+}
+
+/**
+ * mlx5_eswitch_enable_locked - Enable eswitch
+ * @esw: Pointer to eswitch
+ * @mode: Eswitch mode to enable
+ * @num_vfs: Enable the eswitch for a given number of VFs. This is optional.
+ *	     Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
+ *	     The caller should pass num_vfs > 0 when enabling the eswitch for
+ *	     vf vports, and num_vfs = 0 when the eswitch is enabled without
+ *	     sriov VFs or when the caller is unaware of the sriov state of
+ *	     the host PF on an ECPF based eswitch. The caller should pass
+ *	     num_vfs < 0 when it should be completely ignored, typically
+ *	     when the eswitch is enabled without sriov on either a PF or
+ *	     ECPF system.
+ * mlx5_eswitch_enable_locked() enables the eswitch in either legacy or
+ * offloads mode. If num_vfs >= 0 is provided, it sets up the VF related
+ * eswitch vports. It returns 0 on success or an error code on failure.
+ */
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
+{
+ int err;
+
+ lockdep_assert_held(&esw->mode_lock);
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
return -EOPNOTSUPP;
}
@@ -2022,6 +2125,10 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "egress ACL is not supported by FW\n");
+ mlx5_eswitch_get_devlink_param(esw);
+
+ mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
+
esw_create_tsar(esw);
esw->mode = mode;
@@ -2058,11 +2165,34 @@ abort:
return err;
}
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
+/**
+ * mlx5_eswitch_enable - Enable eswitch
+ * @esw: Pointer to eswitch
+ * @num_vfs: Enable the eswitch for a given number of VFs.
+ *	The caller must pass num_vfs > 0 when enabling the eswitch for
+ *	vf vports.
+ * mlx5_eswitch_enable() returns 0 on success or an error code on failure.
+ */
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
+{
+ int ret;
+
+ if (!ESW_ALLOWED(esw))
+ return 0;
+
+ mutex_lock(&esw->mode_lock);
+ ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
+ mutex_unlock(&esw->mode_lock);
+ return ret;
+}
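
mlx5_eswitch_enable() and mlx5_eswitch_disable() are now thin wrappers that take mode_lock around the _locked variants, so paths that already hold the lock (esw_offloads_start()/esw_offloads_stop() below, called under mode_lock from the devlink mode-set handler) can switch modes without reacquiring it. A sketch of the same wrapper pattern in plain C with pthreads, using hypothetical names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mode_lock = PTHREAD_MUTEX_INITIALIZER;
    static int mode;                /* 0 = none, 1 = legacy, 2 = offloads */

    /* Callers must hold mode_lock; lockdep_assert_held() plays this role. */
    static int enable_locked(int new_mode)
    {
            mode = new_mode;
            return 0;
    }

    static void disable_locked(void)
    {
            mode = 0;
    }

    /* Public entry point: take the lock, then call the _locked variant. */
    static int enable(int new_mode)
    {
            int err;

            pthread_mutex_lock(&mode_lock);
            err = enable_locked(new_mode);
            pthread_mutex_unlock(&mode_lock);
            return err;
    }

    /* A mode-switch path composes _locked calls under one lock hold. */
    static int switch_mode(int new_mode)
    {
            int err;

            pthread_mutex_lock(&mode_lock);
            disable_locked();
            err = enable_locked(new_mode);
            pthread_mutex_unlock(&mode_lock);
            return err;
    }

    int main(void)
    {
            enable(1);
            switch_mode(2);
            printf("mode=%d\n", mode);
            return 0;
    }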
+
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
{
int old_mode;
- if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
+ lockdep_assert_held_write(&esw->mode_lock);
+
+ if (esw->mode == MLX5_ESWITCH_NONE)
return;
esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
@@ -2091,6 +2221,16 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
mlx5_eswitch_clear_vf_vports_info(esw);
}
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
+{
+ if (!ESW_ALLOWED(esw))
+ return;
+
+ mutex_lock(&esw->mode_lock);
+ mlx5_eswitch_disable_locked(esw, clear_vf);
+ mutex_unlock(&esw->mode_lock);
+}
+
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw;
@@ -2142,6 +2282,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
hash_init(esw->offloads.mod_hdr.hlist);
atomic64_set(&esw->offloads.num_flows, 0);
mutex_init(&esw->state_lock);
+ mutex_init(&esw->mode_lock);
mlx5_esw_for_all_vports(esw, i, vport) {
vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
@@ -2176,6 +2317,8 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
esw_offloads_cleanup_reps(esw);
+ mutex_destroy(&esw->mode_lock);
+ mutex_destroy(&esw->state_lock);
mutex_destroy(&esw->offloads.mod_hdr.lock);
mutex_destroy(&esw->offloads.encap_tbl_lock);
kfree(esw->vports);
@@ -2410,12 +2553,11 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
}
/* Star rule to forward all traffic to uplink vport */
- memset(spec, 0, sizeof(*spec));
memset(&dest, 0, sizeof(dest));
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = MLX5_VPORT_UPLINK;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -2600,9 +2742,13 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
u64 bytes = 0;
int err = 0;
- if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
+ if (esw->mode != MLX5_ESWITCH_LEGACY)
return 0;
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled)
+ goto unlock;
+
if (vport->egress.legacy.drop_counter)
mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
&stats->rx_dropped, &bytes);
@@ -2613,20 +2759,22 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
!MLX5_CAP_GEN(dev, transmit_discard_vport_down))
- return 0;
+ goto unlock;
err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
&rx_discard_vport_down,
&tx_discard_vport_down);
if (err)
- return err;
+ goto unlock;
if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
stats->rx_dropped += rx_discard_vport_down;
if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
stats->tx_dropped += tx_discard_vport_down;
- return 0;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
@@ -2742,22 +2890,4 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}
-void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
-{
- const u32 *out;
-
- WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
-
- if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- esw->esw_funcs.num_vfs = num_vfs;
- return;
- }
-
- out = mlx5_esw_query_functions(esw->dev);
- if (IS_ERR(out))
- return;
- esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
- host_params_context.host_num_of_vfs);
- kvfree(out);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 4472710ccc9c..39f42f985fbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -42,6 +42,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
+#include "en/tc_ct.h"
#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
@@ -55,6 +56,8 @@
#ifdef CONFIG_MLX5_ESWITCH
+#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15
+
#define MLX5_MAX_UC_PER_VPORT(dev) \
(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))
@@ -183,12 +186,22 @@ struct mlx5_eswitch_fdb {
int vlan_push_pop_refcount;
struct mlx5_esw_chains_priv *esw_chains_priv;
+ struct {
+ DECLARE_HASHTABLE(table, 8);
+ /* Protects vports.table */
+ struct mutex lock;
+ } vports;
+
} offloads;
};
u32 flags;
};
struct mlx5_esw_offload {
+ struct mlx5_flow_table *ft_offloads_restore;
+ struct mlx5_flow_group *restore_group;
+ struct mlx5_modify_hdr *restore_copy_hdr_id;
+
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
struct mlx5_eswitch_rep *vport_reps;
@@ -224,6 +237,7 @@ struct mlx5_esw_functions {
enum {
MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
+ MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};
struct mlx5_eswitch {
@@ -244,6 +258,11 @@ struct mlx5_eswitch {
*/
struct mutex state_lock;
+	/* Protects eswitch mode changes that occur via one or more
+	 * user commands, e.g. sriov state changes or devlink commands.
+	 */
+ struct mutex mode_lock;
+
struct {
bool enabled;
u32 root_tsar_id;
@@ -255,6 +274,9 @@ struct mlx5_eswitch {
u16 manager_vport;
u16 first_host_vport;
struct mlx5_esw_functions esw_funcs;
+ struct {
+ u32 large_group_num;
+ } params;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -279,7 +301,11 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode);
+
+#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, u8 mac[ETH_ALEN]);
@@ -315,6 +341,7 @@ struct mlx5_termtbl_handle;
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_spec *spec);
@@ -375,6 +402,8 @@ enum {
enum {
MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
+ MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
+ MLX5_ESW_ATTR_FLAG_HAIRPIN = BIT(3),
};
struct mlx5_esw_flow_attr {
@@ -405,6 +434,9 @@ struct mlx5_esw_flow_attr {
u16 prio;
u32 dest_chain;
u32 flags;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_flow_table *dest_ft;
+ struct mlx5_ct_attr ct_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
};
@@ -414,7 +446,6 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
enum devlink_eswitch_encap_mode encap,
struct netlink_ext_ack *extack);
@@ -433,6 +464,11 @@ int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
struct mlx5_vport *vport,
u16 vlan_id, u32 flow_action);
+static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
+{
+ return esw->qos.enabled;
+}
+
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
u8 vlan_depth)
{
@@ -608,7 +644,6 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
int
@@ -623,11 +658,30 @@ void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
+int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
+void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);
+
+struct mlx5_flow_handle *
+esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
+u32
+esw_get_max_restore_tag(struct mlx5_eswitch *esw);
+
+int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
+void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
+ enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
-static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; }
+static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
@@ -636,8 +690,11 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
-
+static inline struct mlx5_flow_handle *
+esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 1a57b2bd74b8..f171eb2234b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -37,7 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
-#include "eswitch_offloads_chains.h"
+#include "esw/chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
@@ -50,6 +50,181 @@
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0
+/* Per vport tables */
+
+#define MLX5_ESW_VPORT_TABLE_SIZE 128
+
+/* This struct is used as a key to the hash table; it must be packed
+ * so the hash result is consistent.
+ */
+struct mlx5_vport_key {
+ u32 chain;
+ u16 prio;
+ u16 vport;
+ u16 vhca_id;
+} __packed;
+
+struct mlx5_vport_table {
+ struct hlist_node hlist;
+ struct mlx5_flow_table *fdb;
+ u32 num_rules;
+ struct mlx5_vport_key key;
+};
+
+#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
+
+static struct mlx5_flow_table *
+esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *fdb;
+
+ ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
+ ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
+ ft_attr.prio = FDB_PER_VPORT;
+ fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(fdb)) {
+ esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
+ PTR_ERR(fdb));
+ }
+
+ return fdb;
+}
+
+static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5_vport_key *key)
+{
+ key->vport = attr->in_rep->vport;
+ key->chain = attr->chain;
+ key->prio = attr->prio;
+ key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ return jhash(key, sizeof(*key), 0);
+}
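
The key is hashed with jhash() over its raw bytes and later compared with memcmp(), so compiler-inserted padding would make two logically equal keys hash or compare differently depending on whatever bytes the padding happens to hold; __packed removes that padding. A small stand-alone C demonstration of the layout difference (field sizes mirror mlx5_vport_key; exact padding is compiler-dependent):

    #include <stdint.h>
    #include <stdio.h>

    struct key_padded {
            uint32_t chain;
            uint16_t prio;
            uint16_t vport;
            uint16_t vhca_id;
    };                              /* typically 12 bytes: 2 bytes tail padding */

    struct key_packed {
            uint32_t chain;
            uint16_t prio;
            uint16_t vport;
            uint16_t vhca_id;
    } __attribute__((packed));      /* exactly 10 bytes, nothing uninitialized */

    int main(void)
    {
            printf("padded: %zu bytes, packed: %zu bytes\n",
                   sizeof(struct key_padded), sizeof(struct key_packed));
            return 0;
    }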
+
+/* caller must hold vports.lock */
+static struct mlx5_vport_table *
+esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
+{
+ struct mlx5_vport_table *e;
+
+ hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
+ if (!memcmp(&e->key, skey, sizeof(*skey)))
+ return e;
+
+ return NULL;
+}
+
+static void
+esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_vport_table *e;
+ struct mlx5_vport_key key;
+ u32 hkey;
+
+ mutex_lock(&esw->fdb_table.offloads.vports.lock);
+ hkey = flow_attr_to_vport_key(esw, attr, &key);
+ e = esw_vport_tbl_lookup(esw, &key, hkey);
+ if (!e || --e->num_rules)
+ goto out;
+
+ hash_del(&e->hlist);
+ mlx5_destroy_flow_table(e->fdb);
+ kfree(e);
+out:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+}
+
+static struct mlx5_flow_table *
+esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *fdb;
+ struct mlx5_vport_table *e;
+ struct mlx5_vport_key skey;
+ u32 hkey;
+
+ mutex_lock(&esw->fdb_table.offloads.vports.lock);
+ hkey = flow_attr_to_vport_key(esw, attr, &skey);
+ e = esw_vport_tbl_lookup(esw, &skey, hkey);
+ if (e) {
+ e->num_rules++;
+ goto out;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e) {
+ fdb = ERR_PTR(-ENOMEM);
+ goto err_alloc;
+ }
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!ns) {
+ esw_warn(dev, "Failed to get FDB namespace\n");
+ fdb = ERR_PTR(-ENOENT);
+ goto err_ns;
+ }
+
+ fdb = esw_vport_tbl_create(esw, ns);
+ if (IS_ERR(fdb))
+ goto err_ns;
+
+ e->fdb = fdb;
+ e->num_rules = 1;
+ e->key = skey;
+ hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
+out:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+ return e->fdb;
+
+err_ns:
+ kfree(e);
+err_alloc:
+ mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+ return fdb;
+}
+
+int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_flow_attr attr = {};
+ struct mlx5_eswitch_rep rep = {};
+ struct mlx5_flow_table *fdb;
+ struct mlx5_vport *vport;
+ int i;
+
+ attr.prio = 1;
+ attr.in_rep = &rep;
+ mlx5_esw_for_all_vports(esw, i, vport) {
+ attr.in_rep->vport = vport->vport;
+ fdb = esw_vport_tbl_get(esw, &attr);
+ if (IS_ERR(fdb))
+ goto out;
+ }
+ return 0;
+
+out:
+ mlx5_esw_vport_tbl_put(esw);
+ return PTR_ERR(fdb);
+}
+
+void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_flow_attr attr = {};
+ struct mlx5_eswitch_rep rep = {};
+ struct mlx5_vport *vport;
+ int i;
+
+ attr.prio = 1;
+ attr.in_rep = &rep;
+ mlx5_esw_for_all_vports(esw, i, vport) {
+ attr.in_rep->vport = vport->vport;
+ esw_vport_tbl_put(esw, &attr);
+ }
+}
+
+/* End: Per vport tables */
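
esw_vport_tbl_get()/esw_vport_tbl_put() above implement a keyed get-or-create cache with per-entry reference counts under a single mutex: get() bumps num_rules on a hit or allocates on a miss, and put() drops the count and destroys the flow table on the last reference. A compact user-space model of the same pattern (one cache slot instead of a hashtable, hypothetical names):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
            int key;
            int refs;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct entry *slot;              /* stands in for the hashtable */

    static struct entry *get(int key)
    {
            struct entry *e;

            pthread_mutex_lock(&lock);
            if (slot && slot->key == key) { /* hit: just take a reference */
                    slot->refs++;
                    e = slot;
                    goto out;
            }
            e = calloc(1, sizeof(*e));      /* miss: create with one reference */
            if (e) {
                    e->key = key;
                    e->refs = 1;
                    slot = e;
            }
    out:
            pthread_mutex_unlock(&lock);
            return e;
    }

    static void put(struct entry *e)
    {
            pthread_mutex_lock(&lock);
            if (--e->refs == 0) {           /* last user tears the entry down */
                    slot = NULL;
                    free(e);
            }
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            struct entry *a = get(7);
            struct entry *b = get(7);       /* same entry, refcount now 2 */

            printf("refs=%d\n", a->refs);
            put(b);
            put(a);                         /* drops to 0 and frees */
            return 0;
    }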
+
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
u16 vport_num)
{
@@ -85,7 +260,8 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
attr->in_rep->vport));
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
- MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0);
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
@@ -124,6 +300,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
bool split = !!(attr->split_count);
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
+ bool hairpin = false;
int j, i = 0;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
@@ -148,7 +325,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
struct mlx5_flow_table *ft;
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ if (attr->dest_ft) {
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = attr->dest_ft;
+ i++;
+ } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
@@ -191,8 +373,6 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
i++;
}
- mlx5_eswitch_set_rule_source_port(esw, spec, attr);
-
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
if (attr->inner_match_level != MLX5_MATCH_NONE)
@@ -201,27 +381,45 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
- fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
- !!split);
+ if (split) {
+ fdb = esw_vport_tbl_get(esw, attr);
+ } else {
+ if (attr->chain || attr->prio)
+ fdb = mlx5_esw_chains_get_table(esw, attr->chain,
+ attr->prio, 0);
+ else
+ fdb = attr->fdb;
+
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
+ mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+ }
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
}
- if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
+ if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec)) {
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
&flow_act, dest, i);
- else
+ hairpin = true;
+ } else {
rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
+ }
if (IS_ERR(rule))
goto err_add_rule;
else
atomic64_inc(&esw->offloads.num_flows);
+ if (hairpin)
+ attr->flags |= MLX5_ESW_ATTR_FLAG_HAIRPIN;
+
return rule;
err_add_rule:
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split);
+ if (split)
+ esw_vport_tbl_put(esw, attr);
+ else if (attr->chain || attr->prio)
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_esw_get:
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
@@ -247,7 +445,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
goto err_get_fast;
}
- fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1);
+ fwd_fdb = esw_vport_tbl_get(esw, attr);
if (IS_ERR(fwd_fdb)) {
rule = ERR_CAST(fwd_fdb);
goto err_get_fwd;
@@ -285,7 +483,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
return rule;
add_err:
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+ esw_vport_tbl_put(esw, attr);
err_get_fwd:
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
@@ -303,20 +501,25 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
mlx5_del_flow_rules(rule);
- /* unref the term table */
- for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
- if (attr->dests[i].termtbl)
- mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_HAIRPIN) {
+ /* unref the term table */
+ for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+ if (attr->dests[i].termtbl)
+ mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
+ }
}
atomic64_dec(&esw->offloads.num_flows);
if (fwd_rule) {
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+ esw_vport_tbl_put(esw, attr);
mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
} else {
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
- !!split);
+ if (split)
+ esw_vport_tbl_put(esw, attr);
+ else if (attr->chain || attr->prio)
+ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
+ 0);
if (attr->dest_chain)
mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
}
@@ -578,14 +781,21 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
+static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
+{
+ return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
+ MLX5_FDB_TO_VPORT_REG_C_1;
+}
+
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
- u8 fdb_to_vport_reg_c_id;
+ u8 curr, wanted;
int err;
- if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
+ !mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0;
err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
@@ -593,22 +803,33 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
if (err)
return err;
- fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.fdb_to_vport_reg_c_id);
+ curr = MLX5_GET(query_esw_vport_context_out, out,
+ esw_vport_context.fdb_to_vport_reg_c_id);
+ wanted = MLX5_FDB_TO_VPORT_REG_C_0;
+ if (mlx5_eswitch_reg_c1_loopback_supported(esw))
+ wanted |= MLX5_FDB_TO_VPORT_REG_C_1;
if (enable)
- fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
+ curr |= wanted;
else
- fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
+ curr &= ~wanted;
MLX5_SET(modify_esw_vport_context_in, in,
- esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
+ esw_vport_context.fdb_to_vport_reg_c_id, curr);
MLX5_SET(modify_esw_vport_context_in, in,
field_select.fdb_to_vport_reg_c_id, 1);
- return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false,
- in, sizeof(in));
+ err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in,
+ sizeof(in));
+ if (!err) {
+ if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
+ esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
+ else
+ esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
+ }
+
+ return err;
}
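
The rewritten esw_set_passing_vport_metadata() computes a wanted bitmask (REG_C_0, plus REG_C_1 when loopback is supported) and applies it to the queried fdb_to_vport_reg_c_id with a plain read-modify-write, which keeps enable and disable symmetric: both touch exactly the same bits. The core of that update as a tiny stand-alone sketch (bit values are illustrative, not the firmware encoding):

    #include <stdint.h>
    #include <stdio.h>

    #define REG_C_0 0x1u
    #define REG_C_1 0x2u

    static uint8_t apply(uint8_t curr, int loopback_supported, int enable)
    {
            uint8_t wanted = REG_C_0;

            if (loopback_supported)
                    wanted |= REG_C_1;

            return enable ? (curr | wanted) : (curr & ~wanted);
    }

    int main(void)
    {
            uint8_t curr = 0;

            curr = apply(curr, 1, 1);       /* enable: set C_0 and C_1 */
            printf("enabled:  0x%x\n", curr);
            curr = apply(curr, 1, 0);       /* disable: clear the same bits */
            printf("disabled: 0x%x\n", curr);
            return 0;
    }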
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
@@ -621,7 +842,8 @@ static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters_2);
- MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
} else {
@@ -836,6 +1058,59 @@ out:
return err;
}
+struct mlx5_flow_handle *
+esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
+{
+ struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
+ struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
+ struct mlx5_flow_context *flow_context;
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_spec *spec;
+ void *misc;
+
+ if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ ESW_CHAIN_TAG_METADATA_MASK);
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;
+
+ flow_context = &spec->flow_context;
+ flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
+ flow_context->flow_tag = tag;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = esw->offloads.ft_offloads;
+
+ flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ kfree(spec);
+
+ if (IS_ERR(flow_rule))
+ esw_warn(esw->dev,
+ "Failed to create restore rule for tag: %d, err(%d)\n",
+ tag, (int)PTR_ERR(flow_rule));
+
+ return flow_rule;
+}
+
+u32
+esw_get_max_restore_tag(struct mlx5_eswitch *esw)
+{
+ return ESW_CHAIN_TAG_METADATA_MASK;
+}
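
Each restore rule matches a chain tag in reg_c_0 under ESW_CHAIN_TAG_METADATA_MASK (the same mask esw_get_max_restore_tag() reports), re-attaches the tag via flow_tag, and copies reg_c_1 into reg_b before forwarding to ft_offloads. The masked match it programs reduces to the check below (the mask value is a placeholder, not the driver's constant; the upper reg_c_0 bits carry source-port metadata and are ignored here):

    #include <stdint.h>
    #include <stdio.h>

    #define CHAIN_TAG_MASK 0xffffu  /* placeholder for ESW_CHAIN_TAG_METADATA_MASK */

    /* A rule hits when the masked metadata equals the masked match value. */
    static int restore_rule_hits(uint32_t reg_c_0, uint32_t tag)
    {
            return (reg_c_0 & CHAIN_TAG_MASK) == (tag & CHAIN_TAG_MASK);
    }

    int main(void)
    {
            uint32_t reg_c_0 = 0xabcd0005;  /* upper bits: source-port metadata */

            printf("tag 5 hits: %d\n", restore_rule_hits(reg_c_0, 5));
            printf("tag 6 hits: %d\n", restore_rule_hits(reg_c_0, 6));
            return 0;
    }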
+
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
@@ -851,8 +1126,9 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS_2);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- misc_parameters_2.metadata_reg_c_0);
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
} else {
MLX5_SET(create_flow_group_in, flow_group_in,
match_criteria_enable,
@@ -1057,6 +1333,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
}
ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
+ ft_attr.prio = 1;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft_offloads)) {
@@ -1134,7 +1411,8 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
- MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
} else {
@@ -1160,6 +1438,148 @@ out:
return flow_rule;
}
+
+static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
+{
+ u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
+ struct mlx5_core_dev *dev = esw->dev;
+ int vport;
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ if (esw->mode == MLX5_ESWITCH_NONE)
+ return -EOPNOTSUPP;
+
+ switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+ case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+ mlx5_mode = MLX5_INLINE_MODE_NONE;
+ goto out;
+ case MLX5_CAP_INLINE_MODE_L2:
+ mlx5_mode = MLX5_INLINE_MODE_L2;
+ goto out;
+ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+ goto query_vports;
+ }
+
+query_vports:
+ mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
+ mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
+ mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
+ if (prev_mlx5_mode != mlx5_mode)
+ return -EINVAL;
+ prev_mlx5_mode = mlx5_mode;
+ }
+
+out:
+ *mode = mlx5_mode;
+ return 0;
+}
+
+static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+
+ if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
+ return;
+
+ mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
+ mlx5_destroy_flow_group(offloads->restore_group);
+ mlx5_destroy_flow_table(offloads->ft_offloads_restore);
+}
+
+static int esw_create_restore_table(struct mlx5_eswitch *esw)
+{
+ u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_modify_hdr *mod_hdr;
+ void *match_criteria, *misc;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int err = 0;
+
+ if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
+ return 0;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
+ if (!ns) {
+ esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ esw_warn(esw->dev, "Failed to create restore table, err %d\n",
+ err);
+ goto out_free;
+ }
+
+ memset(flow_group_in, 0, inlen);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ misc_parameters_2);
+
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ ESW_CHAIN_TAG_METADATA_MASK);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ft_attr.max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+ g = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create restore flow group, err: %d\n",
+ err);
+ goto err_group;
+ }
+
+ MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
+ MLX5_SET(copy_action_in, modact, src_field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
+ MLX5_SET(copy_action_in, modact, dst_field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ mod_hdr = mlx5_modify_header_alloc(esw->dev,
+ MLX5_FLOW_NAMESPACE_KERNEL, 1,
+ modact);
+ if (IS_ERR(mod_hdr)) {
+ esw_warn(dev, "Failed to create restore mod header, err: %d\n",
+ err);
+ err = PTR_ERR(mod_hdr);
+ goto err_mod_hdr;
+ }
+
+ esw->offloads.ft_offloads_restore = ft;
+ esw->offloads.restore_group = g;
+ esw->offloads.restore_copy_hdr_id = mod_hdr;
+
+ kvfree(flow_group_in);
+
+ return 0;
+
+err_mod_hdr:
+ mlx5_destroy_flow_group(g);
+err_group:
+ mlx5_destroy_flow_table(ft);
+out_free:
+ kvfree(flow_group_in);
+
+ return err;
+}
+
static int esw_offloads_start(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
@@ -1172,13 +1592,14 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return -EINVAL;
}
- mlx5_eswitch_disable(esw, false);
- mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
- err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
+ mlx5_eswitch_disable_locked(esw, false);
+ err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
+ esw->dev->priv.sriov.num_vfs);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
- err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
+ err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
+ MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to legacy");
@@ -1233,187 +1654,66 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
esw->offloads.rep_ops[rep_type]->unload(rep);
}
-static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
-{
- struct mlx5_eswitch_rep *rep;
-
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
- __esw_offloads_unload_rep(esw, rep, rep_type);
- }
-
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
- __esw_offloads_unload_rep(esw, rep, rep_type);
- }
-
- rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
- __esw_offloads_unload_rep(esw, rep, rep_type);
-}
-
-static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
- u8 rep_type)
+static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int i;
- mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
+ mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
__esw_offloads_unload_rep(esw, rep, rep_type);
-}
-
-static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
-{
- u8 rep_type = NUM_REP_TYPES;
-
- while (rep_type-- > 0)
- __unload_reps_vf_vport(esw, nvports, rep_type);
-}
-
-static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
-{
- __unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
-
- /* Special vports must be the last to unload. */
- __unload_reps_special_vport(esw, rep_type);
-}
-
-static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
-{
- u8 rep_type = NUM_REP_TYPES;
-
- while (rep_type-- > 0)
- __unload_reps_all_vport(esw, rep_type);
-}
-
-static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep, u8 rep_type)
-{
- int err = 0;
-
- if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
- REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
- err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
- if (err)
- atomic_set(&rep->rep_data[rep_type].state,
- REP_REGISTERED);
- }
-
- return err;
-}
-
-static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
-{
- struct mlx5_eswitch_rep *rep;
- int err;
-
- rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
- err = __esw_offloads_load_rep(esw, rep, rep_type);
- if (err)
- return err;
-
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
- err = __esw_offloads_load_rep(esw, rep, rep_type);
- if (err)
- goto err_pf;
- }
if (mlx5_ecpf_vport_exists(esw->dev)) {
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
- err = __esw_offloads_load_rep(esw, rep, rep_type);
- if (err)
- goto err_ecpf;
+ __esw_offloads_unload_rep(esw, rep, rep_type);
}
- return 0;
-
-err_ecpf:
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
__esw_offloads_unload_rep(esw, rep, rep_type);
}
-err_pf:
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
__esw_offloads_unload_rep(esw, rep, rep_type);
- return err;
}
-static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
- u8 rep_type)
+int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
- int err, i;
-
- mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
- err = __esw_offloads_load_rep(esw, rep, rep_type);
- if (err)
- goto err_vf;
- }
-
- return 0;
-
-err_vf:
- __unload_reps_vf_vport(esw, --i, rep_type);
- return err;
-}
-
-static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
-{
+ int rep_type;
int err;
- /* Special vports must be loaded first, uplink rep creates mdev resource. */
- err = __load_reps_special_vport(esw, rep_type);
- if (err)
- return err;
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return 0;
- err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
- if (err)
- goto err_vfs;
+ rep = mlx5_eswitch_get_rep(esw, vport_num);
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
+ if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
+ REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
+ err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
+ if (err)
+ goto err_reps;
+ }
return 0;
-err_vfs:
- __unload_reps_special_vport(esw, rep_type);
- return err;
-}
-
-static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
-{
- u8 rep_type = 0;
- int err;
-
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
- err = __load_reps_vf_vport(esw, nvports, rep_type);
- if (err)
- goto err_reps;
- }
-
- return err;
-
err_reps:
- while (rep_type-- > 0)
- __unload_reps_vf_vport(esw, nvports, rep_type);
+ atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
+ for (--rep_type; rep_type >= 0; rep_type--)
+ __esw_offloads_unload_rep(esw, rep, rep_type);
return err;
}
-static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
+void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
- u8 rep_type = 0;
- int err;
-
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
- err = __load_reps_all_vport(esw, rep_type);
- if (err)
- goto err_reps;
- }
+ struct mlx5_eswitch_rep *rep;
+ int rep_type;
- return err;
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return;
-err_reps:
- while (rep_type-- > 0)
- __unload_reps_all_vport(esw, rep_type);
- return err;
+ rep = mlx5_eswitch_get_rep(esw, vport_num);
+ for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
+ __esw_offloads_unload_rep(esw, rep, rep_type);
}
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
@@ -1601,14 +1901,21 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
- static const struct mlx5_flow_spec spec = {};
struct mlx5_flow_act flow_act = {};
int err = 0;
+ u32 key;
+
+ key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
+ key >>= ESW_SOURCE_PORT_METADATA_OFFSET;
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
- MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
- MLX5_SET(set_action_in, action, data,
- mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
+ MLX5_SET(set_action_in, action, field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
+ MLX5_SET(set_action_in, action, data, key);
+ MLX5_SET(set_action_in, action, offset,
+ ESW_SOURCE_PORT_METADATA_OFFSET);
+ MLX5_SET(set_action_in, action, length,
+ ESW_SOURCE_PORT_METADATA_BITS);
vport->ingress.offloads.modify_metadata =
mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
@@ -1625,7 +1932,7 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
vport->ingress.offloads.modify_metadata_rule =
mlx5_add_flow_rules(vport->ingress.acl,
- &spec, &flow_act, NULL, 0);
+ NULL, &flow_act, NULL, 0);
if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
esw_warn(esw->dev,
@@ -1837,6 +2144,18 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
return true;
}
+static bool
+esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
+{
+ return mlx5_core_mp_enabled(esw->dev);
+}
+
+static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
+{
+ return esw_check_vport_match_metadata_mandatory(esw) &&
+ esw_check_vport_match_metadata_supported(esw);
+}
+
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
@@ -1875,7 +2194,7 @@ static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
struct mlx5_vport *vport;
int err;
- if (esw_check_vport_match_metadata_supported(esw))
+ if (esw_use_vport_metadata(esw))
esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
@@ -1911,27 +2230,34 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
if (err)
return err;
- err = esw_create_offloads_fdb_tables(esw, total_vports);
+ err = esw_create_offloads_table(esw, total_vports);
if (err)
- goto create_fdb_err;
+ goto create_offloads_err;
- err = esw_create_offloads_table(esw, total_vports);
+ err = esw_create_restore_table(esw);
if (err)
- goto create_ft_err;
+ goto create_restore_err;
+
+ err = esw_create_offloads_fdb_tables(esw, total_vports);
+ if (err)
+ goto create_fdb_err;
err = esw_create_vport_rx_group(esw, total_vports);
if (err)
goto create_fg_err;
+ mutex_init(&esw->fdb_table.offloads.vports.lock);
+ hash_init(esw->fdb_table.offloads.vports.table);
+
return 0;
create_fg_err:
- esw_destroy_offloads_table(esw);
-
-create_ft_err:
esw_destroy_offloads_fdb_tables(esw);
-
create_fdb_err:
+ esw_destroy_restore_table(esw);
+create_restore_err:
+ esw_destroy_offloads_table(esw);
+create_offloads_err:
esw_destroy_uplink_offloads_acl_tables(esw);
return err;
@@ -1939,9 +2265,11 @@ create_fdb_err:
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
+ mutex_destroy(&esw->fdb_table.offloads.vports.lock);
esw_destroy_vport_rx_group(esw);
- esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);
+ esw_destroy_restore_table(esw);
+ esw_destroy_offloads_table(esw);
esw_destroy_uplink_offloads_acl_tables(esw);
}
@@ -1961,11 +2289,12 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
/* Number of VFs can only change from "0 to x" or "x to 0". */
if (esw->esw_funcs.num_vfs > 0) {
- esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
} else {
int err;
- err = esw_offloads_load_vf_reps(esw, new_num_vfs);
+ err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
+ MLX5_VPORT_UC_ADDR_CHANGE);
if (err)
return;
}
@@ -2023,40 +2352,43 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+ mutex_init(&esw->offloads.termtbl_mutex);
mlx5_rdma_enable_roce(esw->dev);
- err = esw_offloads_steering_init(esw);
- if (err)
- goto err_steering_init;
err = esw_set_passing_vport_metadata(esw, true);
if (err)
goto err_vport_metadata;
+ err = esw_offloads_steering_init(esw);
+ if (err)
+ goto err_steering_init;
+
/* Representor will control the vport link state */
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
- err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
+ /* Uplink vport rep must load first. */
+ err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
if (err)
- goto err_vports;
+ goto err_uplink;
- err = esw_offloads_load_all_reps(esw);
+ err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
if (err)
- goto err_reps;
+ goto err_vports;
esw_offloads_devcom_init(esw);
- mutex_init(&esw->offloads.termtbl_mutex);
return 0;
-err_reps:
- mlx5_eswitch_disable_pf_vf_vports(esw);
err_vports:
+ esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
+err_uplink:
esw_set_passing_vport_metadata(esw, false);
-err_vport_metadata:
- esw_offloads_steering_cleanup(esw);
err_steering_init:
+ esw_offloads_steering_cleanup(esw);
+err_vport_metadata:
mlx5_rdma_disable_roce(esw->dev);
+ mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
}
@@ -2065,11 +2397,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable(esw, false);
- err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
+ mlx5_eswitch_disable_locked(esw, false);
+ err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
+ MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
- err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
+ err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
+ MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to offloads");
@@ -2082,11 +2416,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
esw_offloads_devcom_cleanup(esw);
- esw_offloads_unload_all_reps(esw);
mlx5_eswitch_disable_pf_vf_vports(esw);
+ esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
mlx5_rdma_disable_roce(esw->dev);
+ mutex_destroy(&esw->offloads.termtbl_mutex);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
@@ -2166,60 +2501,82 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
-static int mlx5_devlink_eswitch_check(struct devlink *devlink)
+static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
-
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return -EOPNOTSUPP;
if (!MLX5_ESWITCH_MANAGER(dev))
return -EPERM;
- if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
- !mlx5_core_is_ecpf_esw_manager(dev))
- return -EOPNOTSUPP;
-
return 0;
}
+static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
+{
+ /* devlink commands in NONE eswitch mode are currently supported only
+ * on ECPF.
+ */
+ return (esw->mode == MLX5_ESWITCH_NONE &&
+ !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
u16 cur_mlx5_mode, mlx5_mode = 0;
int err;
- err = mlx5_devlink_eswitch_check(devlink);
+ err = mlx5_eswitch_check(dev);
if (err)
return err;
- cur_mlx5_mode = dev->priv.eswitch->mode;
-
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
+ mutex_lock(&esw->mode_lock);
+ err = eswitch_devlink_esw_mode_check(esw);
+ if (err)
+ goto unlock;
+
+ cur_mlx5_mode = esw->mode;
+
if (cur_mlx5_mode == mlx5_mode)
- return 0;
+ goto unlock;
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
- return esw_offloads_start(dev->priv.eswitch, extack);
+ err = esw_offloads_start(esw, extack);
else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
- return esw_offloads_stop(dev->priv.eswitch, extack);
+ err = esw_offloads_stop(esw, extack);
else
- return -EINVAL;
+ err = -EINVAL;
+
+unlock:
+ mutex_unlock(&esw->mode_lock);
+ return err;
}
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
- err = mlx5_devlink_eswitch_check(devlink);
+ err = mlx5_eswitch_check(dev);
if (err)
return err;
- return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
+ mutex_lock(&esw->mode_lock);
+ err = eswitch_devlink_esw_mode_check(dev->priv.eswitch);
+ if (err)
+ goto unlock;
+
+ err = esw_mode_to_devlink(esw->mode, mode);
+unlock:
+ mutex_unlock(&esw->mode_lock);
+ return err;
}
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
@@ -2230,18 +2587,24 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
int err, vport, num_vport;
u8 mlx5_mode;
- err = mlx5_devlink_eswitch_check(devlink);
+ err = mlx5_eswitch_check(dev);
if (err)
return err;
+ mutex_lock(&esw->mode_lock);
+ err = eswitch_devlink_esw_mode_check(esw);
+ if (err)
+ goto out;
+
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
- return 0;
+ goto out;
/* fall through */
case MLX5_CAP_INLINE_MODE_L2:
NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto out;
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
break;
}
@@ -2249,7 +2612,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (atomic64_read(&esw->offloads.num_flows) > 0) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set inline mode when flows are configured");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto out;
}
err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
@@ -2266,6 +2630,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
}
esw->offloads.inline_mode = mlx5_mode;
+ mutex_unlock(&esw->mode_lock);
return 0;
revert_inline_mode:
@@ -2275,6 +2640,7 @@ revert_inline_mode:
vport,
esw->offloads.inline_mode);
out:
+ mutex_unlock(&esw->mode_lock);
return err;
}
@@ -2284,48 +2650,19 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
- err = mlx5_devlink_eswitch_check(devlink);
+ err = mlx5_eswitch_check(dev);
if (err)
return err;
- return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
-}
-
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
-{
- u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
- struct mlx5_core_dev *dev = esw->dev;
- int vport;
-
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
- return -EOPNOTSUPP;
-
- if (esw->mode == MLX5_ESWITCH_NONE)
- return -EOPNOTSUPP;
-
- switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
- case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
- mlx5_mode = MLX5_INLINE_MODE_NONE;
- goto out;
- case MLX5_CAP_INLINE_MODE_L2:
- mlx5_mode = MLX5_INLINE_MODE_L2;
- goto out;
- case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
- goto query_vports;
- }
-
-query_vports:
- mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
- mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
- mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
- if (prev_mlx5_mode != mlx5_mode)
- return -EINVAL;
- prev_mlx5_mode = mlx5_mode;
- }
+ mutex_lock(&esw->mode_lock);
+ err = eswitch_devlink_esw_mode_check(esw);
+ if (err)
+ goto unlock;
-out:
- *mode = mlx5_mode;
- return 0;
+ err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
+unlock:
+ mutex_unlock(&esw->mode_lock);
+ return err;
}
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
@@ -2336,30 +2673,40 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
- err = mlx5_devlink_eswitch_check(devlink);
+ err = mlx5_eswitch_check(dev);
if (err)
return err;
+ mutex_lock(&esw->mode_lock);
+ err = eswitch_devlink_esw_mode_check(esw);
+ if (err)
+ goto unlock;
+
if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
(!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
- !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
- return -EOPNOTSUPP;
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
- if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
- return -EOPNOTSUPP;
+ if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
if (esw->mode == MLX5_ESWITCH_LEGACY) {
esw->offloads.encap = encap;
- return 0;
+ goto unlock;
}
if (esw->offloads.encap == encap)
- return 0;
+ goto unlock;
if (atomic64_read(&esw->offloads.num_flows) > 0) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set encapsulation when flows are configured");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto unlock;
}
esw_destroy_offloads_fdb_tables(esw);
@@ -2375,6 +2722,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
}
+unlock:
+ mutex_unlock(&esw->mode_lock);
return err;
}
@@ -2385,14 +2734,36 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
- err = mlx5_devlink_eswitch_check(devlink);
+ err = mlx5_eswitch_check(dev);
if (err)
return err;
+ mutex_lock(&esw->mode_lock);
+ err = eswitch_devlink_esw_mode_check(esw);
+ if (err)
+ goto unlock;
+
*encap = esw->offloads.encap;
+unlock:
+ mutex_unlock(&esw->mode_lock);
return 0;
}
+static bool
+mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ /* Currently, only an ECPF-based device has a representor for the host PF. */
+ if (vport_num == MLX5_VPORT_PF &&
+ !mlx5_core_is_ecpf_esw_manager(esw->dev))
+ return false;
+
+ if (vport_num == MLX5_VPORT_ECPF &&
+ !mlx5_ecpf_vport_exists(esw->dev))
+ return false;
+
+ return true;
+}
+
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
const struct mlx5_eswitch_rep_ops *ops,
u8 rep_type)
@@ -2403,8 +2774,10 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
esw->offloads.rep_ops[rep_type] = ops;
mlx5_esw_for_all_reps(esw, i, rep) {
- rep_data = &rep->rep_data[rep_type];
- atomic_set(&rep_data->state, REP_REGISTERED);
+ if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
+ rep_data = &rep->rep_data[rep_type];
+ atomic_set(&rep_data->state, REP_REGISTERED);
+ }
}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
@@ -2464,15 +2837,53 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
vport_num <= esw->dev->priv.sriov.max_vfs;
}
+bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
+{
+ return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
+}
+EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
+
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
-u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
+u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
u16 vport_num)
{
- return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num;
+ u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0);
+ u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
+ u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ u32 val;
+
+ /* Make sure the vhca_id fits within ESW_VHCA_ID_BITS */
+ WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));
+
+ /* Trim vhca_id to ESW_VHCA_ID_BITS */
+ vhca_id &= vhca_id_mask;
+
+ /* Make sure pf and ecpf map to the end of the ESW_VPORT_BITS range so
+ * they don't overlap with VF numbers, or with each other, after trimming.
+ */
+ WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) <
+ vport_num_mask - 1);
+ WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) <
+ vport_num_mask - 1);
+ WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) ==
+ (MLX5_VPORT_ECPF & vport_num_mask));
+
+ /* Make sure that the VF vport_num fits ESW_VPORT_BITS and doesn't
+ * overlap with pf and ecpf.
+ */
+ if (vport_num != MLX5_VPORT_UPLINK &&
+ vport_num != MLX5_VPORT_ECPF)
+ WARN_ON_ONCE(vport_num >= vport_num_mask - 1);
+
+ /* We can now trim vport_num to ESW_VPORT_BITS */
+ vport_num &= vport_num_mask;
+
+ val = (vhca_id << ESW_VPORT_BITS) | vport_num;
+ return val << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
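The helper above packs a trimmed vhca_id and vport number into the upper bits of a 32-bit metadata register. A minimal userspace sketch of the packing, assuming the 4-bit vhca_id / 12-bit vport split this series uses for the ESW_* defines (the constants below are local stand-ins, not the kernel headers):

/* Standalone sketch of the vport-metadata packing; bit widths are
 * assumptions matching this series' ESW_* defines.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ESW_VHCA_ID_BITS 4
#define ESW_VPORT_BITS 12
#define ESW_SOURCE_PORT_METADATA_BITS (ESW_VHCA_ID_BITS + ESW_VPORT_BITS)

static uint32_t vport_metadata_for_match(uint32_t vhca_id, uint32_t vport_num)
{
	uint32_t vport_mask = (1u << ESW_VPORT_BITS) - 1;
	uint32_t vhca_mask = (1u << ESW_VHCA_ID_BITS) - 1;
	uint32_t val;

	vhca_id &= vhca_mask;          /* trim to ESW_VHCA_ID_BITS */
	vport_num &= vport_mask;       /* trim to ESW_VPORT_BITS */

	/* vhca_id above the vport bits ... */
	val = (vhca_id << ESW_VPORT_BITS) | vport_num;
	/* ... then shift the whole field to the top of the register */
	return val << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}

int main(void)
{
	/* vhca_id 0x3, vport 0xa -> 0x300a in the upper 16 bits */
	assert(vport_metadata_for_match(0x3, 0xa) == 0x300a0000u);
	printf("0x%08x\n", vport_metadata_for_match(0x3, 0xa));
	return 0;
}

With those widths, ESW_SOURCE_PORT_METADATA_BITS is 16, so the packed field occupies the top half of the register and the low half stays free for other metadata users.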
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index dc08ed9339ab..17a0d2bc102b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -3,6 +3,7 @@
#include <linux/mlx5/fs.h>
#include "eswitch.h"
+#include "fs_core.h"
struct mlx5_termtbl_handle {
struct hlist_node termtbl_hlist;
@@ -28,6 +29,10 @@ mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act,
sizeof(dest->vport.num), hash);
hash = jhash((const void *)&dest->vport.vhca_id,
sizeof(dest->vport.num), hash);
+ if (dest->vport.pkt_reformat)
+ hash = jhash(dest->vport.pkt_reformat,
+ sizeof(*dest->vport.pkt_reformat),
+ hash);
return hash;
}
@@ -37,11 +42,19 @@ mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1,
struct mlx5_flow_act *flow_act2,
struct mlx5_flow_destination *dest2)
{
- return flow_act1->action != flow_act2->action ||
- dest1->vport.num != dest2->vport.num ||
- dest1->vport.vhca_id != dest2->vport.vhca_id ||
- memcmp(&flow_act1->vlan, &flow_act2->vlan,
- sizeof(flow_act1->vlan));
+ int ret;
+
+ ret = flow_act1->action != flow_act2->action ||
+ dest1->vport.num != dest2->vport.num ||
+ dest1->vport.vhca_id != dest2->vport.vhca_id ||
+ memcmp(&flow_act1->vlan, &flow_act2->vlan,
+ sizeof(flow_act1->vlan));
+ if (ret)
+ return ret;
+
+ return dest1->vport.pkt_reformat && dest2->vport.pkt_reformat ?
+ memcmp(dest1->vport.pkt_reformat, dest2->vport.pkt_reformat,
+ sizeof(*dest1->vport.pkt_reformat)) : 0;
}
static int
@@ -49,7 +62,6 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
struct mlx5_termtbl_handle *tt,
struct mlx5_flow_act *flow_act)
{
- static const struct mlx5_flow_spec spec = {};
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
int err;
@@ -63,7 +75,8 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
/* As this is the terminating action, the termination table is at the
* same prio as the slow path
*/
- ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION;
+ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION |
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
ft_attr.prio = FDB_SLOW_PATH;
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
@@ -73,9 +86,8 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
return -EOPNOTSUPP;
}
- tt->rule = mlx5_add_flow_rules(tt->termtbl, &spec, flow_act,
+ tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
&tt->dest, 1);
-
if (IS_ERR(tt->rule)) {
esw_warn(dev, "Failed to create termination table rule\n");
goto add_flow_err;
@@ -93,7 +105,8 @@ add_flow_err:
static struct mlx5_termtbl_handle *
mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
struct mlx5_flow_act *flow_act,
- struct mlx5_flow_destination *dest)
+ struct mlx5_flow_destination *dest,
+ struct mlx5_esw_flow_attr *attr)
{
struct mlx5_termtbl_handle *tt;
bool found = false;
@@ -101,7 +114,6 @@ mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
int err;
mutex_lock(&esw->offloads.termtbl_mutex);
-
hash_key = mlx5_eswitch_termtbl_hash(flow_act, dest);
hash_for_each_possible(esw->offloads.termtbl_tbl, tt,
termtbl_hlist, hash_key) {
@@ -123,6 +135,7 @@ mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
tt->dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
tt->dest.vport.num = dest->vport.num;
tt->dest.vport.vhca_id = dest->vport.vhca_id;
+ tt->dest.vport.flags = dest->vport.flags;
memcpy(&tt->flow_act, flow_act, sizeof(*flow_act));
err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act);
@@ -157,31 +170,50 @@ mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
}
}
+static bool mlx5_eswitch_termtbl_is_encap_reformat(struct mlx5_pkt_reformat *rt)
+{
+ switch (rt->reformat_type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ return true;
+ default:
+ return false;
+ }
+}
+
static void
mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
struct mlx5_flow_act *dst)
{
- if (!(src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))
- return;
-
- src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- memcpy(&dst->vlan[0], &src->vlan[0], sizeof(src->vlan[0]));
- memset(&src->vlan[0], 0, sizeof(src->vlan[0]));
-
- if (!(src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
- return;
+ if (src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ memcpy(&dst->vlan[0], &src->vlan[0], sizeof(src->vlan[0]));
+ memset(&src->vlan[0], 0, sizeof(src->vlan[0]));
+
+ if (src->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+ dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+ memcpy(&dst->vlan[1], &src->vlan[1], sizeof(src->vlan[1]));
+ memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
+ }
+ }
- src->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
- dst->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
- memcpy(&dst->vlan[1], &src->vlan[1], sizeof(src->vlan[1]));
- memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
+ if (src->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
+ mlx5_eswitch_termtbl_is_encap_reformat(src->pkt_reformat)) {
+ src->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ dst->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ dst->pkt_reformat = src->pkt_reformat;
+ src->pkt_reformat = NULL;
+ }
}
static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
const struct mlx5_flow_spec *spec)
{
- u32 port_mask, port_value;
+ u16 port_mask, port_value;
if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
return spec->flow_context.flow_source ==
@@ -191,20 +223,32 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
misc_parameters.source_port);
port_value = MLX5_GET(fte_match_param, spec->match_value,
misc_parameters.source_port);
- return (port_mask & port_value & 0xffff) == MLX5_VPORT_UPLINK;
+ return (port_mask & port_value) == MLX5_VPORT_UPLINK;
}
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_spec *spec)
{
- if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table))
+ int i;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
+ attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
+ !mlx5_eswitch_offload_is_uplink_port(esw, spec))
return false;
/* push vlan on RX */
- return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) &&
- mlx5_eswitch_offload_is_uplink_port(esw, spec);
+ if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)
+ return true;
+
+ /* hairpin */
+ for (i = attr->split_count; i < attr->out_count; i++)
+ if (attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
+ return true;
+
+ return false;
}
struct mlx5_flow_handle *
@@ -234,7 +278,7 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
/* get the terminating table for the action list */
tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
- &dest[i]);
+ &dest[i], attr);
if (IS_ERR(tt)) {
esw_warn(esw->dev, "Failed to create termination table\n");
goto revert_changes;
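mlx5_eswitch_termtbl_actions_move() above transfers VLAN-push and, with this patch, encap-reformat actions from the original rule into the termination table, clearing each flag on the source and carrying the payload along. A standalone sketch of that move pattern, with hypothetical flag names:

/* Minimal sketch of the "move an action from src to dst" shape; the
 * ACT_* flags and payload fields are hypothetical.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define ACT_VLAN_PUSH (1u << 0)
#define ACT_PKT_REFORMAT (1u << 1)

struct flow_act {
	uint32_t action;     /* bitmask of enabled actions */
	uint16_t vlan;       /* payload for ACT_VLAN_PUSH */
	void *pkt_reformat;  /* payload for ACT_PKT_REFORMAT */
};

static void actions_move(struct flow_act *src, struct flow_act *dst)
{
	if (src->action & ACT_VLAN_PUSH) {
		src->action &= ~ACT_VLAN_PUSH;   /* disable on source... */
		dst->action |= ACT_VLAN_PUSH;    /* ...enable on destination */
		dst->vlan = src->vlan;           /* move the payload along */
		src->vlan = 0;
	}
	if (src->action & ACT_PKT_REFORMAT) {
		src->action &= ~ACT_PKT_REFORMAT;
		dst->action |= ACT_PKT_REFORMAT;
		dst->pkt_reformat = src->pkt_reformat;
		src->pkt_reformat = NULL;        /* source no longer owns it */
	}
}

int main(void)
{
	struct flow_act src = { .action = ACT_VLAN_PUSH, .vlan = 100 };
	struct flow_act dst = { 0 };

	actions_move(&src, &dst);
	printf("src=0x%x dst=0x%x vlan=%u\n", src.action, dst.action, dst.vlan);
	return 0;
}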
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 4c61d25d2e88..b794888fa3ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -57,7 +57,7 @@ struct mlx5_fpga_ipsec_cmd_context {
struct completion complete;
struct mlx5_fpga_device *dev;
struct list_head list; /* Item in pending_cmds */
- u8 command[0];
+ u8 command[];
};
struct mlx5_fpga_esp_xfrm;
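The command[0] to command[] change converts a GCC zero-length array into a C99 flexible array member, a conversion repeated across this series (fs_counters.c and ipoib.h below). The allocation pattern is unchanged; a standalone sketch:

/* A C99 flexible array member replaces the old zero-length-array GCC
 * extension; one allocation covers the header and the trailing payload.
 */
#include <stdlib.h>
#include <string.h>

struct cmd_context {
	size_t len;
	unsigned char command[];   /* flexible array member, must be last */
};

struct cmd_context *cmd_alloc(const void *payload, size_t len)
{
	struct cmd_context *ctx = malloc(sizeof(*ctx) + len);

	if (!ctx)
		return NULL;
	ctx->len = len;
	memcpy(ctx->command, payload, len);
	return ctx;
}

In-kernel code usually computes such sizes with the struct_size() helper from <linux/overflow.h>, which also guards against overflow.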
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9dc24241dc91..62ce2b9417ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -110,9 +110,9 @@
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
-#define OFFLOADS_MAX_FT 1
-#define OFFLOADS_NUM_PRIOS 1
-#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
+#define OFFLOADS_MAX_FT 2
+#define OFFLOADS_NUM_PRIOS 2
+#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
@@ -145,7 +145,7 @@ static struct init_tree_node {
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
LAG_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
+ ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
OFFLOADS_MAX_FT))),
@@ -1322,7 +1322,7 @@ add_rule_fte(struct fs_fte *fte,
fte->node.active = true;
fte->status |= FS_FTE_STATUS_EXISTING;
- atomic_inc(&fte->node.version);
+ atomic_inc(&fg->node.version);
out:
return handle;
@@ -1577,28 +1577,19 @@ struct match_list {
struct mlx5_flow_group *g;
};
-struct match_list_head {
- struct list_head list;
- struct match_list first;
-};
-
-static void free_match_list(struct match_list_head *head, bool ft_locked)
+static void free_match_list(struct match_list *head, bool ft_locked)
{
- if (!list_empty(&head->list)) {
- struct match_list *iter, *match_tmp;
+ struct match_list *iter, *match_tmp;
- list_del(&head->first.list);
- tree_put_node(&head->first.g->node, ft_locked);
- list_for_each_entry_safe(iter, match_tmp, &head->list,
- list) {
- tree_put_node(&iter->g->node, ft_locked);
- list_del(&iter->list);
- kfree(iter);
- }
+ list_for_each_entry_safe(iter, match_tmp, &head->list,
+ list) {
+ tree_put_node(&iter->g->node, ft_locked);
+ list_del(&iter->list);
+ kfree(iter);
}
}
-static int build_match_list(struct match_list_head *match_head,
+static int build_match_list(struct match_list *match_head,
struct mlx5_flow_table *ft,
const struct mlx5_flow_spec *spec,
bool ft_locked)
@@ -1615,14 +1606,8 @@ static int build_match_list(struct match_list_head *match_head,
rhl_for_each_entry_rcu(g, tmp, list, hash) {
struct match_list *curr_match;
- if (likely(list_empty(&match_head->list))) {
- if (!tree_get_node(&g->node))
- continue;
- match_head->first.g = g;
- list_add_tail(&match_head->first.list,
- &match_head->list);
+ if (unlikely(!tree_get_node(&g->node)))
continue;
- }
curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
if (!curr_match) {
@@ -1630,10 +1615,6 @@ static int build_match_list(struct match_list_head *match_head,
err = -ENOMEM;
goto out;
}
- if (!tree_get_node(&g->node)) {
- kfree(curr_match);
- continue;
- }
curr_match->g = g;
list_add_tail(&curr_match->list, &match_head->list);
}
@@ -1699,7 +1680,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct match_list *iter;
bool take_write = false;
struct fs_fte *fte;
- u64 version;
+ u64 version = 0;
int err;
fte = alloc_fte(ft, spec, flow_act);
@@ -1707,10 +1688,12 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
return ERR_PTR(-ENOMEM);
search_again_locked:
- version = matched_fgs_get_version(match_head);
if (flow_act->flags & FLOW_ACT_NO_APPEND)
goto skip_search;
- /* Try to find a fg that already contains a matching fte */
+ version = matched_fgs_get_version(match_head);
+ /* Try to find an fte with identical match value and attempt to
+ * update its action.
+ */
list_for_each_entry(iter, match_head, list) {
struct fs_fte *fte_tmp;
@@ -1738,10 +1721,12 @@ skip_search:
goto out;
}
- /* Check the fgs version, for case the new FTE with the
- * same values was added while the fgs weren't locked
+ /* Check the fgs version. If the version has changed, it could be
+ * that an FTE with the same match value was added while the fgs
+ * weren't locked.
*/
- if (version != matched_fgs_get_version(match_head)) {
+ if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
+ version != matched_fgs_get_version(match_head)) {
take_write = true;
goto search_again_locked;
}
@@ -1785,9 +1770,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
{
struct mlx5_flow_steering *steering = get_steering(&ft->node);
- struct mlx5_flow_group *g;
struct mlx5_flow_handle *rule;
- struct match_list_head match_head;
+ struct match_list match_head;
+ struct mlx5_flow_group *g;
bool take_write = false;
struct fs_fte *fte;
int version;
@@ -1892,12 +1877,16 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
int num_dest)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+ static const struct mlx5_flow_spec zero_spec = {};
struct mlx5_flow_destination gen_dest = {};
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_handle *handle = NULL;
u32 sw_action = flow_act->action;
struct fs_prio *prio;
+ if (!spec)
+ spec = &zero_spec;
+
fs_get_obj(prio, ft->node.parent);
if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!fwd_next_prio_supported(ft))
@@ -2700,6 +2689,17 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
goto out_err;
}
+ /* We put this priority last, knowing that nothing will get here
+ * unless explicitly forwarded to. This is possible because the
+ * slow path tables have catch-all rules and nothing gets past
+ * those tables.
+ */
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
set_prio_attrs(steering->fdb_root_ns);
return 0;
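With the zero_spec hunk above, mlx5_add_flow_rules() treats a NULL spec as an empty, match-all spec, which is what lets the termination-table code earlier in this patch pass NULL instead of allocating a dummy spec. A minimal sketch of that default-argument pattern:

/* Keep one static zeroed instance and point NULL callers at it, rather
 * than making every caller build an empty spec.
 */
#include <stdio.h>

struct flow_spec {
	unsigned int match_criteria_enable;
	unsigned char match_value[16];
};

static int add_flow_rule(const struct flow_spec *spec)
{
	static const struct flow_spec zero_spec;  /* zero-initialized */

	if (!spec)
		spec = &zero_spec;   /* behaves as a match-all rule */

	printf("criteria=0x%x\n", spec->match_criteria_enable);
	return 0;
}

int main(void)
{
	return add_flow_rule(NULL);  /* valid: acts as an empty spec */
}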
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index ab69effb056d..f43caefd07a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -470,7 +470,7 @@ struct mlx5_fc_bulk {
u32 base_id;
int bulk_len;
unsigned long *bitmask;
- struct mlx5_fc fcs[0];
+ struct mlx5_fc fcs[];
};
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
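The counter bulk keeps a bitmask of free slots ahead of the flexible fcs[] array. A userspace sketch of bitmap-backed slot allocation (the kernel uses the <linux/bitmap.h> helpers rather than this open-coded scan):

#include <stdint.h>
#include <stdio.h>

#define BULK_LEN 64

struct fc_bulk {
	uint64_t bitmask;     /* bit set => slot free */
	int fcs[BULK_LEN];    /* stand-in for the flexible fcs[] array */
};

int bulk_get_free_slot(struct fc_bulk *bulk)
{
	int i;

	for (i = 0; i < BULK_LEN; i++) {
		if (bulk->bitmask & (1ull << i)) {
			bulk->bitmask &= ~(1ull << i);  /* claim it */
			return i;
		}
	}
	return -1;  /* bulk exhausted */
}

void bulk_put_slot(struct fc_bulk *bulk, int i)
{
	bulk->bitmask |= 1ull << i;  /* mark free again */
}

int main(void)
{
	struct fc_bulk bulk = { .bitmask = ~0ull };
	int slot = bulk_get_free_slot(&bulk);

	printf("got slot %d\n", slot);
	bulk_put_slot(&bulk, slot);
	return 0;
}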
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 909a7f284614..90e3d0233101 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -613,6 +613,44 @@ static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
fwhandle, 0);
}
+#define MLX5_FSM_REACTIVATE_TOUT 5000 /* msecs */
+static int mlx5_fsm_reactivate(struct mlxfw_dev *mlxfw_dev, u8 *status)
+{
+ unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5_FSM_REACTIVATE_TOUT);
+ struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
+ container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
+ struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
+ u32 out[MLX5_ST_SZ_DW(mirc_reg)];
+ u32 in[MLX5_ST_SZ_DW(mirc_reg)];
+ int err;
+
+ if (!MLX5_CAP_MCAM_REG2(dev, mirc))
+ return -EOPNOTSUPP;
+
+ memset(in, 0, sizeof(in));
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MIRC, 0, 1);
+ if (err)
+ return err;
+
+ do {
+ memset(out, 0, sizeof(out));
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MIRC, 0, 0);
+ if (err)
+ return err;
+
+ *status = MLX5_GET(mirc_reg, out, status_code);
+ if (*status != MLXFW_FSM_REACTIVATE_STATUS_BUSY)
+ return 0;
+
+ msleep(20);
+ } while (time_before(jiffies, exp_time));
+
+ return 0;
+}
+
static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
.component_query = mlx5_component_query,
.fsm_lock = mlx5_fsm_lock,
@@ -620,6 +658,7 @@ static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
.fsm_block_download = mlx5_fsm_block_download,
.fsm_component_verify = mlx5_fsm_component_verify,
.fsm_activate = mlx5_fsm_activate,
+ .fsm_reactivate = mlx5_fsm_reactivate,
.fsm_query_state = mlx5_fsm_query_state,
.fsm_cancel = mlx5_fsm_cancel,
.fsm_release = mlx5_fsm_release
@@ -634,6 +673,7 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev,
.ops = &mlx5_mlxfw_dev_ops,
.psid = dev->board_id,
.psid_size = strlen(dev->board_id),
+ .devlink = priv_to_devlink(dev),
},
.mlx5_core_dev = dev
};
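mlx5_fsm_reactivate() above polls the MIRC register until the status leaves BUSY or a 5-second deadline passes, and then reports the last status rather than a timeout error. A userspace analogue of that deadline loop, with check_status() as a hypothetical stand-in for the register read:

#include <stdbool.h>
#include <time.h>

#define REACTIVATE_TOUT_MS 5000

static bool check_status(void)  /* hypothetical: true when no longer busy */
{
	return false;
}

int poll_until_done(void)
{
	struct timespec now, deadline;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += REACTIVATE_TOUT_MS / 1000;

	do {
		if (check_status())
			return 0;            /* done before the deadline */
		/* 20 ms between polls, mirroring the msleep(20) above */
		nanosleep(&(struct timespec){ .tv_nsec = 20 * 1000000 }, NULL);
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while (now.tv_sec < deadline.tv_sec ||
		 (now.tv_sec == deadline.tv_sec &&
		  now.tv_nsec < deadline.tv_nsec));

	/* like the driver, report the last status, not a timeout error */
	return 0;
}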
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index d9f4e8c59c1f..fa1665caac46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -627,7 +627,7 @@ static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
health->fw_reporter =
devlink_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
- 0, false, dev);
+ 0, dev);
if (IS_ERR(health->fw_reporter))
mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
PTR_ERR(health->fw_reporter));
@@ -636,7 +636,7 @@ static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
devlink_health_reporter_create(devlink,
&mlx5_fw_fatal_reporter_ops,
MLX5_REPORTER_FW_GRACEFUL_PERIOD,
- true, dev);
+ dev);
if (IS_ERR(health->fw_fatal_reporter))
mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
PTR_ERR(health->fw_fatal_reporter));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 90cb50fe17fd..1eef66ee849e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -235,6 +235,9 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
}
const struct ethtool_ops mlx5i_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = mlx5i_get_drvinfo,
.get_strings = mlx5i_get_strings,
.get_sset_count = mlx5i_get_sset_count,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 56078b23f1a0..673aaa815f57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -483,7 +483,7 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
new_channels.params = *params;
new_channels.params.sw_mtu = new_mtu;
- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index c87962cab921..de7e01a027bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -56,7 +56,7 @@ struct mlx5i_priv {
u32 qkey;
u16 pkey_index;
struct mlx5i_pkey_qpn_ht *qpn_htbl;
- char *mlx5e_priv[0];
+ char *mlx5e_priv[];
};
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
@@ -107,7 +107,7 @@ struct mlx5i_tx_wqe {
struct mlx5_wqe_datagram_seg datagram;
struct mlx5_wqe_eth_pad pad;
struct mlx5_wqe_eth_seg eth;
- struct mlx5_wqe_data_seg data[0];
+ struct mlx5_wqe_data_seg data[];
};
static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 416676c35b1f..e9089a793632 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -93,9 +93,8 @@ static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
static void mlx5_lag_fib_event_flush(struct notifier_block *nb)
{
struct lag_mp *mp = container_of(nb, struct lag_mp, fib_nb);
- struct mlx5_lag *ldev = container_of(mp, struct mlx5_lag, lag_mp);
- flush_workqueue(ldev->wq);
+ flush_workqueue(mp->wq);
}
struct mlx5_fib_event_work {
@@ -293,7 +292,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
- queue_work(ldev->wq, &fib_work->work);
+ queue_work(mp->wq, &fib_work->work);
return NOTIFY_DONE;
}
@@ -306,11 +305,17 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
if (mp->fib_nb.notifier_call)
return 0;
+ mp->wq = create_singlethread_workqueue("mlx5_lag_mp");
+ if (!mp->wq)
+ return -ENOMEM;
+
mp->fib_nb.notifier_call = mlx5_lag_fib_event;
err = register_fib_notifier(&init_net, &mp->fib_nb,
mlx5_lag_fib_event_flush, NULL);
- if (err)
+ if (err) {
+ destroy_workqueue(mp->wq);
mp->fib_nb.notifier_call = NULL;
+ }
return err;
}
@@ -323,5 +328,6 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
return;
unregister_fib_notifier(&init_net, &mp->fib_nb);
+ destroy_workqueue(mp->wq);
mp->fib_nb.notifier_call = NULL;
}
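The LAG multipath code now owns a private single-threaded workqueue instead of borrowing ldev->wq: created before the FIB notifier is registered, destroyed if registration fails, and flushed then destroyed on cleanup. A kernel-module-style sketch of that lifecycle (the my_mod names are hypothetical):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
	pr_info("event handled\n");
}
static DECLARE_WORK(my_work, my_work_fn);

static int __init my_mod_init(void)
{
	my_wq = create_singlethread_workqueue("my_mod");
	if (!my_wq)
		return -ENOMEM;

	queue_work(my_wq, &my_work);   /* handlers queue here, not schedule() */
	return 0;
}

static void __exit my_mod_exit(void)
{
	flush_workqueue(my_wq);        /* let queued events finish... */
	destroy_workqueue(my_wq);      /* ...then release the wq */
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");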
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
index 79be89e9c7a4..258ac7b2964e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
@@ -16,6 +16,7 @@ enum mlx5_lag_port_affinity {
struct lag_mp {
struct notifier_block fib_nb;
struct fib_info *mfi; /* used in tracking fib events */
+ struct workqueue_struct *wq;
};
#ifdef CONFIG_MLX5_ESWITCH
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index 3fc575d1c3ec..dcea87ec5977 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -42,7 +42,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
MLX5_SET(encryption_key_obj, obj, key_size, general_obj_key_size);
MLX5_SET(encryption_key_obj, obj, key_type,
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK);
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS);
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index e065c2f68f5a..6cbccba56f70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -21,7 +21,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
struct mlx5_dm *dm;
if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM))
- return 0;
+ return NULL;
dm = kzalloc(sizeof(*dm), GFP_KERNEL);
if (!dm)
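The fix above matters because mlx5_dm_create() returns a pointer: NULL means the SW ICM capability is simply absent and the caller proceeds without it, while an ERR_PTR-encoded errno (as on the allocation-failure path) means a real failure. A userspace sketch of that convention, with minimal stand-ins for the <linux/err.h> helpers:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(intptr_t)(err))
#define PTR_ERR(ptr) ((long)(intptr_t)(ptr))
#define IS_ERR(ptr) ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct dm { int dummy; };

struct dm *dm_create(int supported)
{
	struct dm *dm;

	if (!supported)
		return NULL;              /* absent: caller carries on without it */

	dm = calloc(1, sizeof(*dm));
	if (!dm)
		return ERR_PTR(-ENOMEM); /* genuine failure, distinguishable
					  * from "not supported" via IS_ERR() */
	return dm;
}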
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f554cfddcf4e..7af4210c1b96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -70,6 +70,7 @@
#include "diag/fw_tracer.h"
#include "ecpf.h"
#include "lib/hv_vhca.h"
+#include "diag/rsc_dump.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -880,6 +881,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
dev->tracer = mlx5_fw_tracer_create(dev);
dev->hv_vhca = mlx5_hv_vhca_create(dev);
+ dev->rsc_dump = mlx5_rsc_dump_create(dev);
return 0;
@@ -909,6 +911,7 @@ err_devcom:
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
+ mlx5_rsc_dump_destroy(dev);
mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_dm_cleanup(dev);
@@ -1079,6 +1082,12 @@ static int mlx5_load(struct mlx5_core_dev *dev)
mlx5_hv_vhca_init(dev->hv_vhca);
+ err = mlx5_rsc_dump_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to init Resource dump\n");
+ goto err_rsc_dump;
+ }
+
err = mlx5_fpga_device_start(dev);
if (err) {
mlx5_core_err(dev, "fpga device start failed %d\n", err);
@@ -1134,6 +1143,8 @@ err_tls_start:
err_ipsec_start:
mlx5_fpga_device_stop(dev);
err_fpga_start:
+ mlx5_rsc_dump_cleanup(dev);
+err_rsc_dump:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
@@ -1155,6 +1166,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_accel_ipsec_cleanup(dev);
mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
+ mlx5_rsc_dump_cleanup(dev);
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_eq_table_destroy(dev);
@@ -1199,15 +1211,10 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
goto err_devlink_reg;
}
- if (mlx5_device_registered(dev)) {
+ if (mlx5_device_registered(dev))
mlx5_attach_device(dev);
- } else {
- err = mlx5_register_device(dev);
- if (err) {
- mlx5_core_err(dev, "register device failed %d\n", err);
- goto err_reg_dev;
- }
- }
+ else
+ mlx5_register_device(dev);
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
@@ -1215,9 +1222,6 @@ out:
return err;
-err_reg_dev:
- if (boot)
- mlx5_devlink_unregister(priv_to_devlink(dev));
err_devlink_reg:
mlx5_unload(dev);
err_load:
@@ -1231,7 +1235,7 @@ function_teardown:
return err;
}
-int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
+void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
if (cleanup) {
mlx5_unregister_device(dev);
@@ -1260,7 +1264,6 @@ int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
mlx5_function_teardown(dev, cleanup);
out:
mutex_unlock(&dev->intf_state_mutex);
- return 0;
}
static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
@@ -1282,7 +1285,6 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
mutex_init(&priv->alloc_mutex);
mutex_init(&priv->pgdir_mutex);
INIT_LIST_HEAD(&priv->pgdir_list);
- spin_lock_init(&priv->mkey_lock);
priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
mlx5_debugfs_root);
@@ -1381,12 +1383,7 @@ static void remove_one(struct pci_dev *pdev)
mlx5_crdump_disable(dev);
mlx5_devlink_unregister(devlink);
- if (mlx5_unload_one(dev, true)) {
- mlx5_core_err(dev, "mlx5_unload_one failed\n");
- mlx5_health_flush(dev);
- return;
- }
-
+ mlx5_unload_one(dev, true);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
mlx5_devlink_free(devlink);
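The rsc_dump hunks above slot a new init step into mlx5_load()'s existing goto-unwind ladder: a failing step jumps to a label that tears down only the steps that already succeeded, in reverse order. A minimal standalone sketch of the ladder:

#include <stdio.h>

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return -1; }   /* pretend this step fails */
static void cleanup_a(void) { puts("cleanup a"); }
static void cleanup_b(void) { puts("cleanup b"); }

static int load(void)
{
	int err;

	err = init_a();
	if (err)
		goto err_a;
	err = init_b();
	if (err)
		goto err_b;
	err = init_c();                 /* a new step slots in here... */
	if (err)
		goto err_c;             /* ...with its own unwind label */
	return 0;

err_c:
	cleanup_b();                    /* undo only what succeeded */
err_b:
	cleanup_a();
err_a:
	return err;
}

int main(void)
{
	return load() ? 1 : 0;
}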
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index da67b28d6e23..a8fb43a85d1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -182,7 +182,7 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
void mlx5_attach_device(struct mlx5_core_dev *dev);
void mlx5_detach_device(struct mlx5_core_dev *dev);
bool mlx5_device_registered(struct mlx5_core_dev *dev);
-int mlx5_register_device(struct mlx5_core_dev *dev);
+void mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
@@ -244,6 +244,6 @@ enum {
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
-int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
+void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
int mlx5_load_one(struct mlx5_core_dev *dev, bool boot);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 42cc3c7ac5b6..366f2cbfc6db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -36,54 +36,31 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
-int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- struct mlx5_async_ctx *async_ctx, u32 *in,
- int inlen, u32 *out, int outlen,
- mlx5_async_cbk_t callback,
- struct mlx5_async_work *context)
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey,
+ u32 *in, int inlen)
{
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
u32 mkey_index;
void *mkc;
int err;
- u8 key;
-
- spin_lock_irq(&dev->priv.mkey_lock);
- key = dev->priv.mkey_key++;
- spin_unlock_irq(&dev->priv.mkey_lock);
- mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
- MLX5_SET(mkc, mkc, mkey_7_0, key);
-
- if (callback)
- return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
- callback, context);
err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout));
if (err)
return err;
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
mkey->size = MLX5_GET64(mkc, mkc, len);
- mkey->key = mlx5_idx_to_mkey(mkey_index) | key;
+ mkey->key |= mlx5_idx_to_mkey(mkey_index);
mkey->pd = MLX5_GET(mkc, mkc, pd);
- mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
- mkey_index, key, mkey->key);
+ mlx5_core_dbg(dev, "out 0x%x, mkey 0x%x\n", mkey_index, mkey->key);
return 0;
}
-EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
-
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- u32 *in, int inlen)
-{
- return mlx5_core_create_mkey_cb(dev, mkey, NULL, in, inlen,
- NULL, 0, NULL, NULL);
-}
EXPORT_SYMBOL(mlx5_core_create_mkey);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index 01c380425f9d..f3b29d9ade1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -101,22 +101,39 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
+ u16 uid)
+{
+ return (!memcmp(entry->rl_raw, rl_in, sizeof(entry->rl_raw)) &&
+ entry->uid == uid);
+}
+
/* Finds an entry where we can register the given rate
* If the rate already exists, return the entry where it is registered,
* otherwise return the first available entry.
* If the table is full, return NULL
*/
static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
- struct mlx5_rate_limit *rl)
+ void *rl_in, u16 uid, bool dedicated)
{
struct mlx5_rl_entry *ret_entry = NULL;
bool empty_found = false;
int i;
for (i = 0; i < table->max_size; i++) {
- if (mlx5_rl_are_equal(&table->rl_entry[i].rl, rl))
- return &table->rl_entry[i];
- if (!empty_found && !table->rl_entry[i].rl.rate) {
+ if (dedicated) {
+ if (!table->rl_entry[i].refcount)
+ return &table->rl_entry[i];
+ continue;
+ }
+
+ if (table->rl_entry[i].refcount) {
+ if (table->rl_entry[i].dedicated)
+ continue;
+ if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in,
+ uid))
+ return &table->rl_entry[i];
+ } else if (!empty_found) {
empty_found = true;
ret_entry = &table->rl_entry[i];
}
@@ -126,18 +143,19 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
}
static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
- u16 index,
- struct mlx5_rate_limit *rl)
+ struct mlx5_rl_entry *entry, bool set)
{
- u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {};
+ void *pp_context;
+ pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
MLX5_SET(set_pp_rate_limit_in, in, opcode,
MLX5_CMD_OP_SET_PP_RATE_LIMIT);
- MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
- MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rl->rate);
- MLX5_SET(set_pp_rate_limit_in, in, burst_upper_bound, rl->max_burst_sz);
- MLX5_SET(set_pp_rate_limit_in, in, typical_packet_size, rl->typical_pkt_sz);
+ MLX5_SET(set_pp_rate_limit_in, in, uid, entry->uid);
+ MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
+ if (set)
+ memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
@@ -158,23 +176,25 @@ bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
}
EXPORT_SYMBOL(mlx5_rl_are_equal);
-int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
- struct mlx5_rate_limit *rl)
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+ bool dedicated_entry, u16 *index)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
struct mlx5_rl_entry *entry;
int err = 0;
+ u32 rate;
+ rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
mutex_lock(&table->rl_lock);
- if (!rl->rate || !mlx5_rl_is_in_range(dev, rl->rate)) {
+ if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
- rl->rate, table->min_rate, table->max_rate);
+ rate, table->min_rate, table->max_rate);
err = -EINVAL;
goto out;
}
- entry = find_rl_entry(table, rl);
+ entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
if (!entry) {
mlx5_core_err(dev, "Max number of %u rates reached\n",
table->max_size);
@@ -185,16 +205,24 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
/* rate already configured */
entry->refcount++;
} else {
+ memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
+ entry->uid = uid;
/* new rate limit */
- err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl);
+ err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
if (err) {
- mlx5_core_err(dev, "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
- err, rl->rate, rl->max_burst_sz,
- rl->typical_pkt_sz);
+ mlx5_core_err(
+ dev,
+ "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
+ err, rate,
+ MLX5_GET(set_pp_rate_limit_context, rl_in,
+ burst_upper_bound),
+ MLX5_GET(set_pp_rate_limit_context, rl_in,
+ typical_packet_size));
goto out;
}
- entry->rl = *rl;
+
entry->refcount = 1;
+ entry->dedicated = dedicated_entry;
}
*index = entry->index;
@@ -202,20 +230,61 @@ out:
mutex_unlock(&table->rl_lock);
return err;
}
+EXPORT_SYMBOL(mlx5_rl_add_rate_raw);
+
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ struct mlx5_rl_entry *entry;
+
+ mutex_lock(&table->rl_lock);
+ entry = &table->rl_entry[index - 1];
+ entry->refcount--;
+ if (!entry->refcount)
+ /* need to remove rate */
+ mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+ mutex_unlock(&table->rl_lock);
+}
+EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);
+
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
+ struct mlx5_rate_limit *rl)
+{
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
+
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+ rl->max_burst_sz);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+ rl->typical_pkt_sz);
+
+ return mlx5_rl_add_rate_raw(dev, rl_raw,
+ MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+ MLX5_SHARED_RESOURCE_UID : 0,
+ false, index);
+}
EXPORT_SYMBOL(mlx5_rl_add_rate);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
{
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
struct mlx5_rl_table *table = &dev->priv.rl_table;
struct mlx5_rl_entry *entry = NULL;
- struct mlx5_rate_limit reset_rl = {0};
/* 0 is a reserved value for unlimited rate */
if (rl->rate == 0)
return;
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
+ rl->max_burst_sz);
+ MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
+ rl->typical_pkt_sz);
+
mutex_lock(&table->rl_lock);
- entry = find_rl_entry(table, rl);
+ entry = find_rl_entry(table, rl_raw,
+ MLX5_CAP_QOS(dev, packet_pacing_uid) ?
+ MLX5_SHARED_RESOURCE_UID : 0, false);
if (!entry || !entry->refcount) {
mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n",
rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
@@ -223,11 +292,9 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
}
entry->refcount--;
- if (!entry->refcount) {
+ if (!entry->refcount)
/* need to remove rate */
- mlx5_set_pp_rate_limit_cmd(dev, entry->index, &reset_rl);
- entry->rl = reset_rl;
- }
+ mlx5_set_pp_rate_limit_cmd(dev, entry, false);
out:
mutex_unlock(&table->rl_lock);
@@ -273,14 +340,13 @@ int mlx5_init_rl_table(struct mlx5_core_dev *dev)
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
{
struct mlx5_rl_table *table = &dev->priv.rl_table;
- struct mlx5_rate_limit rl = {0};
int i;
/* Clear all configured rates */
for (i = 0; i < table->max_size; i++)
- if (table->rl_entry[i].rl.rate)
- mlx5_set_pp_rate_limit_cmd(dev, table->rl_entry[i].index,
- &rl);
+ if (table->rl_entry[i].refcount)
+ mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i],
+ false);
kfree(dev->priv.rl_table.rl_entry);
}
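Rate-limit entries are now matched on the raw pp context plus uid and refcounted: the first user programs the rate into hardware, later identical requests only bump the refcount, and the last remove call clears it. A userspace sketch of that scheme, with program_hw() standing in for mlx5_set_pp_rate_limit_cmd():

#include <pthread.h>

#define TABLE_SIZE 16

struct rl_entry {
	unsigned int rate;
	int refcount;
};

static struct rl_entry table[TABLE_SIZE];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void program_hw(struct rl_entry *e, int set) { (void)e; (void)set; }

int rl_add(unsigned int rate)
{
	struct rl_entry *free_slot = NULL;
	int i, idx = -1;                         /* -1: table full */

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < TABLE_SIZE; i++) {
		if (table[i].refcount && table[i].rate == rate) {
			table[i].refcount++;     /* rate already programmed */
			idx = i;
			goto out;
		}
		if (!table[i].refcount && !free_slot)
			free_slot = &table[i];   /* remember first free slot */
	}
	if (free_slot) {
		free_slot->rate = rate;
		free_slot->refcount = 1;
		program_hw(free_slot, 1);        /* first user programs HW */
		idx = (int)(free_slot - table);
	}
out:
	pthread_mutex_unlock(&table_lock);
	return idx;
}

void rl_remove(int index)
{
	pthread_mutex_lock(&table_lock);
	if (--table[index].refcount == 0)
		program_hw(&table[index], 0);    /* last user clears HW */
	pthread_mutex_unlock(&table_lock);
}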
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 03f037811f1d..3094d20297a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -77,8 +77,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
if (!MLX5_ESWITCH_MANAGER(dev))
goto enable_vfs_hca;
- mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs);
- err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY);
+ err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs);
if (err) {
mlx5_core_warn(dev,
"failed to enable eswitch SRIOV (%d)\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 2d93228ff633..554811de4c9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -672,7 +672,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
dest_action = action;
if (!action->dest_tbl.is_fw_tbl) {
if (action->dest_tbl.tbl->dmn != dmn) {
- mlx5dr_dbg(dmn,
+ mlx5dr_err(dmn,
"Destination table belongs to a different domain\n");
goto out_invalid_arg;
}
@@ -703,7 +703,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action->dest_tbl.fw_tbl.rx_icm_addr =
output.sw_owner_icm_root_0;
} else {
- mlx5dr_dbg(dmn,
+ mlx5dr_err(dmn,
"Failed mlx5_cmd_query_flow_table ret: %d\n",
ret);
return ret;
@@ -772,7 +772,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
/* Check action duplication */
if (++action_type_set[action_type] > max_actions_type) {
- mlx5dr_dbg(dmn, "Action type %d supports only max %d time(s)\n",
+ mlx5dr_err(dmn, "Action type %d supports only max %d time(s)\n",
action_type, max_actions_type);
goto out_invalid_arg;
}
@@ -781,7 +781,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
if (dr_action_validate_and_get_next_state(action_domain,
action_type,
&state)) {
- mlx5dr_dbg(dmn, "Invalid action sequence provided\n");
+ mlx5dr_err(dmn, "Invalid action sequence provided\n");
return -EOPNOTSUPP;
}
}
@@ -797,7 +797,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
rx_rule && recalc_cs_required && dest_action) {
ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
if (ret) {
- mlx5dr_dbg(dmn,
+ mlx5dr_err(dmn,
"Failed to handle checksum recalculation err %d\n",
ret);
return ret;
@@ -964,6 +964,24 @@ struct mlx5dr_action *mlx5dr_action_create_drop(void)
}
struct mlx5dr_action *
+mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num)
+{
+ struct mlx5dr_action *action;
+
+ action = dr_action_create_generic(DR_ACTION_TYP_FT);
+ if (!action)
+ return NULL;
+
+ action->dest_tbl.is_fw_tbl = true;
+ action->dest_tbl.fw_tbl.dmn = dmn;
+ action->dest_tbl.fw_tbl.id = table_num;
+ action->dest_tbl.fw_tbl.type = FS_FT_FDB;
+ refcount_inc(&dmn->refcount);
+
+ return action;
+}
+
+struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl)
{
struct mlx5dr_action *action;
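mlx5dr_action_create_dest_table_num() gives SW steering rules a destination identified by firmware table number rather than by a mlx5dr_table object; fs_dr.c below uses it for MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM. A hedged usage sketch against the mlx5dr.h declarations in this patch (the matcher, match value, and table number are assumed to exist in the caller):

/* Usage sketch only: forward matched packets to an FDB table known by
 * its firmware table number.
 */
static int fwd_rule_to_fw_table(struct mlx5dr_matcher *matcher,
				struct mlx5dr_domain *dmn,
				struct mlx5dr_match_parameters *value,
				u32 fw_table_num)
{
	struct mlx5dr_action *dest;
	struct mlx5dr_rule *rule;

	/* destination by firmware table number; returns NULL on failure */
	dest = mlx5dr_action_create_dest_table_num(dmn, fw_table_num);
	if (!dest)
		return -ENOMEM;

	rule = mlx5dr_rule_create(matcher, value, 1, &dest);
	if (!rule) {
		mlx5dr_action_destroy(dest);
		return -EINVAL;
	}
	return 0;
}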
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index a9da961d4d2f..48b6358b6845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -59,7 +59,7 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
if (ret) {
- mlx5dr_dbg(dmn, "Couldn't allocate PD\n");
+ mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
return ret;
}
@@ -192,7 +192,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
ret = dr_domain_query_vports(dmn);
if (ret) {
- mlx5dr_dbg(dmn, "Failed to query vports caps\n");
+ mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret);
goto free_vports_caps;
}
@@ -213,7 +213,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
int ret;
if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
- mlx5dr_dbg(dmn, "Failed to allocate domain, bad link type\n");
+ mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
return -EOPNOTSUPP;
}
@@ -257,7 +257,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
if (!vport_cap) {
- mlx5dr_dbg(dmn, "Failed to get esw manager vport\n");
+ mlx5dr_err(dmn, "Failed to get esw manager vport\n");
return -ENOENT;
}
@@ -268,7 +268,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
break;
default:
- mlx5dr_dbg(dmn, "Invalid domain\n");
+ mlx5dr_err(dmn, "Invalid domain\n");
ret = -EINVAL;
break;
}
@@ -300,7 +300,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
mutex_init(&dmn->mutex);
if (dr_domain_caps_init(mdev, dmn)) {
- mlx5dr_dbg(dmn, "Failed init domain, no caps\n");
+ mlx5dr_err(dmn, "Failed init domain, no caps\n");
goto free_domain;
}
@@ -348,8 +348,11 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
mutex_lock(&dmn->mutex);
ret = mlx5dr_send_ring_force_drain(dmn);
mutex_unlock(&dmn->mutex);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
+ flags, ret);
return ret;
+ }
}
if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index d7c7467e2d53..30d2d7376f56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -468,7 +468,7 @@ mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
if (err) {
dr_icm_chill_buckets_abort(pool, bucket, buckets);
- mlx5dr_dbg(pool->dmn, "Sync_steering failed\n");
+ mlx5dr_err(pool->dmn, "Sync_steering failed\n");
chunk = NULL;
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index c6dbd856df94..a95938874798 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -388,14 +388,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);
if (idx == 0) {
- mlx5dr_dbg(dmn, "Cannot generate any valid rules from mask\n");
+ mlx5dr_err(dmn, "Cannot generate any valid rules from mask\n");
return -EINVAL;
}
/* Check that all mask fields were consumed */
for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
if (((u8 *)&mask)[i] != 0) {
- mlx5dr_info(dmn, "Mask contains unsupported parameters\n");
+ mlx5dr_err(dmn, "Mask contains unsupported parameters\n");
return -EOPNOTSUPP;
}
}
@@ -563,7 +563,7 @@ static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher,
dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6);
if (!nic_matcher->ste_builder) {
- mlx5dr_dbg(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
+ mlx5dr_err(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
return -EINVAL;
}
@@ -634,13 +634,13 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
int ret;
if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
- mlx5dr_info(dmn, "Invalid match criteria attribute\n");
+ mlx5dr_err(dmn, "Invalid match criteria attribute\n");
return -EINVAL;
}
if (mask) {
if (mask->match_sz > sizeof(struct mlx5dr_match_param)) {
- mlx5dr_info(dmn, "Invalid match size attribute\n");
+ mlx5dr_err(dmn, "Invalid match size attribute\n");
return -EINVAL;
}
mlx5dr_ste_copy_param(matcher->match_criteria,
@@ -671,7 +671,7 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher *
mlx5dr_matcher_create(struct mlx5dr_table *tbl,
- u16 priority,
+ u32 priority,
u8 match_criteria_enable,
struct mlx5dr_match_parameters *mask)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index e4cff7abb348..cce3ee7a6614 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -826,8 +826,8 @@ again:
ste_location, send_ste_list);
if (!new_htbl) {
mlx5dr_htbl_put(cur_htbl);
- mlx5dr_info(dmn, "failed creating rehash table, htbl-log_size: %d\n",
- cur_htbl->chunk_size);
+ mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
+ cur_htbl->chunk_size);
} else {
cur_htbl = new_htbl;
}
@@ -877,7 +877,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
if (!value_size ||
(value_size > sizeof(struct mlx5dr_match_param) ||
(value_size % sizeof(u32)))) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
return false;
}
@@ -888,7 +888,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->outer), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
return false;
}
}
@@ -898,7 +898,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->misc), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
return false;
}
}
@@ -908,7 +908,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->inner), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
return false;
}
}
@@ -918,7 +918,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->misc2), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
return false;
}
}
@@ -928,7 +928,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
e_idx = min(s_idx + sizeof(param->misc3), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
+ mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
return false;
}
}
@@ -1221,7 +1221,7 @@ remove_action_members:
dr_rule_remove_action_members(rule);
free_rule:
kfree(rule);
- mlx5dr_info(dmn, "Failed creating rule\n");
+ mlx5dr_err(dmn, "Failed creating rule\n");
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 095ec7b1399d..c0ab9cf74929 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -136,7 +136,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq,
&dr_qp->wq_ctrl);
if (err) {
- mlx5_core_info(mdev, "Can't create QP WQ\n");
+ mlx5_core_warn(mdev, "Can't create QP WQ\n");
goto err_wq;
}
@@ -652,8 +652,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
/* Init */
ret = dr_modify_qp_rst2init(dmn->mdev, dr_qp, port);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Failed modify QP rst2init\n");
return ret;
+ }
/* RTR */
ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr);
@@ -668,8 +670,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp;
ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Failed modify QP init2rtr\n");
return ret;
+ }
/* RTS */
rts_attr.timeout = 14;
@@ -677,8 +681,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
rts_attr.rnr_retry = 7;
ret = dr_cmd_modify_qp_rtr2rts(dmn->mdev, dr_qp, &rts_attr);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Failed modify QP rtr2rts\n");
return ret;
+ }
return 0;
}
@@ -862,6 +868,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
cq_size = QUEUE_SIZE + 1;
dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);
if (!dmn->send_ring->cq) {
+ mlx5dr_err(dmn, "Failed creating CQ\n");
ret = -ENOMEM;
goto free_send_ring;
}
@@ -873,6 +880,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
if (!dmn->send_ring->qp) {
+ mlx5dr_err(dmn, "Failed creating QP\n");
ret = -ENOMEM;
goto clean_cq;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index aade62a9ee5c..c0e3a1e7389d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -728,7 +728,7 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
{
if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
- mlx5dr_dbg(dmn, "Partial mask source_port is not supported\n");
+ mlx5dr_err(dmn, "Partial mask source_port is not supported\n");
return -EINVAL;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index 14ce2d7dbb66..c2fe48d7b75a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -128,16 +128,20 @@ static int dr_table_init_nic(struct mlx5dr_domain *dmn,
DR_CHUNK_SIZE_1,
MLX5DR_STE_LU_TYPE_DONT_CARE,
0);
- if (!nic_tbl->s_anchor)
+ if (!nic_tbl->s_anchor) {
+ mlx5dr_err(dmn, "Failed allocating htbl\n");
return -ENOMEM;
+ }
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_dmn->default_icm_addr;
ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
nic_tbl->s_anchor,
&info, true);
- if (ret)
+ if (ret) {
+ mlx5dr_err(dmn, "Failed int and send htbl\n");
goto free_s_anchor;
+ }
mlx5dr_htbl_get(nic_tbl->s_anchor);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index dffe35145d19..3fa739951b34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -705,7 +705,7 @@ struct mlx5dr_matcher {
struct mlx5dr_matcher_rx_tx rx;
struct mlx5dr_matcher_rx_tx tx;
struct list_head matcher_list;
- u16 prio;
+ u32 prio;
struct mlx5dr_match_param mask;
u8 match_criteria;
refcount_t refcount;
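
Editor's note: the matcher priority is widened from u16 to u32 because it is filled from the 32-bit start_flow_index field of create_flow_group_in (see the fs_dr.c hunk below); a u16 silently truncates indexes above 65535. A minimal stand-alone sketch of the failure mode, with illustrative names only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t start_flow_index = 0x12345; /* valid 32-bit FW index */
            uint16_t prio16 = start_flow_index;  /* old field: truncates  */
            uint32_t prio32 = start_flow_index;  /* new field: preserved  */

            /* Prints "u16: 0x2345, u32: 0x12345" */
            printf("u16: 0x%x, u32: 0x%x\n", prio16, prio32);
            return 0;
    }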
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index c2027192e21e..3b3f5b9d4f95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -140,7 +140,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_group *fg)
{
struct mlx5dr_matcher *matcher;
- u16 priority = MLX5_GET(create_flow_group_in, in,
+ u32 priority = MLX5_GET(create_flow_group_in, in,
start_flow_index);
u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
in,
@@ -384,6 +384,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
+ u32 ft_id;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -420,6 +421,17 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
num_term_actions++;
break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ ft_id = dst->dest_attr.ft_num;
+ tmp_action = mlx5dr_action_create_dest_table_num(domain,
+ ft_id);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ term_actions[num_term_actions++].dest = tmp_action;
+ break;
default:
err = -EOPNOTSUPP;
goto free_actions;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index e1edc9c247b7..7deaca9ade3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -38,8 +38,6 @@ struct mlx5dr_action_dest {
struct mlx5dr_action *reformat;
};
-#ifdef CONFIG_MLX5_SW_STEERING
-
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type);
@@ -59,7 +57,7 @@ u32 mlx5dr_table_get_id(struct mlx5dr_table *table);
struct mlx5dr_matcher *
mlx5dr_matcher_create(struct mlx5dr_table *table,
- u16 priority,
+ u32 priority,
u8 match_criteria_enable,
struct mlx5dr_match_parameters *mask);
@@ -77,6 +75,9 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
struct mlx5dr_action *action);
struct mlx5dr_action *
+mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num);
+
+struct mlx5dr_action *
mlx5dr_action_create_dest_table(struct mlx5dr_table *table);
struct mlx5dr_action *
@@ -125,103 +126,4 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner);
}
-#else /* CONFIG_MLX5_SW_STEERING */
-
-static inline struct mlx5dr_domain *
-mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) { return NULL; }
-
-static inline int
-mlx5dr_domain_destroy(struct mlx5dr_domain *domain) { return 0; }
-
-static inline int
-mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags) { return 0; }
-
-static inline void
-mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
- struct mlx5dr_domain *peer_dmn) { }
-
-static inline struct mlx5dr_table *
-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags) { return NULL; }
-
-static inline int
-mlx5dr_table_destroy(struct mlx5dr_table *table) { return 0; }
-
-static inline u32
-mlx5dr_table_get_id(struct mlx5dr_table *table) { return 0; }
-
-static inline struct mlx5dr_matcher *
-mlx5dr_matcher_create(struct mlx5dr_table *table,
- u16 priority,
- u8 match_criteria_enable,
- struct mlx5dr_match_parameters *mask) { return NULL; }
-
-static inline int
-mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher) { return 0; }
-
-static inline struct mlx5dr_rule *
-mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
- struct mlx5dr_match_parameters *value,
- size_t num_actions,
- struct mlx5dr_action *actions[]) { return NULL; }
-
-static inline int
-mlx5dr_rule_destroy(struct mlx5dr_rule *rule) { return 0; }
-
-static inline int
-mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
- struct mlx5dr_action *action) { return 0; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_dest_table(struct mlx5dr_table *table) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
- struct mlx5_flow_table *ft) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
- u32 vport, u8 vhca_id_valid,
- u16 vhca_id) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
- struct mlx5dr_action_dest *dests,
- u32 num_of_dests) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_drop(void) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_tag(u32 tag_value) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_flow_counter(u32 counter_id) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
- enum mlx5dr_action_reformat_type reformat_type,
- size_t data_sz,
- void *data) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_modify_header(struct mlx5dr_domain *domain,
- u32 flags,
- size_t actions_sz,
- __be64 actions[]) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_pop_vlan(void) { return NULL; }
-
-static inline struct mlx5dr_action *
-mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain,
- __be32 vlan_hdr) { return NULL; }
-
-static inline int
-mlx5dr_action_destroy(struct mlx5dr_action *action) { return 0; }
-
-static inline bool
-mlx5dr_is_supported(struct mlx5_core_dev *dev) { return false; }
-
-#endif /* CONFIG_MLX5_SW_STEERING */
-
#endif /* _MLX5DR_H_ */
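
Editor's note: with the !CONFIG_MLX5_SW_STEERING stub branch removed, this header is meant to be included only from code built alongside the steering module; the remaining guard is the runtime mlx5dr_is_supported() capability check. A hedged sketch of a caller gating on it (the function name is hypothetical; the mlx5dr calls are from this header):

    /* Illustrative only; real callers live in fs_dr.c. */
    static int example_init_steering(struct mlx5_core_dev *dev)
    {
            struct mlx5dr_domain *dmn;

            if (!mlx5dr_is_supported(dev))
                    return -EOPNOTSUPP; /* FW did not grant SW ownership */

            dmn = mlx5dr_domain_create(dev, MLX5DR_DOMAIN_TYPE_FDB);
            if (!dmn)
                    return -ENOMEM;

            return mlx5dr_domain_destroy(dmn); /* placeholder teardown */
    }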
diff --git a/drivers/net/ethernet/mellanox/mlxfw/Kconfig b/drivers/net/ethernet/mellanox/mlxfw/Kconfig
index 0367f835a846..5b604501f33e 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxfw/Kconfig
@@ -12,3 +12,4 @@ config MLXFW
To compile this driver as a module, choose M here: the
module will be called mlxfw.
select XZ_DEC
+ select NET_DEVLINK
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
index c50e74ab02c4..7654841a05c2 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
@@ -6,6 +6,30 @@
#include <linux/firmware.h>
#include <linux/netlink.h>
+#include <linux/device.h>
+#include <net/devlink.h>
+
+struct mlxfw_dev {
+ const struct mlxfw_dev_ops *ops;
+ const char *psid;
+ u16 psid_size;
+ struct devlink *devlink;
+};
+
+static inline
+struct device *mlxfw_dev_dev(struct mlxfw_dev *mlxfw_dev)
+{
+ return mlxfw_dev->devlink->dev;
+}
+
+#define MLXFW_PRFX "mlxfw: "
+
+#define mlxfw_info(mlxfw_dev, fmt, ...) \
+ dev_info(mlxfw_dev_dev(mlxfw_dev), MLXFW_PRFX fmt, ## __VA_ARGS__)
+#define mlxfw_err(mlxfw_dev, fmt, ...) \
+ dev_err(mlxfw_dev_dev(mlxfw_dev), MLXFW_PRFX fmt, ## __VA_ARGS__)
+#define mlxfw_dbg(mlxfw_dev, fmt, ...) \
+ dev_dbg(mlxfw_dev_dev(mlxfw_dev), MLXFW_PRFX fmt, ## __VA_ARGS__)
enum mlxfw_fsm_state {
MLXFW_FSM_STATE_IDLE,
@@ -31,7 +55,19 @@ enum mlxfw_fsm_state_err {
MLXFW_FSM_STATE_ERR_MAX,
};
-struct mlxfw_dev;
+enum mlxfw_fsm_reactivate_status {
+ MLXFW_FSM_REACTIVATE_STATUS_OK,
+ MLXFW_FSM_REACTIVATE_STATUS_BUSY,
+ MLXFW_FSM_REACTIVATE_STATUS_PROHIBITED_FW_VER_ERR,
+ MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_COPY_FAILED,
+ MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_ERASE_FAILED,
+ MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_RESTORE_FAILED,
+ MLXFW_FSM_REACTIVATE_STATUS_CANDIDATE_FW_DEACTIVATION_FAILED,
+ MLXFW_FSM_REACTIVATE_STATUS_FW_ALREADY_ACTIVATED,
+ MLXFW_FSM_REACTIVATE_STATUS_ERR_DEVICE_RESET_REQUIRED,
+ MLXFW_FSM_REACTIVATE_STATUS_ERR_FW_PROGRAMMING_NEEDED,
+ MLXFW_FSM_REACTIVATE_STATUS_MAX,
+};
struct mlxfw_dev_ops {
int (*component_query)(struct mlxfw_dev *mlxfw_dev, u16 component_index,
@@ -51,6 +87,8 @@ struct mlxfw_dev_ops {
int (*fsm_activate)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
+ int (*fsm_reactivate)(struct mlxfw_dev *mlxfw_dev, u8 *status);
+
int (*fsm_query_state)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
enum mlxfw_fsm_state *fsm_state,
enum mlxfw_fsm_state_err *fsm_state_err);
@@ -58,16 +96,6 @@ struct mlxfw_dev_ops {
void (*fsm_cancel)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
void (*fsm_release)(struct mlxfw_dev *mlxfw_dev, u32 fwhandle);
-
- void (*status_notify)(struct mlxfw_dev *mlxfw_dev,
- const char *msg, const char *comp_name,
- u32 done_bytes, u32 total_bytes);
-};
-
-struct mlxfw_dev {
- const struct mlxfw_dev_ops *ops;
- const char *psid;
- u16 psid_size;
};
#if IS_REACHABLE(CONFIG_MLXFW)
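
Editor's note: struct mlxfw_dev now lives in the header and carries the devlink instance, so the mlxfw_info/err/dbg helpers can resolve a struct device without a driver callback. A sketch of how a driver might embed it (the wrapper struct is hypothetical; mlxfw_info() is the macro defined above):

    struct example_fw_dev {
            struct mlxfw_dev mlxfw_dev; /* ops, psid, devlink set by driver */
            void *priv;
    };

    static void example_log(struct example_fw_dev *efd)
    {
            /* Expands to dev_info(efd->mlxfw_dev.devlink->dev, "mlxfw: ..."). */
            mlxfw_info(&efd->mlxfw_dev, "flashing, PSID size %u\n",
                       efd->mlxfw_dev.psid_size);
    }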
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index 29e95d0a6ad1..046a0cb82ed8 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -16,38 +16,70 @@
(MLXFW_FSM_STATE_WAIT_TIMEOUT_MS / MLXFW_FSM_STATE_WAIT_CYCLE_MS)
#define MLXFW_FSM_MAX_COMPONENT_SIZE (10 * (1 << 20))
-static const char * const mlxfw_fsm_state_err_str[] = {
- [MLXFW_FSM_STATE_ERR_ERROR] =
- "general error",
- [MLXFW_FSM_STATE_ERR_REJECTED_DIGEST_ERR] =
- "component hash mismatch",
- [MLXFW_FSM_STATE_ERR_REJECTED_NOT_APPLICABLE] =
- "component not applicable",
- [MLXFW_FSM_STATE_ERR_REJECTED_UNKNOWN_KEY] =
- "unknown key",
- [MLXFW_FSM_STATE_ERR_REJECTED_AUTH_FAILED] =
- "authentication failed",
- [MLXFW_FSM_STATE_ERR_REJECTED_UNSIGNED] =
- "component was not signed",
- [MLXFW_FSM_STATE_ERR_REJECTED_KEY_NOT_APPLICABLE] =
- "key not applicable",
- [MLXFW_FSM_STATE_ERR_REJECTED_BAD_FORMAT] =
- "bad format",
- [MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET] =
- "pending reset",
- [MLXFW_FSM_STATE_ERR_MAX] =
- "unknown error"
+static const int mlxfw_fsm_state_errno[] = {
+ [MLXFW_FSM_STATE_ERR_ERROR] = -EIO,
+ [MLXFW_FSM_STATE_ERR_REJECTED_DIGEST_ERR] = -EBADMSG,
+ [MLXFW_FSM_STATE_ERR_REJECTED_NOT_APPLICABLE] = -ENOENT,
+ [MLXFW_FSM_STATE_ERR_REJECTED_UNKNOWN_KEY] = -ENOKEY,
+ [MLXFW_FSM_STATE_ERR_REJECTED_AUTH_FAILED] = -EACCES,
+ [MLXFW_FSM_STATE_ERR_REJECTED_UNSIGNED] = -EKEYREVOKED,
+ [MLXFW_FSM_STATE_ERR_REJECTED_KEY_NOT_APPLICABLE] = -EKEYREJECTED,
+ [MLXFW_FSM_STATE_ERR_REJECTED_BAD_FORMAT] = -ENOEXEC,
+ [MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET] = -EBUSY,
+ [MLXFW_FSM_STATE_ERR_MAX] = -EINVAL
};
-static void mlxfw_status_notify(struct mlxfw_dev *mlxfw_dev,
- const char *msg, const char *comp_name,
- u32 done_bytes, u32 total_bytes)
+#define MLXFW_ERR_PRFX "Firmware flash failed: "
+#define MLXFW_ERR_MSG(fwdev, extack, msg, err) do { \
+ mlxfw_err(fwdev, "%s, err (%d)\n", MLXFW_ERR_PRFX msg, err); \
+ NL_SET_ERR_MSG_MOD(extack, MLXFW_ERR_PRFX msg); \
+} while (0)
+
+static int mlxfw_fsm_state_err(struct mlxfw_dev *mlxfw_dev,
+ struct netlink_ext_ack *extack,
+ enum mlxfw_fsm_state_err err)
{
- if (!mlxfw_dev->ops->status_notify)
- return;
- mlxfw_dev->ops->status_notify(mlxfw_dev, msg, comp_name,
- done_bytes, total_bytes);
-}
+ enum mlxfw_fsm_state_err fsm_state_err;
+
+ fsm_state_err = min_t(enum mlxfw_fsm_state_err, err,
+ MLXFW_FSM_STATE_ERR_MAX);
+
+ switch (fsm_state_err) {
+ case MLXFW_FSM_STATE_ERR_ERROR:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "general error", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_REJECTED_DIGEST_ERR:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "component hash mismatch", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_REJECTED_NOT_APPLICABLE:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "component not applicable", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_REJECTED_UNKNOWN_KEY:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "unknown key", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_REJECTED_AUTH_FAILED:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "authentication failed", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_REJECTED_UNSIGNED:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "component was not signed", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_REJECTED_KEY_NOT_APPLICABLE:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "key not applicable", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_REJECTED_BAD_FORMAT:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "bad format", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_BLOCKED_PENDING_RESET:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "pending reset", err);
+ break;
+ case MLXFW_FSM_STATE_ERR_OK: /* fall through */
+ case MLXFW_FSM_STATE_ERR_MAX:
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "unknown error", err);
+ break;
+ }
+
+ return mlxfw_fsm_state_errno[fsm_state_err];
+}
static int mlxfw_fsm_state_wait(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
enum mlxfw_fsm_state fsm_state,
@@ -62,21 +94,18 @@ static int mlxfw_fsm_state_wait(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
retry:
err = mlxfw_dev->ops->fsm_query_state(mlxfw_dev, fwhandle,
&curr_fsm_state, &fsm_state_err);
- if (err)
+ if (err) {
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "FSM state query failed", err);
return err;
-
- if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
- fsm_state_err = min_t(enum mlxfw_fsm_state_err,
- fsm_state_err, MLXFW_FSM_STATE_ERR_MAX);
- pr_err("Firmware flash failed: %s\n",
- mlxfw_fsm_state_err_str[fsm_state_err]);
- NL_SET_ERR_MSG_MOD(extack, "Firmware flash failed");
- return -EINVAL;
}
+
+ if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK)
+ return mlxfw_fsm_state_err(mlxfw_dev, extack, fsm_state_err);
+
if (curr_fsm_state != fsm_state) {
if (--times == 0) {
- pr_err("Timeout reached on FSM state change");
- NL_SET_ERR_MSG_MOD(extack, "Timeout reached on FSM state change");
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Timeout reached on FSM state change", -ETIMEDOUT);
return -ETIMEDOUT;
}
msleep(MLXFW_FSM_STATE_WAIT_CYCLE_MS);
@@ -85,6 +114,92 @@ retry:
return 0;
}
+static int
+mlxfw_fsm_reactivate_err(struct mlxfw_dev *mlxfw_dev,
+ struct netlink_ext_ack *extack, u8 err)
+{
+ enum mlxfw_fsm_reactivate_status status;
+
+#define MLXFW_REACT_PRFX "Reactivate FSM: "
+#define MLXFW_REACT_ERR(msg, err) \
+ MLXFW_ERR_MSG(mlxfw_dev, extack, MLXFW_REACT_PRFX msg, err)
+
+ status = min_t(enum mlxfw_fsm_reactivate_status, err,
+ MLXFW_FSM_REACTIVATE_STATUS_MAX);
+
+ switch (status) {
+ case MLXFW_FSM_REACTIVATE_STATUS_BUSY:
+ MLXFW_REACT_ERR("busy", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_PROHIBITED_FW_VER_ERR:
+ MLXFW_REACT_ERR("prohibited fw ver", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_COPY_FAILED:
+ MLXFW_REACT_ERR("first page copy failed", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_ERASE_FAILED:
+ MLXFW_REACT_ERR("first page erase failed", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_FIRST_PAGE_RESTORE_FAILED:
+ MLXFW_REACT_ERR("first page restore failed", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_CANDIDATE_FW_DEACTIVATION_FAILED:
+ MLXFW_REACT_ERR("candidate fw deactivation failed", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_ERR_DEVICE_RESET_REQUIRED:
+ MLXFW_REACT_ERR("device reset required", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_ERR_FW_PROGRAMMING_NEEDED:
+ MLXFW_REACT_ERR("fw programming needed", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_FW_ALREADY_ACTIVATED:
+ MLXFW_REACT_ERR("fw already activated", err);
+ break;
+ case MLXFW_FSM_REACTIVATE_STATUS_OK: /* fall through */
+ case MLXFW_FSM_REACTIVATE_STATUS_MAX:
+ MLXFW_REACT_ERR("unexpected error", err);
+ break;
+ }
+ return -EREMOTEIO;
+}
+
+static int mlxfw_fsm_reactivate(struct mlxfw_dev *mlxfw_dev,
+ struct netlink_ext_ack *extack,
+ bool *supported)
+{
+ u8 status;
+ int err;
+
+ if (!mlxfw_dev->ops->fsm_reactivate)
+ return 0;
+
+ err = mlxfw_dev->ops->fsm_reactivate(mlxfw_dev, &status);
+ if (err == -EOPNOTSUPP) {
+ *supported = false;
+ return 0;
+ }
+
+ if (err) {
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Could not reactivate firmware flash", err);
+ return err;
+ }
+
+ if (status == MLXFW_FSM_REACTIVATE_STATUS_OK ||
+ status == MLXFW_FSM_REACTIVATE_STATUS_FW_ALREADY_ACTIVATED)
+ return 0;
+
+ return mlxfw_fsm_reactivate_err(mlxfw_dev, extack, status);
+}
+
+static void mlxfw_status_notify(struct mlxfw_dev *mlxfw_dev,
+ const char *msg, const char *comp_name,
+ u32 done_bytes, u32 total_bytes)
+{
+ devlink_flash_update_status_notify(mlxfw_dev->devlink, msg, comp_name,
+ done_bytes, total_bytes);
+}
+
#define MLXFW_ALIGN_DOWN(x, align_bits) ((x) & ~((1 << (align_bits)) - 1))
#define MLXFW_ALIGN_UP(x, align_bits) \
MLXFW_ALIGN_DOWN((x) + ((1 << (align_bits)) - 1), (align_bits))
@@ -92,6 +207,7 @@ retry:
static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
u32 fwhandle,
struct mlxfw_mfa2_component *comp,
+ bool reactivate_supp,
struct netlink_ext_ack *extack)
{
u16 comp_max_write_size;
@@ -108,34 +224,43 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
err = mlxfw_dev->ops->component_query(mlxfw_dev, comp->index,
&comp_max_size, &comp_align_bits,
&comp_max_write_size);
- if (err)
+ if (err) {
+ MLXFW_ERR_MSG(mlxfw_dev, extack, "FSM component query failed", err);
return err;
+ }
comp_max_size = min_t(u32, comp_max_size, MLXFW_FSM_MAX_COMPONENT_SIZE);
if (comp->data_size > comp_max_size) {
- pr_err("Component %d is of size %d which is bigger than limit %d\n",
- comp->index, comp->data_size, comp_max_size);
- NL_SET_ERR_MSG_MOD(extack, "Component is bigger than limit");
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Component size is bigger than limit", -EINVAL);
return -EINVAL;
}
comp_max_write_size = MLXFW_ALIGN_DOWN(comp_max_write_size,
comp_align_bits);
- pr_debug("Component update\n");
+ mlxfw_dbg(mlxfw_dev, "Component update\n");
mlxfw_status_notify(mlxfw_dev, "Updating component", comp_name, 0, 0);
err = mlxfw_dev->ops->fsm_component_update(mlxfw_dev, fwhandle,
comp->index,
comp->data_size);
- if (err)
+ if (err) {
+ if (!reactivate_supp)
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "FSM component update failed, FW reactivate is not supported",
+ err);
+ else
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "FSM component update failed", err);
return err;
+ }
err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
MLXFW_FSM_STATE_DOWNLOAD, extack);
if (err)
goto err_out;
- pr_debug("Component download\n");
+ mlxfw_dbg(mlxfw_dev, "Component download\n");
mlxfw_status_notify(mlxfw_dev, "Downloading component",
comp_name, 0, comp->data_size);
for (offset = 0;
@@ -147,19 +272,25 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev,
err = mlxfw_dev->ops->fsm_block_download(mlxfw_dev, fwhandle,
block_ptr, block_size,
offset);
- if (err)
+ if (err) {
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Component download failed", err);
goto err_out;
+ }
mlxfw_status_notify(mlxfw_dev, "Downloading component",
comp_name, offset + block_size,
comp->data_size);
}
- pr_debug("Component verify\n");
+ mlxfw_dbg(mlxfw_dev, "Component verify\n");
mlxfw_status_notify(mlxfw_dev, "Verifying component", comp_name, 0, 0);
err = mlxfw_dev->ops->fsm_component_verify(mlxfw_dev, fwhandle,
comp->index);
- if (err)
+ if (err) {
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "FSM component verify failed", err);
goto err_out;
+ }
err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
MLXFW_FSM_STATE_LOCKED, extack);
@@ -174,6 +305,7 @@ err_out:
static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
struct mlxfw_mfa2_file *mfa2_file,
+ bool reactivate_supp,
struct netlink_ext_ack *extack)
{
u32 component_count;
@@ -184,8 +316,8 @@ static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
mlxfw_dev->psid_size,
&component_count);
if (err) {
- pr_err("Could not find device PSID in MFA2 file\n");
- NL_SET_ERR_MSG_MOD(extack, "Could not find device PSID in MFA2 file");
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Could not find device PSID in MFA2 file", err);
return err;
}
@@ -194,11 +326,17 @@ static int mlxfw_flash_components(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
comp = mlxfw_mfa2_file_component_get(mfa2_file, mlxfw_dev->psid,
mlxfw_dev->psid_size, i);
- if (IS_ERR(comp))
- return PTR_ERR(comp);
+ if (IS_ERR(comp)) {
+ err = PTR_ERR(comp);
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Failed to get MFA2 component", err);
+ return err;
+ }
- pr_info("Flashing component type %d\n", comp->index);
- err = mlxfw_flash_component(mlxfw_dev, fwhandle, comp, extack);
+ mlxfw_info(mlxfw_dev, "Flashing component type %d\n",
+ comp->index);
+ err = mlxfw_flash_component(mlxfw_dev, fwhandle, comp,
+ reactivate_supp, extack);
mlxfw_mfa2_file_component_put(comp);
if (err)
return err;
@@ -211,26 +349,32 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
struct netlink_ext_ack *extack)
{
struct mlxfw_mfa2_file *mfa2_file;
+ bool reactivate_supp = true;
u32 fwhandle;
int err;
if (!mlxfw_mfa2_check(firmware)) {
- pr_err("Firmware file is not MFA2\n");
- NL_SET_ERR_MSG_MOD(extack, "Firmware file is not MFA2");
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Firmware file is not MFA2", -EINVAL);
return -EINVAL;
}
mfa2_file = mlxfw_mfa2_file_init(firmware);
- if (IS_ERR(mfa2_file))
- return PTR_ERR(mfa2_file);
+ if (IS_ERR(mfa2_file)) {
+ err = PTR_ERR(mfa2_file);
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Failed to initialize MFA2 firmware file", err);
+ return err;
+ }
- pr_info("Initialize firmware flash process\n");
+ mlxfw_info(mlxfw_dev, "Initialize firmware flash process\n");
+ devlink_flash_update_begin_notify(mlxfw_dev->devlink);
mlxfw_status_notify(mlxfw_dev, "Initializing firmware flash process",
NULL, 0, 0);
err = mlxfw_dev->ops->fsm_lock(mlxfw_dev, &fwhandle);
if (err) {
- pr_err("Could not lock the firmware FSM\n");
- NL_SET_ERR_MSG_MOD(extack, "Could not lock the firmware FSM");
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Could not lock the firmware FSM", err);
goto err_fsm_lock;
}
@@ -239,16 +383,26 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
if (err)
goto err_state_wait_idle_to_locked;
- err = mlxfw_flash_components(mlxfw_dev, fwhandle, mfa2_file, extack);
+ err = mlxfw_fsm_reactivate(mlxfw_dev, extack, &reactivate_supp);
+ if (err)
+ goto err_fsm_reactivate;
+
+ err = mlxfw_fsm_state_wait(mlxfw_dev, fwhandle,
+ MLXFW_FSM_STATE_LOCKED, extack);
+ if (err)
+ goto err_state_wait_reactivate_to_locked;
+
+ err = mlxfw_flash_components(mlxfw_dev, fwhandle, mfa2_file,
+ reactivate_supp, extack);
if (err)
goto err_flash_components;
- pr_debug("Activate image\n");
+ mlxfw_dbg(mlxfw_dev, "Activate image\n");
mlxfw_status_notify(mlxfw_dev, "Activating image", NULL, 0, 0);
err = mlxfw_dev->ops->fsm_activate(mlxfw_dev, fwhandle);
if (err) {
- pr_err("Could not activate the downloaded image\n");
- NL_SET_ERR_MSG_MOD(extack, "Could not activate the downloaded image");
+ MLXFW_ERR_MSG(mlxfw_dev, extack,
+ "Could not activate the downloaded image", err);
goto err_fsm_activate;
}
@@ -257,21 +411,25 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
if (err)
goto err_state_wait_activate_to_locked;
- pr_debug("Handle release\n");
+ mlxfw_dbg(mlxfw_dev, "Handle release\n");
mlxfw_dev->ops->fsm_release(mlxfw_dev, fwhandle);
- pr_info("Firmware flash done.\n");
+ mlxfw_info(mlxfw_dev, "Firmware flash done\n");
mlxfw_status_notify(mlxfw_dev, "Firmware flash done", NULL, 0, 0);
mlxfw_mfa2_file_fini(mfa2_file);
+ devlink_flash_update_end_notify(mlxfw_dev->devlink);
return 0;
err_state_wait_activate_to_locked:
err_fsm_activate:
err_flash_components:
+err_state_wait_reactivate_to_locked:
+err_fsm_reactivate:
err_state_wait_idle_to_locked:
mlxfw_dev->ops->fsm_release(mlxfw_dev, fwhandle);
err_fsm_lock:
mlxfw_mfa2_file_fini(mfa2_file);
+ devlink_flash_update_end_notify(mlxfw_dev->devlink);
return err;
}
EXPORT_SYMBOL(mlxfw_firmware_flash);
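
Editor's note: the mlxfw_fsm_state_errno[] table above replaces the old string-only reporting with distinct errno values, and min_t() clamps any out-of-range FSM code to MLXFW_FSM_STATE_ERR_MAX so the lookup cannot run past the array. The clamp-then-lookup pattern in isolation (a stand-alone sketch with abbreviated names):

    #include <errno.h>
    #include <stdio.h>

    enum state_err { ERR_OK, ERR_ERROR, ERR_MAX };

    static const int state_errno[] = {
            [ERR_OK]    = 0,
            [ERR_ERROR] = -EIO,
            [ERR_MAX]   = -EINVAL, /* catch-all for unknown codes */
    };

    static int state_to_errno(unsigned int err)
    {
            unsigned int idx = err < ERR_MAX ? err : ERR_MAX; /* clamp */

            return state_errno[idx];
    }

    int main(void)
    {
            /* Prints "-5 -22": known code maps directly, 42 clamps to MAX. */
            printf("%d %d\n", state_to_errno(ERR_ERROR), state_to_errno(42));
            return 0;
    }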
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
index 79057af4fe99..5d9ddf36fb4e 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -496,7 +496,7 @@ mlxfw_mfa2_file_component_tlv_get(const struct mlxfw_mfa2_file *mfa2_file,
struct mlxfw_mfa2_comp_data {
struct mlxfw_mfa2_component comp;
- u8 buff[0];
+ u8 buff[];
};
static const struct mlxfw_mfa2_tlv_component_descriptor *
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
index 33c971190bba..2014a5de5a01 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h
@@ -11,7 +11,7 @@ struct mlxfw_mfa2_tlv {
u8 version;
u8 type;
__be16 len;
- u8 data[0];
+ u8 data[];
} __packed;
static inline const struct mlxfw_mfa2_tlv *
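
Editor's note: the buff[0]/data[0] members become C99 flexible array members, which lets the compiler and fortify checks see the trailing array for what it is. Allocation pairs naturally with struct_size() from <linux/overflow.h>; a hedged sketch with an illustrative struct modeled on the TLV above:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct example_tlv {
            u8 version;
            u8 type;
            __be16 len;
            u8 data[]; /* flexible array member */
    } __packed;

    static struct example_tlv *example_tlv_alloc(size_t data_len)
    {
            struct example_tlv *tlv;

            /* struct_size() guards the header + payload sum against overflow. */
            tlv = kzalloc(struct_size(tlv, data, data_len), GFP_KERNEL);
            return tlv;
    }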
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e9f791c43f20..e9ccd333f61d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -82,7 +82,7 @@ struct mlxsw_core {
struct mlxsw_core_port *ports;
unsigned int max_ports;
bool fw_flash_in_progress;
- unsigned long driver_priv[0];
+ unsigned long driver_priv[];
/* driver_priv has to be always the last item */
};
@@ -142,6 +142,7 @@ struct mlxsw_rx_listener_item {
struct list_head list;
struct mlxsw_rx_listener rxl;
void *priv;
+ bool enabled;
};
struct mlxsw_event_listener_item {
@@ -1197,6 +1198,72 @@ mlxsw_devlink_trap_group_init(struct devlink *devlink,
return mlxsw_driver->trap_group_init(mlxsw_core, group);
}
+static int
+mlxsw_devlink_trap_group_set(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_group_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_group_set(mlxsw_core, group, policer);
+}
+
+static int
+mlxsw_devlink_trap_policer_init(struct devlink *devlink,
+ const struct devlink_trap_policer *policer)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_policer_init)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_policer_init(mlxsw_core, policer);
+}
+
+static void
+mlxsw_devlink_trap_policer_fini(struct devlink *devlink,
+ const struct devlink_trap_policer *policer)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_policer_fini)
+ return;
+ mlxsw_driver->trap_policer_fini(mlxsw_core, policer);
+}
+
+static int
+mlxsw_devlink_trap_policer_set(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_policer_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_policer_set(mlxsw_core, policer, rate, burst,
+ extack);
+}
+
+static int
+mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_policer_counter_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_policer_counter_get(mlxsw_core, policer,
+ p_drops);
+}
+
static const struct devlink_ops mlxsw_devlink_ops = {
.reload_down = mlxsw_devlink_core_bus_device_reload_down,
.reload_up = mlxsw_devlink_core_bus_device_reload_up,
@@ -1219,6 +1286,11 @@ static const struct devlink_ops mlxsw_devlink_ops = {
.trap_fini = mlxsw_devlink_trap_fini,
.trap_action_set = mlxsw_devlink_trap_action_set,
.trap_group_init = mlxsw_devlink_trap_group_init,
+ .trap_group_set = mlxsw_devlink_trap_group_set,
+ .trap_policer_init = mlxsw_devlink_trap_policer_init,
+ .trap_policer_fini = mlxsw_devlink_trap_policer_fini,
+ .trap_policer_set = mlxsw_devlink_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_devlink_trap_policer_counter_get,
};
static int
@@ -1457,14 +1529,12 @@ static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_rx_listener *rxl,
- void *priv)
+ const struct mlxsw_rx_listener *rxl)
{
struct mlxsw_rx_listener_item *rxl_item;
list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
- if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
- rxl_item->priv == priv)
+ if (__is_rx_listener_equal(&rxl_item->rxl, rxl))
return rxl_item;
}
return NULL;
@@ -1472,11 +1542,11 @@ __find_rx_listener_item(struct mlxsw_core *mlxsw_core,
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_rx_listener *rxl,
- void *priv)
+ void *priv, bool enabled)
{
struct mlxsw_rx_listener_item *rxl_item;
- rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
if (rxl_item)
return -EEXIST;
rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
@@ -1484,6 +1554,7 @@ int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
return -ENOMEM;
rxl_item->rxl = *rxl;
rxl_item->priv = priv;
+ rxl_item->enabled = enabled;
list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
return 0;
@@ -1491,12 +1562,11 @@ int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_rx_listener *rxl,
- void *priv)
+ const struct mlxsw_rx_listener *rxl)
{
struct mlxsw_rx_listener_item *rxl_item;
- rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
if (!rxl_item)
return;
list_del_rcu(&rxl_item->list);
@@ -1505,6 +1575,19 @@ void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
+static void
+mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_rx_listener *rxl,
+ bool enabled)
+{
+ struct mlxsw_rx_listener_item *rxl_item;
+
+ rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
+ if (WARN_ON(!rxl_item))
+ return;
+ rxl_item->enabled = enabled;
+}
+
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
void *priv)
{
@@ -1534,14 +1617,12 @@ static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_event_listener *el,
- void *priv)
+ const struct mlxsw_event_listener *el)
{
struct mlxsw_event_listener_item *el_item;
list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
- if (__is_event_listener_equal(&el_item->el, el) &&
- el_item->priv == priv)
+ if (__is_event_listener_equal(&el_item->el, el))
return el_item;
}
return NULL;
@@ -1559,7 +1640,7 @@ int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
.trap_id = el->trap_id,
};
- el_item = __find_event_listener_item(mlxsw_core, el, priv);
+ el_item = __find_event_listener_item(mlxsw_core, el);
if (el_item)
return -EEXIST;
el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
@@ -1568,7 +1649,7 @@ int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
el_item->el = *el;
el_item->priv = priv;
- err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
+ err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item, true);
if (err)
goto err_rx_listener_register;
@@ -1586,8 +1667,7 @@ err_rx_listener_register:
EXPORT_SYMBOL(mlxsw_core_event_listener_register);
void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_event_listener *el,
- void *priv)
+ const struct mlxsw_event_listener *el)
{
struct mlxsw_event_listener_item *el_item;
const struct mlxsw_rx_listener rxl = {
@@ -1596,10 +1676,10 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
.trap_id = el->trap_id,
};
- el_item = __find_event_listener_item(mlxsw_core, el, priv);
+ el_item = __find_event_listener_item(mlxsw_core, el);
if (!el_item)
return;
- mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
+ mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl);
list_del(&el_item->list);
kfree(el_item);
}
@@ -1607,16 +1687,18 @@ EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
- void *priv)
+ void *priv, bool enabled)
{
- if (listener->is_event)
+ if (listener->is_event) {
+ WARN_ON(!enabled);
return mlxsw_core_event_listener_register(mlxsw_core,
- &listener->u.event_listener,
+ &listener->event_listener,
priv);
- else
+ } else {
return mlxsw_core_rx_listener_register(mlxsw_core,
- &listener->u.rx_listener,
- priv);
+ &listener->rx_listener,
+ priv, enabled);
+ }
}
static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
@@ -1625,26 +1707,31 @@ static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
{
if (listener->is_event)
mlxsw_core_event_listener_unregister(mlxsw_core,
- &listener->u.event_listener,
- priv);
+ &listener->event_listener);
else
mlxsw_core_rx_listener_unregister(mlxsw_core,
- &listener->u.rx_listener,
- priv);
+ &listener->rx_listener);
}
int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener, void *priv)
{
+ enum mlxsw_reg_htgt_trap_group trap_group;
+ enum mlxsw_reg_hpkt_action action;
char hpkt_pl[MLXSW_REG_HPKT_LEN];
int err;
- err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
+ err = mlxsw_core_listener_register(mlxsw_core, listener, priv,
+ listener->enabled_on_register);
if (err)
return err;
- mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
- listener->trap_group, listener->is_ctrl);
+ action = listener->enabled_on_register ? listener->en_action :
+ listener->dis_action;
+ trap_group = listener->enabled_on_register ? listener->en_trap_group :
+ listener->dis_trap_group;
+ mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
+ trap_group, listener->is_ctrl);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
if (err)
goto err_trap_set;
@@ -1664,8 +1751,8 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
char hpkt_pl[MLXSW_REG_HPKT_LEN];
if (!listener->is_event) {
- mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
- listener->trap_id, listener->trap_group,
+ mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action,
+ listener->trap_id, listener->dis_trap_group,
listener->is_ctrl);
mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}
@@ -1674,17 +1761,33 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
-int mlxsw_core_trap_action_set(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_listener *listener,
- enum mlxsw_reg_hpkt_action action)
+int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ bool enabled)
{
+ enum mlxsw_reg_htgt_trap_group trap_group;
+ enum mlxsw_reg_hpkt_action action;
char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ int err;
+
+ /* Not supported for event listener */
+ if (WARN_ON(listener->is_event))
+ return -EINVAL;
+ action = enabled ? listener->en_action : listener->dis_action;
+ trap_group = enabled ? listener->en_trap_group :
+ listener->dis_trap_group;
mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
- listener->trap_group, listener->is_ctrl);
- return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+ trap_group, listener->is_ctrl);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+ if (err)
+ return err;
+
+ mlxsw_core_rx_listener_state_set(mlxsw_core, &listener->rx_listener,
+ enabled);
+ return 0;
}
-EXPORT_SYMBOL(mlxsw_core_trap_action_set);
+EXPORT_SYMBOL(mlxsw_core_trap_state_set);
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
@@ -1942,7 +2045,8 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
rxl->local_port == local_port) &&
rxl->trap_id == rx_info->trap_id) {
- found = true;
+ if (rxl_item->enabled)
+ found = true;
break;
}
}
@@ -2168,13 +2272,22 @@ int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
/* Here we need to get the module width according to the module type. */
switch (module_type) {
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_OSFP:
+ return 8;
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X: /* fall through */
case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: /* fall through */
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_QSFP:
+ case MLXSW_REG_PMTM_MODULE_TYPE_QSFP:
return 4;
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_DSFP:
return 2;
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_SFP: /* fall through */
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
+ case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X: /* fall through */
+ case MLXSW_REG_PMTM_MODULE_TYPE_SFP:
return 1;
default:
return -EINVAL;
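
Editor's note: each new devlink trap-policer op in this file follows the same delegation shape: check whether the underlying driver implements the hook, return -EOPNOTSUPP if not, otherwise forward. The pattern in isolation (names are illustrative, not mlxsw's):

    #include <linux/errno.h>
    #include <linux/types.h>

    struct example_driver_ops {
            int (*policer_set)(void *priv, u64 rate, u64 burst);
    };

    static int example_policer_set(const struct example_driver_ops *ops,
                                   void *priv, u64 rate, u64 burst)
    {
            if (!ops->policer_set)
                    return -EOPNOTSUPP; /* hook is optional per driver */
            return ops->policer_set(priv, rate, burst);
    }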
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 543476a2e503..22b0dfa7cfae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -62,7 +62,6 @@ struct mlxsw_rx_listener {
void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
u8 local_port;
u16 trap_id;
- enum mlxsw_reg_hpkt_action action;
};
struct mlxsw_event_listener {
@@ -76,58 +75,71 @@ struct mlxsw_listener {
union {
struct mlxsw_rx_listener rx_listener;
struct mlxsw_event_listener event_listener;
- } u;
- enum mlxsw_reg_hpkt_action action;
- enum mlxsw_reg_hpkt_action unreg_action;
- u8 trap_group;
- bool is_ctrl; /* should go via control buffer or not */
- bool is_event;
+ };
+ enum mlxsw_reg_hpkt_action en_action; /* Action when enabled */
+ enum mlxsw_reg_hpkt_action dis_action; /* Action when disabled */
+ u8 en_trap_group; /* Trap group when enabled */
+ u8 dis_trap_group; /* Trap group when disabled */
+ u8 is_ctrl:1, /* should go via control buffer or not */
+ is_event:1,
+ enabled_on_register:1; /* Trap should be enabled when listener
+ * is registered.
+ */
};
-#define MLXSW_RXL(_func, _trap_id, _action, _is_ctrl, _trap_group, \
- _unreg_action) \
- { \
- .trap_id = MLXSW_TRAP_ID_##_trap_id, \
- .u.rx_listener = \
- { \
- .func = _func, \
- .local_port = MLXSW_PORT_DONT_CARE, \
- .trap_id = MLXSW_TRAP_ID_##_trap_id, \
- }, \
- .action = MLXSW_REG_HPKT_ACTION_##_action, \
- .unreg_action = MLXSW_REG_HPKT_ACTION_##_unreg_action, \
- .trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_trap_group, \
- .is_ctrl = _is_ctrl, \
- .is_event = false, \
+#define __MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
+ _dis_action, _enabled_on_register, _dis_trap_group) \
+ { \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ .rx_listener = \
+ { \
+ .func = _func, \
+ .local_port = MLXSW_PORT_DONT_CARE, \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ }, \
+ .en_action = MLXSW_REG_HPKT_ACTION_##_en_action, \
+ .dis_action = MLXSW_REG_HPKT_ACTION_##_dis_action, \
+ .en_trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_en_trap_group, \
+ .dis_trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_dis_trap_group, \
+ .is_ctrl = _is_ctrl, \
+ .enabled_on_register = _enabled_on_register, \
}
-#define MLXSW_EVENTL(_func, _trap_id, _trap_group) \
- { \
- .trap_id = MLXSW_TRAP_ID_##_trap_id, \
- .u.event_listener = \
- { \
- .func = _func, \
- .trap_id = MLXSW_TRAP_ID_##_trap_id, \
- }, \
- .action = MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, \
- .trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_trap_group, \
- .is_ctrl = false, \
- .is_event = true, \
+#define MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _trap_group, \
+ _dis_action) \
+ __MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _trap_group, \
+ _dis_action, true, _trap_group)
+
+#define MLXSW_RXL_DIS(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
+ _dis_action, _dis_trap_group) \
+ __MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
+ _dis_action, false, _dis_trap_group)
+
+#define MLXSW_EVENTL(_func, _trap_id, _trap_group) \
+ { \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ .event_listener = \
+ { \
+ .func = _func, \
+ .trap_id = MLXSW_TRAP_ID_##_trap_id, \
+ }, \
+ .en_action = MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, \
+ .en_trap_group = MLXSW_REG_HTGT_TRAP_GROUP_##_trap_group, \
+ .is_event = true, \
+ .enabled_on_register = true, \
}
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_rx_listener *rxl,
- void *priv);
+ void *priv, bool enabled);
void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_rx_listener *rxl,
- void *priv);
+ const struct mlxsw_rx_listener *rxl);
int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_event_listener *el,
void *priv);
void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_event_listener *el,
- void *priv);
+ const struct mlxsw_event_listener *el);
int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
@@ -135,9 +147,9 @@ int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
void *priv);
-int mlxsw_core_trap_action_set(struct mlxsw_core *mlxsw_core,
- const struct mlxsw_listener *listener,
- enum mlxsw_reg_hpkt_action action);
+int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ bool enabled);
typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload,
size_t payload_len, unsigned long cb_priv);
@@ -315,6 +327,20 @@ struct mlxsw_driver {
enum devlink_trap_action action);
int (*trap_group_init)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group);
+ int (*trap_group_set)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer);
+ int (*trap_policer_init)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer);
+ void (*trap_policer_fini)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer);
+ int (*trap_policer_set)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack);
+ int (*trap_policer_counter_get)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
int (*resources_register)(struct mlxsw_core *mlxsw_core);
@@ -461,7 +487,10 @@ enum mlxsw_devlink_param_id {
};
struct mlxsw_skb_cb {
- struct mlxsw_tx_info tx_info;
+ union {
+ struct mlxsw_tx_info tx_info;
+ u32 cookie_index; /* Only used during receive */
+ };
};
static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
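
Editor's note: the reworked listener macros split the enabled and disabled action/trap-group pairs: MLXSW_RXL keeps the old semantics (enabled on register, one group for both states), while MLXSW_RXL_DIS registers the listener disabled until mlxsw_core_trap_state_set() flips it. A hedged usage sketch; the handler, trap IDs and trap groups are placeholders patterned after the real trap tables:

    static void example_rx_func(struct sk_buff *skb, u8 local_port, void *priv);

    static const struct mlxsw_listener example_listeners[] = {
            /* Enabled on register; same trap group when enabled or disabled. */
            MLXSW_RXL(example_rx_func, STP, TRAP_TO_CPU, true, SP_STP, DISCARD),
            /* Registered disabled; starts trapping only after
             * mlxsw_core_trap_state_set(core, &example_listeners[1], true).
             */
            MLXSW_RXL_DIS(example_rx_func, LLDP, TRAP_TO_CPU, false,
                          SP_LLDP, DISCARD, SP_LLDP),
    };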
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index c51b2adfc1e1..70a104e728f6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -7,6 +7,9 @@
#include <linux/errno.h>
#include <linux/rhashtable.h>
#include <linux/list.h>
+#include <linux/idr.h>
+#include <linux/refcount.h>
+#include <net/flow_offload.h>
#include "item.h"
#include "trap.h"
@@ -63,6 +66,8 @@ struct mlxsw_afa {
void *ops_priv;
struct rhashtable set_ht;
struct rhashtable fwd_entry_ht;
+ struct rhashtable cookie_ht;
+ struct idr cookie_idr;
};
#define MLXSW_AFA_SET_LEN 0xA8
@@ -121,6 +126,55 @@ static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
.automatic_shrinking = true,
};
+struct mlxsw_afa_cookie {
+ struct rhash_head ht_node;
+ refcount_t ref_count;
+ struct rcu_head rcu;
+ u32 cookie_index;
+ struct flow_action_cookie fa_cookie;
+};
+
+static u32 mlxsw_afa_cookie_hash(const struct flow_action_cookie *fa_cookie,
+ u32 seed)
+{
+ return jhash2((u32 *) fa_cookie->cookie,
+ fa_cookie->cookie_len / sizeof(u32), seed);
+}
+
+static u32 mlxsw_afa_cookie_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct flow_action_cookie *fa_cookie = data;
+
+ return mlxsw_afa_cookie_hash(fa_cookie, seed);
+}
+
+static u32 mlxsw_afa_cookie_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct mlxsw_afa_cookie *cookie = data;
+
+ return mlxsw_afa_cookie_hash(&cookie->fa_cookie, seed);
+}
+
+static int mlxsw_afa_cookie_obj_cmpfn(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ const struct flow_action_cookie *fa_cookie = arg->key;
+ const struct mlxsw_afa_cookie *cookie = obj;
+
+ if (cookie->fa_cookie.cookie_len == fa_cookie->cookie_len)
+ return memcmp(cookie->fa_cookie.cookie, fa_cookie->cookie,
+ fa_cookie->cookie_len);
+ return 1;
+}
+
+static const struct rhashtable_params mlxsw_afa_cookie_ht_params = {
+ .head_offset = offsetof(struct mlxsw_afa_cookie, ht_node),
+ .hashfn = mlxsw_afa_cookie_key_hashfn,
+ .obj_hashfn = mlxsw_afa_cookie_obj_hashfn,
+ .obj_cmpfn = mlxsw_afa_cookie_obj_cmpfn,
+ .automatic_shrinking = true,
+};
+
struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
const struct mlxsw_afa_ops *ops,
void *ops_priv)
@@ -138,11 +192,18 @@ struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
&mlxsw_afa_fwd_entry_ht_params);
if (err)
goto err_fwd_entry_rhashtable_init;
+ err = rhashtable_init(&mlxsw_afa->cookie_ht,
+ &mlxsw_afa_cookie_ht_params);
+ if (err)
+ goto err_cookie_rhashtable_init;
+ idr_init(&mlxsw_afa->cookie_idr);
mlxsw_afa->max_acts_per_set = max_acts_per_set;
mlxsw_afa->ops = ops;
mlxsw_afa->ops_priv = ops_priv;
return mlxsw_afa;
+err_cookie_rhashtable_init:
+ rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
err_fwd_entry_rhashtable_init:
rhashtable_destroy(&mlxsw_afa->set_ht);
err_set_rhashtable_init:
@@ -153,6 +214,9 @@ EXPORT_SYMBOL(mlxsw_afa_create);
void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa)
{
+ WARN_ON(!idr_is_empty(&mlxsw_afa->cookie_idr));
+ idr_destroy(&mlxsw_afa->cookie_idr);
+ rhashtable_destroy(&mlxsw_afa->cookie_ht);
rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
rhashtable_destroy(&mlxsw_afa->set_ht);
kfree(mlxsw_afa);
@@ -627,6 +691,151 @@ err_counter_index_get:
return ERR_PTR(err);
}
+/* 20 bits is the maximum that the hardware can handle in the trap with
+ * userdef action and carry along with the trapped packet.
+ */
+#define MLXSW_AFA_COOKIE_INDEX_BITS 20
+#define MLXSW_AFA_COOKIE_INDEX_MAX ((1 << MLXSW_AFA_COOKIE_INDEX_BITS) - 1)
+
+static struct mlxsw_afa_cookie *
+mlxsw_afa_cookie_create(struct mlxsw_afa *mlxsw_afa,
+ const struct flow_action_cookie *fa_cookie)
+{
+ struct mlxsw_afa_cookie *cookie;
+ u32 cookie_index;
+ int err;
+
+ cookie = kzalloc(sizeof(*cookie) + fa_cookie->cookie_len, GFP_KERNEL);
+ if (!cookie)
+ return ERR_PTR(-ENOMEM);
+ refcount_set(&cookie->ref_count, 1);
+ memcpy(&cookie->fa_cookie, fa_cookie,
+ sizeof(*fa_cookie) + fa_cookie->cookie_len);
+
+ err = rhashtable_insert_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
+ mlxsw_afa_cookie_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ /* Start cookie indexes at 1, leaving index 0 unused. Packets
+ * coming from the HW that were not dropped by a drop-with-cookie
+ * action will pass cookie_index 0 to the lookup.
+ */
+ cookie_index = 1;
+ err = idr_alloc_u32(&mlxsw_afa->cookie_idr, cookie, &cookie_index,
+ MLXSW_AFA_COOKIE_INDEX_MAX, GFP_KERNEL);
+ if (err)
+ goto err_idr_alloc;
+ cookie->cookie_index = cookie_index;
+ return cookie;
+
+err_idr_alloc:
+ rhashtable_remove_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
+ mlxsw_afa_cookie_ht_params);
+err_rhashtable_insert:
+ kfree(cookie);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_afa_cookie_destroy(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_cookie *cookie)
+{
+ idr_remove(&mlxsw_afa->cookie_idr, cookie->cookie_index);
+ rhashtable_remove_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
+ mlxsw_afa_cookie_ht_params);
+ kfree_rcu(cookie, rcu);
+}
+
+static struct mlxsw_afa_cookie *
+mlxsw_afa_cookie_get(struct mlxsw_afa *mlxsw_afa,
+ const struct flow_action_cookie *fa_cookie)
+{
+ struct mlxsw_afa_cookie *cookie;
+
+ cookie = rhashtable_lookup_fast(&mlxsw_afa->cookie_ht, fa_cookie,
+ mlxsw_afa_cookie_ht_params);
+ if (cookie) {
+ refcount_inc(&cookie->ref_count);
+ return cookie;
+ }
+ return mlxsw_afa_cookie_create(mlxsw_afa, fa_cookie);
+}
+
+static void mlxsw_afa_cookie_put(struct mlxsw_afa *mlxsw_afa,
+ struct mlxsw_afa_cookie *cookie)
+{
+ if (!refcount_dec_and_test(&cookie->ref_count))
+ return;
+ mlxsw_afa_cookie_destroy(mlxsw_afa, cookie);
+}
+
+/* RCU read lock must be held */
+const struct flow_action_cookie *
+mlxsw_afa_cookie_lookup(struct mlxsw_afa *mlxsw_afa, u32 cookie_index)
+{
+ struct mlxsw_afa_cookie *cookie;
+
+ /* 0 index means no cookie */
+ if (!cookie_index)
+ return NULL;
+ cookie = idr_find(&mlxsw_afa->cookie_idr, cookie_index);
+ if (!cookie)
+ return NULL;
+ return &cookie->fa_cookie;
+}
+EXPORT_SYMBOL(mlxsw_afa_cookie_lookup);
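
Editor's note: cookie indexes start at 1 precisely so that 0 can mean "no cookie" on the receive side, where the 20-bit index travels back with the trapped packet. A hedged sketch of the RX-side lookup under RCU (example_consume_cookie() is hypothetical; cookie_index comes from the mlxsw_skb_cb union added in core.h below):

    static void example_report_dropped_skb(struct mlxsw_afa *afa,
                                           struct sk_buff *skb)
    {
            const struct flow_action_cookie *fa_cookie;

            rcu_read_lock();
            /* Index 0 is reserved to mean "no cookie attached". */
            fa_cookie = mlxsw_afa_cookie_lookup(afa,
                                                mlxsw_skb_cb(skb)->cookie_index);
            if (fa_cookie)
                    example_consume_cookie(fa_cookie); /* hypothetical consumer */
            rcu_read_unlock();
    }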
+
+struct mlxsw_afa_cookie_ref {
+ struct mlxsw_afa_resource resource;
+ struct mlxsw_afa_cookie *cookie;
+};
+
+static void
+mlxsw_afa_cookie_ref_destroy(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_cookie_ref *cookie_ref)
+{
+ mlxsw_afa_resource_del(&cookie_ref->resource);
+ mlxsw_afa_cookie_put(block->afa, cookie_ref->cookie);
+ kfree(cookie_ref);
+}
+
+static void
+mlxsw_afa_cookie_ref_destructor(struct mlxsw_afa_block *block,
+ struct mlxsw_afa_resource *resource)
+{
+ struct mlxsw_afa_cookie_ref *cookie_ref;
+
+ cookie_ref = container_of(resource, struct mlxsw_afa_cookie_ref,
+ resource);
+ mlxsw_afa_cookie_ref_destroy(block, cookie_ref);
+}
+
+static struct mlxsw_afa_cookie_ref *
+mlxsw_afa_cookie_ref_create(struct mlxsw_afa_block *block,
+ const struct flow_action_cookie *fa_cookie)
+{
+ struct mlxsw_afa_cookie_ref *cookie_ref;
+ struct mlxsw_afa_cookie *cookie;
+ int err;
+
+ cookie_ref = kzalloc(sizeof(*cookie_ref), GFP_KERNEL);
+ if (!cookie_ref)
+ return ERR_PTR(-ENOMEM);
+ cookie = mlxsw_afa_cookie_get(block->afa, fa_cookie);
+ if (IS_ERR(cookie)) {
+ err = PTR_ERR(cookie);
+ goto err_cookie_get;
+ }
+ cookie_ref->cookie = cookie;
+ cookie_ref->resource.destructor = mlxsw_afa_cookie_ref_destructor;
+ mlxsw_afa_resource_add(block, &cookie_ref->resource);
+ return cookie_ref;
+
+err_cookie_get:
+ kfree(cookie_ref);
+ return ERR_PTR(err);
+}
+
#define MLXSW_AFA_ONE_ACTION_LEN 32
#define MLXSW_AFA_PAYLOAD_OFFSET 4
@@ -747,97 +956,170 @@ int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
}
EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
-/* Trap / Discard Action
- * ---------------------
- * The Trap / Discard action enables trapping / mirroring packets to the CPU
+/* Trap Action / Trap With Userdef Action
+ * --------------------------------------
+ * The Trap action enables trapping / mirroring packets to the CPU
* as well as discarding packets.
* The ACL Trap / Discard separates the forward/discard control from CPU
* trap control. In addition, the Trap / Discard action enables activating
* SPAN (port mirroring).
+ *
+ * The Trap with userdef action has the same functionality as the Trap
+ * action, with the addition of a user-defined value that can be set and
+ * used by higher layer applications.
*/
-#define MLXSW_AFA_TRAPDISC_CODE 0x03
-#define MLXSW_AFA_TRAPDISC_SIZE 1
+#define MLXSW_AFA_TRAP_CODE 0x03
+#define MLXSW_AFA_TRAP_SIZE 1
-enum mlxsw_afa_trapdisc_trap_action {
- MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP = 0,
- MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP = 2,
+#define MLXSW_AFA_TRAPWU_CODE 0x04
+#define MLXSW_AFA_TRAPWU_SIZE 2
+
+enum mlxsw_afa_trap_trap_action {
+ MLXSW_AFA_TRAP_TRAP_ACTION_NOP = 0,
+ MLXSW_AFA_TRAP_TRAP_ACTION_TRAP = 2,
};
-/* afa_trapdisc_trap_action
+/* afa_trap_trap_action
* Trap Action.
*/
-MLXSW_ITEM32(afa, trapdisc, trap_action, 0x00, 24, 4);
+MLXSW_ITEM32(afa, trap, trap_action, 0x00, 24, 4);
-enum mlxsw_afa_trapdisc_forward_action {
- MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD = 1,
- MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3,
+enum mlxsw_afa_trap_forward_action {
+ MLXSW_AFA_TRAP_FORWARD_ACTION_FORWARD = 1,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD = 3,
};
-/* afa_trapdisc_forward_action
+/* afa_trap_forward_action
* Forward Action.
*/
-MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);
+MLXSW_ITEM32(afa, trap, forward_action, 0x00, 0, 4);
-/* afa_trapdisc_trap_id
+/* afa_trap_trap_id
* Trap ID to configure.
*/
-MLXSW_ITEM32(afa, trapdisc, trap_id, 0x04, 0, 9);
+MLXSW_ITEM32(afa, trap, trap_id, 0x04, 0, 9);
-/* afa_trapdisc_mirror_agent
+/* afa_trap_mirror_agent
* Mirror agent.
*/
-MLXSW_ITEM32(afa, trapdisc, mirror_agent, 0x08, 29, 3);
+MLXSW_ITEM32(afa, trap, mirror_agent, 0x08, 29, 3);
-/* afa_trapdisc_mirror_enable
+/* afa_trap_mirror_enable
* Mirror enable.
*/
-MLXSW_ITEM32(afa, trapdisc, mirror_enable, 0x08, 24, 1);
+MLXSW_ITEM32(afa, trap, mirror_enable, 0x08, 24, 1);
+
+/* afa_trap_user_def_val
+ * Value for SW usage. Can be used to pass information about which
+ * rule caused a trap. This may be overwritten by later traps.
+ * This field sets the packet's user_def_val only if this is the
+ * first trap_id, or if this trap_id replaced the packet's previous
+ * trap_id.
+ */
+MLXSW_ITEM32(afa, trap, user_def_val, 0x0C, 0, 20);
static inline void
-mlxsw_afa_trapdisc_pack(char *payload,
- enum mlxsw_afa_trapdisc_trap_action trap_action,
- enum mlxsw_afa_trapdisc_forward_action forward_action,
- u16 trap_id)
+mlxsw_afa_trap_pack(char *payload,
+ enum mlxsw_afa_trap_trap_action trap_action,
+ enum mlxsw_afa_trap_forward_action forward_action,
+ u16 trap_id)
{
- mlxsw_afa_trapdisc_trap_action_set(payload, trap_action);
- mlxsw_afa_trapdisc_forward_action_set(payload, forward_action);
- mlxsw_afa_trapdisc_trap_id_set(payload, trap_id);
+ mlxsw_afa_trap_trap_action_set(payload, trap_action);
+ mlxsw_afa_trap_forward_action_set(payload, forward_action);
+ mlxsw_afa_trap_trap_id_set(payload, trap_id);
}
static inline void
-mlxsw_afa_trapdisc_mirror_pack(char *payload, bool mirror_enable,
- u8 mirror_agent)
+mlxsw_afa_trapwu_pack(char *payload,
+ enum mlxsw_afa_trap_trap_action trap_action,
+ enum mlxsw_afa_trap_forward_action forward_action,
+ u16 trap_id, u32 user_def_val)
{
- mlxsw_afa_trapdisc_mirror_enable_set(payload, mirror_enable);
- mlxsw_afa_trapdisc_mirror_agent_set(payload, mirror_agent);
+ mlxsw_afa_trap_pack(payload, trap_action, forward_action, trap_id);
+ mlxsw_afa_trap_user_def_val_set(payload, user_def_val);
}
-int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
+static inline void
+mlxsw_afa_trap_mirror_pack(char *payload, bool mirror_enable,
+ u8 mirror_agent)
{
- char *act = mlxsw_afa_block_append_action(block,
- MLXSW_AFA_TRAPDISC_CODE,
- MLXSW_AFA_TRAPDISC_SIZE);
+ mlxsw_afa_trap_mirror_enable_set(payload, mirror_enable);
+ mlxsw_afa_trap_mirror_agent_set(payload, mirror_agent);
+}
+
+static int mlxsw_afa_block_append_drop_plain(struct mlxsw_afa_block *block,
+ bool ingress)
+{
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE,
+ MLXSW_AFA_TRAP_SIZE);
if (IS_ERR(act))
return PTR_ERR(act);
- mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
- MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0);
+ mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD,
+ ingress ? MLXSW_TRAP_ID_DISCARD_INGRESS_ACL :
+ MLXSW_TRAP_ID_DISCARD_EGRESS_ACL);
return 0;
}
+
+static int
+mlxsw_afa_block_append_drop_with_cookie(struct mlxsw_afa_block *block,
+ bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_afa_cookie_ref *cookie_ref;
+ u32 cookie_index;
+ char *act;
+ int err;
+
+ cookie_ref = mlxsw_afa_cookie_ref_create(block, fa_cookie);
+ if (IS_ERR(cookie_ref)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create cookie for drop action");
+ return PTR_ERR(cookie_ref);
+ }
+ cookie_index = cookie_ref->cookie->cookie_index;
+
+ act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAPWU_CODE,
+ MLXSW_AFA_TRAPWU_SIZE);
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append drop with cookie action");
+ err = PTR_ERR(act);
+ goto err_append_action;
+ }
+ mlxsw_afa_trapwu_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD,
+ ingress ? MLXSW_TRAP_ID_DISCARD_INGRESS_ACL :
+ MLXSW_TRAP_ID_DISCARD_EGRESS_ACL,
+ cookie_index);
+ return 0;
+
+err_append_action:
+ mlxsw_afa_cookie_ref_destroy(block, cookie_ref);
+ return err;
+}
+
+int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block, bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack)
+{
+ return fa_cookie ?
+ mlxsw_afa_block_append_drop_with_cookie(block, ingress,
+ fa_cookie, extack) :
+ mlxsw_afa_block_append_drop_plain(block, ingress);
+}
EXPORT_SYMBOL(mlxsw_afa_block_append_drop);
int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
{
- char *act = mlxsw_afa_block_append_action(block,
- MLXSW_AFA_TRAPDISC_CODE,
- MLXSW_AFA_TRAPDISC_SIZE);
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE,
+ MLXSW_AFA_TRAP_SIZE);
if (IS_ERR(act))
return PTR_ERR(act);
- mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
- MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
- trap_id);
+ mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD, trap_id);
return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap);
@@ -845,15 +1127,13 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_trap);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
u16 trap_id)
{
- char *act = mlxsw_afa_block_append_action(block,
- MLXSW_AFA_TRAPDISC_CODE,
- MLXSW_AFA_TRAPDISC_SIZE);
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE,
+ MLXSW_AFA_TRAP_SIZE);
if (IS_ERR(act))
return PTR_ERR(act);
- mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
- MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD,
- trap_id);
+ mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_FORWARD, trap_id);
return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);
@@ -920,13 +1200,13 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
u8 mirror_agent)
{
char *act = mlxsw_afa_block_append_action(block,
- MLXSW_AFA_TRAPDISC_CODE,
- MLXSW_AFA_TRAPDISC_SIZE);
+ MLXSW_AFA_TRAP_CODE,
+ MLXSW_AFA_TRAP_SIZE);
if (IS_ERR(act))
return PTR_ERR(act);
- mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
- MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
- mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
+ mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_NOP,
+ MLXSW_AFA_TRAP_FORWARD_ACTION_FORWARD, 0);
+ mlxsw_afa_trap_mirror_pack(act, true, mirror_agent);
return 0;
}
@@ -958,6 +1238,179 @@ err_append_allocated_mirror:
}
EXPORT_SYMBOL(mlxsw_afa_block_append_mirror);
+/* QoS Action
+ * ----------
+ * The QOS_ACTION is used for manipulating the QoS attributes of a packet. It
+ * can be used to change the DSCP, ECN, Color and Switch Priority of the
+ * packet. Note that the PCP field can be changed using the VLAN action.
+ */
+
+#define MLXSW_AFA_QOS_CODE 0x06
+#define MLXSW_AFA_QOS_SIZE 1
+
+enum mlxsw_afa_qos_ecn_cmd {
+ /* Do nothing */
+ MLXSW_AFA_QOS_ECN_CMD_NOP,
+ /* Set ECN to afa_qos_ecn */
+ MLXSW_AFA_QOS_ECN_CMD_SET,
+};
+
+/* afa_qos_ecn_cmd
+ */
+MLXSW_ITEM32(afa, qos, ecn_cmd, 0x04, 29, 3);
+
+/* afa_qos_ecn
+ * ECN value.
+ */
+MLXSW_ITEM32(afa, qos, ecn, 0x04, 24, 2);
+
+enum mlxsw_afa_qos_dscp_cmd {
+ /* Do nothing */
+ MLXSW_AFA_QOS_DSCP_CMD_NOP,
+ /* Set DSCP 3 LSB bits according to dscp[2:0] */
+ MLXSW_AFA_QOS_DSCP_CMD_SET_3LSB,
+ /* Set DSCP 3 MSB bits according to dscp[5:3] */
+ MLXSW_AFA_QOS_DSCP_CMD_SET_3MSB,
+ /* Set DSCP 6 bits according to dscp[5:0] */
+ MLXSW_AFA_QOS_DSCP_CMD_SET_ALL,
+};
+
+/* afa_qos_dscp_cmd
+ * DSCP command.
+ */
+MLXSW_ITEM32(afa, qos, dscp_cmd, 0x04, 14, 2);
+
+/* afa_qos_dscp
+ * DSCP value.
+ */
+MLXSW_ITEM32(afa, qos, dscp, 0x04, 0, 6);
+
+enum mlxsw_afa_qos_switch_prio_cmd {
+ /* Do nothing */
+ MLXSW_AFA_QOS_SWITCH_PRIO_CMD_NOP,
+ /* Set Switch Priority to afa_qos_switch_prio */
+ MLXSW_AFA_QOS_SWITCH_PRIO_CMD_SET,
+};
+
+/* afa_qos_switch_prio_cmd
+ */
+MLXSW_ITEM32(afa, qos, switch_prio_cmd, 0x08, 14, 2);
+
+/* afa_qos_switch_prio
+ * Switch Priority.
+ */
+MLXSW_ITEM32(afa, qos, switch_prio, 0x08, 0, 4);
+
+enum mlxsw_afa_qos_dscp_rw {
+ MLXSW_AFA_QOS_DSCP_RW_PRESERVE,
+ MLXSW_AFA_QOS_DSCP_RW_SET,
+ MLXSW_AFA_QOS_DSCP_RW_CLEAR,
+};
+
+/* afa_qos_dscp_rw
+ * DSCP Re-write Enable. Controls the rewrite_enable bit for DSCP.
+ */
+MLXSW_ITEM32(afa, qos, dscp_rw, 0x0C, 30, 2);
+
+static inline void
+mlxsw_afa_qos_ecn_pack(char *payload,
+ enum mlxsw_afa_qos_ecn_cmd ecn_cmd, u8 ecn)
+{
+ mlxsw_afa_qos_ecn_cmd_set(payload, ecn_cmd);
+ mlxsw_afa_qos_ecn_set(payload, ecn);
+}
+
+static inline void
+mlxsw_afa_qos_dscp_pack(char *payload,
+ enum mlxsw_afa_qos_dscp_cmd dscp_cmd, u8 dscp)
+{
+ mlxsw_afa_qos_dscp_cmd_set(payload, dscp_cmd);
+ mlxsw_afa_qos_dscp_set(payload, dscp);
+}
+
+static inline void
+mlxsw_afa_qos_switch_prio_pack(char *payload,
+ enum mlxsw_afa_qos_switch_prio_cmd prio_cmd,
+ u8 prio)
+{
+ mlxsw_afa_qos_switch_prio_cmd_set(payload, prio_cmd);
+ mlxsw_afa_qos_switch_prio_set(payload, prio);
+}
+
+static int __mlxsw_afa_block_append_qos_dsfield(struct mlxsw_afa_block *block,
+ bool set_dscp, u8 dscp,
+ bool set_ecn, u8 ecn,
+ struct netlink_ext_ack *extack)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_QOS_CODE,
+ MLXSW_AFA_QOS_SIZE);
+
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append QOS action");
+ return PTR_ERR(act);
+ }
+
+ if (set_ecn)
+ mlxsw_afa_qos_ecn_pack(act, MLXSW_AFA_QOS_ECN_CMD_SET, ecn);
+ if (set_dscp) {
+ mlxsw_afa_qos_dscp_pack(act, MLXSW_AFA_QOS_DSCP_CMD_SET_ALL,
+ dscp);
+ mlxsw_afa_qos_dscp_rw_set(act, MLXSW_AFA_QOS_DSCP_RW_CLEAR);
+ }
+
+ return 0;
+}
+
+int mlxsw_afa_block_append_qos_dsfield(struct mlxsw_afa_block *block,
+ u8 dsfield,
+ struct netlink_ext_ack *extack)
+{
+ return __mlxsw_afa_block_append_qos_dsfield(block,
+ true, dsfield >> 2,
+ true, dsfield & 0x03,
+ extack);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_qos_dsfield);
+
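
The dsfield byte carries DSCP in its upper six bits and ECN in its lower two,
which is what the "dsfield >> 2" and "dsfield & 0x03" split above implements.
A minimal standalone sketch of the same split, with an illustrative value:

    #include <stdio.h>

    int main(void)
    {
            unsigned char dsfield = 0xb8;         /* DSCP 46 (EF), ECN 0 */
            unsigned char dscp = dsfield >> 2;    /* upper 6 bits -> 46 */
            unsigned char ecn = dsfield & 0x03;   /* lower 2 bits -> 0 */

            printf("dscp=%u ecn=%u\n", dscp, ecn);
            return 0;
    }
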
+int mlxsw_afa_block_append_qos_dscp(struct mlxsw_afa_block *block,
+ u8 dscp, struct netlink_ext_ack *extack)
+{
+ return __mlxsw_afa_block_append_qos_dsfield(block,
+ true, dscp,
+ false, 0,
+ extack);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_qos_dscp);
+
+int mlxsw_afa_block_append_qos_ecn(struct mlxsw_afa_block *block,
+ u8 ecn, struct netlink_ext_ack *extack)
+{
+ return __mlxsw_afa_block_append_qos_dsfield(block,
+ false, 0,
+ true, ecn,
+ extack);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_qos_ecn);
+
+int mlxsw_afa_block_append_qos_switch_prio(struct mlxsw_afa_block *block,
+ u8 prio,
+ struct netlink_ext_ack *extack)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_QOS_CODE,
+ MLXSW_AFA_QOS_SIZE);
+
+ if (IS_ERR(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append QOS action");
+ return PTR_ERR(act);
+ }
+ mlxsw_afa_qos_switch_prio_pack(act, MLXSW_AFA_QOS_SWITCH_PRIO_CMD_SET,
+ prio);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_qos_switch_prio);
+
/* Forwarding Action
* -----------------
* Forwarding Action can be used to implement Policy Based Switching (PBS)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 0e3a59dda12e..8c2705e16ef7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <net/flow_offload.h>
struct mlxsw_afa;
struct mlxsw_afa_block;
@@ -42,7 +43,11 @@ int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity);
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
-int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
+const struct flow_action_cookie *
+mlxsw_afa_cookie_lookup(struct mlxsw_afa *mlxsw_afa, u32 cookie_index);
+int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block, bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
u16 trap_id);
@@ -57,6 +62,16 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
u16 vid, u8 pcp, u8 et,
struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_qos_switch_prio(struct mlxsw_afa_block *block,
+ u8 prio,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_qos_dsfield(struct mlxsw_afa_block *block,
+ u8 dsfield,
+ struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_qos_dscp(struct mlxsw_afa_block *block,
+ u8 dscp, struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_qos_ecn(struct mlxsw_afa_block *block,
+ u8 ecn, struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
u32 counter_index);
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index feb4672a5ac0..9f6905fa6b47 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -9,6 +9,41 @@
#include "item.h"
#include "core_acl_flex_keys.h"
+/* For the purpose of the driver, define an internal storage scratchpad
+ * that will be used to store key/mask values. For each defined element type
+ * define an internal storage geometry.
+ *
+ * When adding new elements, MLXSW_AFK_ELEMENT_STORAGE_SIZE must be increased
+ * accordingly.
+ */
+static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_0_31, 0x0C, 4),
+ MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
+ MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
+ MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
+ MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_8_10, 0x18, 17, 3),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_0_7, 0x18, 20, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_0_31, 0x2C, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_96_127, 0x30, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_64_95, 0x34, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
+};
+
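
The table's last element, DST_IP_0_31, ends at offset 0x3C + 4 = 0x40, exactly
the MLXSW_AFK_ELEMENT_STORAGE_SIZE (0x40) retained in core_acl_flex_keys.h,
which is why the comment above insists the define be bumped together with new
elements. A hedged sketch of a compile-time guard for this invariant (not part
of the patch):

    /* Hypothetical guard; offset and size taken from the table above. */
    static inline void mlxsw_afk_storage_size_check(void)
    {
            BUILD_BUG_ON(0x3C + 4 > MLXSW_AFK_ELEMENT_STORAGE_SIZE);
    }
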
struct mlxsw_afk {
struct list_head key_info_list;
unsigned int max_blocks;
@@ -26,13 +61,15 @@ static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i];
for (j = 0; j < block->instances_count; j++) {
+ const struct mlxsw_afk_element_info *elinfo;
struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[j];
- if (elinst->type != elinst->info->type ||
+ elinfo = &mlxsw_afk_element_infos[elinst->element];
+ if (elinst->type != elinfo->type ||
(!elinst->avoid_size_check &&
elinst->item.size.bits !=
- elinst->info->item.size.bits))
+ elinfo->item.size.bits))
return false;
}
}
@@ -72,7 +109,7 @@ struct mlxsw_afk_key_info {
* is index inside "blocks"
*/
struct mlxsw_afk_element_usage elusage;
- const struct mlxsw_afk_block *blocks[0];
+ const struct mlxsw_afk_block *blocks[];
};
static bool
@@ -116,7 +153,7 @@ static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[j];
- if (elinst->info->element == element) {
+ if (elinst->element == element) {
__set_bit(element, picker->hits[i].element);
picker->hits[i].total++;
}
@@ -301,7 +338,7 @@ mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block,
struct mlxsw_afk_element_inst *elinst;
elinst = &block->instances[i];
- if (elinst->info->element == element)
+ if (elinst->element == element)
return elinst;
}
return NULL;
@@ -409,9 +446,12 @@ static void
mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
char *output, char *storage, int u32_diff)
{
- const struct mlxsw_item *storage_item = &elinst->info->item;
const struct mlxsw_item *output_item = &elinst->item;
+ const struct mlxsw_afk_element_info *elinfo;
+ const struct mlxsw_item *storage_item;
+ elinfo = &mlxsw_afk_element_infos[elinst->element];
+ storage_item = &elinfo->item;
if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
mlxsw_sp_afk_encode_u32(storage_item, output_item,
storage, output, u32_diff);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index cb229b55ecc4..a47a17c04c62 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -69,42 +69,10 @@ struct mlxsw_afk_element_info {
MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \
_element, _offset, 0, _size)
-/* For the purpose of the driver, define an internal storage scratchpad
- * that will be used to store key/mask values. For each defined element type
- * define an internal storage geometry.
- */
-static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16),
- MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_32_47, 0x04, 2),
- MLXSW_AFK_ELEMENT_INFO_BUF(DMAC_0_31, 0x06, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_32_47, 0x0A, 2),
- MLXSW_AFK_ELEMENT_INFO_BUF(SMAC_0_31, 0x0C, 4),
- MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
- MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
- MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
- MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
- MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
- MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
- MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
- MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
- MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
- MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_8_10, 0x18, 17, 3),
- MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_0_7, 0x18, 20, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_0_31, 0x2C, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_96_127, 0x30, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_64_95, 0x34, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
-};
-
#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
struct mlxsw_afk_element_inst { /* element instance in actual block */
- const struct mlxsw_afk_element_info *info;
+ enum mlxsw_afk_element element;
enum mlxsw_afk_element_type type;
struct mlxsw_item item; /* element geometry in block */
int u32_key_diff; /* in case value needs to be adjusted before write
@@ -116,7 +84,7 @@ struct mlxsw_afk_element_inst { /* element instance in actual block */
#define MLXSW_AFK_ELEMENT_INST(_type, _element, _offset, \
_shift, _size, _u32_key_diff, _avoid_size_check) \
{ \
- .info = &mlxsw_afk_element_infos[MLXSW_AFK_ELEMENT_##_element], \
+ .element = MLXSW_AFK_ELEMENT_##_element, \
.type = _type, \
.item = { \
.offset = _offset, \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 34566eb62c47..939b692ffc33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -53,6 +53,7 @@
/**
* struct mlxsw_i2c - device private data:
+ * @cmd: command attributes;
* @cmd.mb_size_in: input mailbox size;
* @cmd.mb_off_in: input mailbox offset in register space;
* @cmd.mb_size_out: output mailbox size;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index e9ded1a6e131..fd0e97de44e7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -575,6 +575,15 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
+ if (rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_INGRESS_ACL ||
+ rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_EGRESS_ACL) {
+ u32 cookie_index = 0;
+
+ if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
+ cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
+ mlxsw_skb_cb(skb)->cookie_index = cookie_index;
+ }
+
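
The value read here closes the loop opened by mlxsw_afa_trapwu_pack(): the
cookie index written into the TRAPWU action's user_def_val field travels with
the dropped packet and surfaces in CQEv2, from where it is stashed in the skb
control block. A rough sketch of the later resolution step, using the
mlxsw_sp_acl_act_cookie_lookup() wrapper added in spectrum.h (the function
name resolve_drop_cookie is hypothetical):

    static const struct flow_action_cookie *
    resolve_drop_cookie(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb)
    {
            u32 cookie_index = mlxsw_skb_cb(skb)->cookie_index;

            return mlxsw_sp_acl_act_cookie_lookup(mlxsw_sp, cookie_index);
    }
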
byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
byte_count -= ETH_FCS_LEN;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 43fa8c85b5d9..32c7cabfb261 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -25,8 +25,6 @@
#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT 24
#define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000
-#define MLXSW_PCI_SW_RESET 0xF0010
-#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 900000
#define MLXSW_PCI_SW_RESET_WAIT_MSECS 200
#define MLXSW_PCI_FW_READY 0xA1844
@@ -210,6 +208,11 @@ MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
+/* pci_cqe_user_def_val_orig_pkt_len
+ * When trap_id is an ACL trap: user-defined value from the policy
+ * engine action.
+ */
+MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20);
+
/* pci_cqe_owner
* Ownership bit.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index e05d1d1be2fd..9b39b8e70519 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -621,7 +621,7 @@ static inline void mlxsw_reg_sfn_pack(char *payload)
{
MLXSW_REG_ZERO(sfn, payload);
mlxsw_reg_sfn_swid_set(payload, 0);
- mlxsw_reg_sfn_end_set(payload, 1);
+ mlxsw_reg_sfn_end_set(payload, 0);
mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT);
}
@@ -3296,6 +3296,12 @@ MLXSW_ITEM32(reg, qpcr, g, 0x00, 14, 2);
*/
MLXSW_ITEM32(reg, qpcr, pid, 0x00, 0, 14);
+/* reg_qpcr_clear_counter
+ * Clear counters.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, qpcr, clear_counter, 0x04, 31, 1);
+
/* reg_qpcr_color_aware
* Is the policer aware of colors.
* Must be 0 (unaware) for cpu port.
@@ -3393,6 +3399,17 @@ enum mlxsw_reg_qpcr_action {
*/
MLXSW_ITEM32(reg, qpcr, violate_action, 0x18, 0, 4);
+/* reg_qpcr_violate_count
+ * Counts the number of times violate_action happened on this PID.
+ * Access: RW
+ */
+MLXSW_ITEM64(reg, qpcr, violate_count, 0x20, 0, 64);
+
+#define MLXSW_REG_QPCR_LOWEST_CIR 1
+#define MLXSW_REG_QPCR_HIGHEST_CIR (2 * 1000 * 1000 * 1000) /* 2Gpps */
+#define MLXSW_REG_QPCR_LOWEST_CBS 4
+#define MLXSW_REG_QPCR_HIGHEST_CBS 24
+
static inline void mlxsw_reg_qpcr_pack(char *payload, u16 pid,
enum mlxsw_reg_qpcr_ir_units ir_units,
bool bytes, u32 cir, u16 cbs)
@@ -5440,15 +5457,29 @@ enum mlxsw_reg_pmtm_module_type {
/* Backplane with 4 lanes */
MLXSW_REG_PMTM_MODULE_TYPE_BP_4X,
/* QSFP */
- MLXSW_REG_PMTM_MODULE_TYPE_BP_QSFP,
+ MLXSW_REG_PMTM_MODULE_TYPE_QSFP,
/* SFP */
- MLXSW_REG_PMTM_MODULE_TYPE_BP_SFP,
+ MLXSW_REG_PMTM_MODULE_TYPE_SFP,
/* Backplane with single lane */
MLXSW_REG_PMTM_MODULE_TYPE_BP_1X = 4,
 /* Backplane with two lanes */
MLXSW_REG_PMTM_MODULE_TYPE_BP_2X = 8,
- /* Chip2Chip */
- MLXSW_REG_PMTM_MODULE_TYPE_C2C = 10,
+ /* Chip2Chip4x */
+ MLXSW_REG_PMTM_MODULE_TYPE_C2C4X = 10,
+ /* Chip2Chip2x */
+ MLXSW_REG_PMTM_MODULE_TYPE_C2C2X,
+ /* Chip2Chip1x */
+ MLXSW_REG_PMTM_MODULE_TYPE_C2C1X,
+ /* QSFP-DD */
+ MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD = 14,
+ /* OSFP */
+ MLXSW_REG_PMTM_MODULE_TYPE_OSFP,
+ /* SFP-DD */
+ MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD,
+ /* DSFP */
+ MLXSW_REG_PMTM_MODULE_TYPE_DSFP,
+ /* Chip2Chip8x */
+ MLXSW_REG_PMTM_MODULE_TYPE_C2C8X,
};
/* reg_pmtm_module_type
@@ -5506,12 +5537,10 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM,
MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP,
- MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
- MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF,
MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND,
@@ -5526,9 +5555,11 @@ enum mlxsw_reg_htgt_trap_group {
enum mlxsw_reg_htgt_discard_trap_group {
MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_DUMMY,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS,
};
/* reg_htgt_trap_group
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 6534184cb942..d62496ef299c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -18,6 +18,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_CQE_V1,
MLXSW_RES_ID_CQE_V2,
MLXSW_RES_ID_COUNTER_POOL_SIZE,
+ MLXSW_RES_ID_COUNTER_BANK_SIZE,
MLXSW_RES_ID_MAX_SPAN,
MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
@@ -75,6 +76,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_CQE_V1] = 0x2211,
[MLXSW_RES_ID_CQE_V2] = 0x2212,
[MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410,
+ [MLXSW_RES_ID_COUNTER_BANK_SIZE] = 0x2411,
[MLXSW_RES_ID_MAX_SPAN] = 0x2420,
[MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
[MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC] = 0x2449,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 7358b5bc7eb6..24ca8d5bc564 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -43,6 +43,7 @@
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
+#include "spectrum_trap.h"
#include "../mlxfw/mlxfw.h"
#define MLXSW_SP1_FWREV_MAJOR 13
@@ -347,19 +348,6 @@ static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
-static void mlxsw_sp_status_notify(struct mlxfw_dev *mlxfw_dev,
- const char *msg, const char *comp_name,
- u32 done_bytes, u32 total_bytes)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
-
- devlink_flash_update_status_notify(priv_to_devlink(mlxsw_sp->core),
- msg, comp_name,
- done_bytes, total_bytes);
-}
-
static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
.component_query = mlxsw_sp_component_query,
.fsm_lock = mlxsw_sp_fsm_lock,
@@ -370,7 +358,6 @@ static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
.fsm_query_state = mlxsw_sp_fsm_query_state,
.fsm_cancel = mlxsw_sp_fsm_cancel,
.fsm_release = mlxsw_sp_fsm_release,
- .status_notify = mlxsw_sp_status_notify,
};
static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
@@ -382,16 +369,15 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
.ops = &mlxsw_sp_mlxfw_dev_ops,
.psid = mlxsw_sp->bus_info->psid,
.psid_size = strlen(mlxsw_sp->bus_info->psid),
+ .devlink = priv_to_devlink(mlxsw_sp->core),
},
.mlxsw_sp = mlxsw_sp
};
int err;
mlxsw_core_fw_flash_start(mlxsw_sp->core);
- devlink_flash_update_begin_notify(priv_to_devlink(mlxsw_sp->core));
err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
firmware, extack);
- devlink_flash_update_end_notify(priv_to_devlink(mlxsw_sp->core));
mlxsw_core_fw_flash_end(mlxsw_sp->core);
return err;
@@ -1798,6 +1784,8 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
case TC_SETUP_QDISC_TBF:
return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_FIFO:
+ return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
default:
return -EOPNOTSUPP;
}
@@ -2243,6 +2231,15 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_ext_stats[] = {
+ {
+ .str = "ecn_marked",
+ .getter = mlxsw_reg_ppcnt_ecn_marked_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_EXT_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_ext_stats)
+
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
{
.str = "discard_ingress_general",
@@ -2352,6 +2349,7 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
+ MLXSW_SP_PORT_HW_EXT_STATS_LEN + \
MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
(MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
IEEE_8021QAZ_MAX_TCS) + \
@@ -2413,6 +2411,12 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < MLXSW_SP_PORT_HW_EXT_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_ext_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
ETH_GSTRING_LEN);
@@ -2474,6 +2478,10 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
*p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
*p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
break;
+ case MLXSW_REG_PPCNT_EXT_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_ext_stats;
+ *p_len = MLXSW_SP_PORT_HW_EXT_STATS_LEN;
+ break;
case MLXSW_REG_PPCNT_DISCARD_CNT:
*p_hw_stats = mlxsw_sp_port_hw_discard_stats;
*p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
@@ -2543,6 +2551,11 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
data, data_index);
data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+ /* Extended Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_EXT_STATS_LEN;
+
/* Discard Counters */
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
data, data_index);
@@ -2788,27 +2801,6 @@ static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
return ptys_proto;
}
-static u32
-mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
-{
- u32 ptys_proto = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
- if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed)
- ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
- }
- return ptys_proto;
-}
-
-static int
-mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u32 *base_speed)
-{
- *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
- return 0;
-}
-
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
u8 local_port, u32 proto_admin, bool autoneg)
@@ -2833,8 +2825,6 @@ mlxsw_sp1_port_type_speed_ops = {
.from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
- .to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed,
- .port_speed_base = mlxsw_sp1_port_speed_base,
.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
};
@@ -3235,51 +3225,6 @@ static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
return ptys_proto;
}
-static u32
-mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed)
-{
- u32 ptys_proto = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed)
- ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
- }
- return ptys_proto;
-}
-
-static int
-mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u32 *base_speed)
-{
- char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 eth_proto_cap;
- int err;
-
- /* In Spectrum-2, the speed of 1x can change from port to port, so query
- * it from firmware.
- */
- mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
- if (err)
- return err;
- mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
-
- if (eth_proto_cap &
- MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) {
- *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G;
- return 0;
- }
-
- if (eth_proto_cap &
- MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) {
- *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G;
- return 0;
- }
-
- return -EIO;
-}
-
static void
mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
u8 local_port, u32 proto_admin,
@@ -3305,8 +3250,6 @@ mlxsw_sp2_port_type_speed_ops = {
.from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex,
.to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link,
.to_ptys_speed = mlxsw_sp2_to_ptys_speed,
- .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed,
- .port_speed_base = mlxsw_sp2_port_speed_base,
.reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack,
};
@@ -3520,24 +3463,24 @@ static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
const struct mlxsw_sp_port_type_speed_ops *ops;
char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 eth_proto_admin;
- u32 upper_speed;
- u32 base_speed;
int err;
ops = mlxsw_sp->port_type_speed_ops;
- err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port,
- &base_speed);
+ /* Set advertised speeds to supported speeds. */
+ ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+ 0, false);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
if (err)
return err;
- upper_speed = base_speed * mlxsw_sp_port->mapping.width;
- eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed);
+ ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
+ &eth_proto_admin, &eth_proto_oper);
ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
- eth_proto_admin, mlxsw_sp_port->link.autoneg);
+ eth_proto_cap, mlxsw_sp_port->link.autoneg);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
@@ -4614,6 +4557,7 @@ static const struct mlxsw_listener mlxsw_sp1_listener[] = {
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
char qpcr_pl[MLXSW_REG_QPCR_LEN];
enum mlxsw_reg_qpcr_ir_units ir_units;
int max_cpu_policers;
@@ -4636,7 +4580,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
rate = 128;
burst_size = 7;
@@ -4649,7 +4592,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
@@ -4677,6 +4619,7 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
continue;
}
+ __set_bit(i, mlxsw_sp->trap->policers_usage);
mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
burst_size);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
@@ -4729,19 +4672,20 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1:
priority = 2;
tc = 2;
break;
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
- case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
priority = 1;
tc = 1;
break;
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
+ priority = 0;
+ tc = 1;
+ break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
tc = MLXSW_REG_HTGT_DEFAULT_TC;
@@ -4805,20 +4749,32 @@ static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_trap *trap;
+ u64 max_policers;
int err;
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
+ return -EIO;
+ max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
+ trap = kzalloc(struct_size(trap, policers_usage,
+ BITS_TO_LONGS(max_policers)), GFP_KERNEL);
+ if (!trap)
+ return -ENOMEM;
+ trap->max_policers = max_policers;
+ mlxsw_sp->trap = trap;
+
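
struct_size() sizes the allocation as sizeof(struct mlxsw_sp_trap) plus room
for the policers_usage[] flexible array, one bit per policer rounded up to
whole longs, with overflow checking folded in. Roughly (field names as in the
patch; the struct definition itself lives in spectrum_trap.h, not shown here):

    /* What struct_size(trap, policers_usage, n) computes, sans the
     * overflow checks: base struct plus n array elements.
     */
    size_t bytes = sizeof(struct mlxsw_sp_trap) +
                   BITS_TO_LONGS(max_policers) * sizeof(unsigned long);
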
err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
if (err)
- return err;
+ goto err_cpu_policers_set;
err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
if (err)
- return err;
+ goto err_trap_groups_set;
err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
ARRAY_SIZE(mlxsw_sp_listener));
if (err)
- return err;
+ goto err_traps_register;
err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
mlxsw_sp->listeners_count);
@@ -4830,6 +4786,10 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
err_extra_traps_init:
mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
ARRAY_SIZE(mlxsw_sp_listener));
+err_traps_register:
+err_trap_groups_set:
+err_cpu_policers_set:
+ kfree(trap);
return err;
}
@@ -4839,6 +4799,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp->listeners_count);
mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
ARRAY_SIZE(mlxsw_sp_listener));
+ kfree(mlxsw_sp->trap);
}
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
@@ -4935,16 +4896,35 @@ static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
};
#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
+#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
+
+static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor)
+{
+ return 3 * mtu + buffer_factor * speed / 1000;
+}
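
With the factor hoisted into a parameter, the Spectrum-2 and Spectrum-3
variants below differ only in the constant (38 vs. 50). As a worked example,
assuming mtu is in bytes and speed in Mb/s (the units are an assumption, not
stated in the patch): for mtu = 1500 and speed = 100000, Spectrum-2 gives
3 * 1500 + 38 * 100000 / 1000 = 8300, while Spectrum-3 gives
3 * 1500 + 50 * 100000 / 1000 = 9500.
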
static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
{
- return 3 * mtu + MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR * speed / 1000;
+ int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+ return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
}
static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
.buffsize_get = mlxsw_sp2_span_buffsize_get,
};
+static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed)
+{
+ int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+ return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
+}
+
+static const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
+ .buffsize_get = mlxsw_sp3_span_buffsize_get,
+};
+
u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
{
u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu);
@@ -5223,7 +5203,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
- mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -5460,8 +5440,13 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_span_register;
+ err = mlxsw_sp_counter_resources_register(mlxsw_core);
+ if (err)
+ goto err_resources_counter_register;
+
return 0;
+err_resources_counter_register:
err_resources_span_register:
devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
return err;
@@ -5479,8 +5464,13 @@ static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_span_register;
+ err = mlxsw_sp_counter_resources_register(mlxsw_core);
+ if (err)
+ goto err_resources_counter_register;
+
return 0;
+err_resources_counter_register:
err_resources_span_register:
devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
return err;
@@ -5684,6 +5674,11 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
.trap_group_init = mlxsw_sp_trap_group_init,
+ .trap_group_set = mlxsw_sp_trap_group_set,
+ .trap_policer_init = mlxsw_sp_trap_policer_init,
+ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
+ .trap_policer_set = mlxsw_sp_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
@@ -5718,6 +5713,11 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
.trap_group_init = mlxsw_sp_trap_group_init,
+ .trap_group_set = mlxsw_sp_trap_group_set,
+ .trap_policer_init = mlxsw_sp_trap_policer_init,
+ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
+ .trap_policer_set = mlxsw_sp_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.params_register = mlxsw_sp2_params_register,
@@ -5751,6 +5751,11 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
.trap_group_init = mlxsw_sp_trap_group_init,
+ .trap_group_set = mlxsw_sp_trap_group_set,
+ .trap_policer_init = mlxsw_sp_trap_policer_init,
+ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
+ .trap_policer_set = mlxsw_sp_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.params_register = mlxsw_sp2_params_register,
@@ -6316,7 +6321,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
return -EINVAL;
}
if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
+ !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
return -EOPNOTSUPP;
}
@@ -6472,7 +6477,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
return -EINVAL;
}
if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+ !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
return -EOPNOTSUPP;
}
@@ -6549,7 +6554,7 @@ static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
if (!info->linking)
break;
if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+ !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
return -EOPNOTSUPP;
}
@@ -6609,7 +6614,7 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
if (!info->linking)
break;
if (netif_is_macvlan(upper_dev) &&
- !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
+ !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a0f1f9dceec5..ca56e72cb4b7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -19,6 +19,7 @@
#include <net/pkt_cls.h>
#include <net/red.h>
#include <net/vxlan.h>
+#include <net/flow_offload.h>
#include "port.h"
#include "core.h"
@@ -32,9 +33,6 @@
#define MLXSW_SP_MID_MAX 7000
-#define MLXSW_SP_PORT_BASE_SPEED_25G 25000 /* Mb/s */
-#define MLXSW_SP_PORT_BASE_SPEED_50G 50000 /* Mb/s */
-
#define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128
@@ -48,6 +46,10 @@
#define MLXSW_SP_RESOURCE_NAME_SPAN "span_agents"
+#define MLXSW_SP_RESOURCE_NAME_COUNTERS "counters"
+#define MLXSW_SP_RESOURCE_NAME_COUNTERS_FLOW "flow"
+#define MLXSW_SP_RESOURCE_NAME_COUNTERS_RIF "rif"
+
enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_KVD = 1,
MLXSW_SP_RESOURCE_KVD_LINEAR,
@@ -57,6 +59,9 @@ enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
MLXSW_SP_RESOURCE_SPAN,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ MLXSW_SP_RESOURCE_COUNTERS_FLOW,
+ MLXSW_SP_RESOURCE_COUNTERS_RIF,
};
struct mlxsw_sp_port;
@@ -141,6 +146,7 @@ struct mlxsw_sp_port_type_speed_ops;
struct mlxsw_sp_ptp_state;
struct mlxsw_sp_ptp_ops;
struct mlxsw_sp_span_ops;
+struct mlxsw_sp_qdisc_state;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -168,12 +174,9 @@ struct mlxsw_sp {
struct notifier_block netdevice_nb;
struct mlxsw_sp_ptp_clock *clock;
struct mlxsw_sp_ptp_state *ptp_state;
-
struct mlxsw_sp_counter_pool *counter_pool;
- struct {
- struct mlxsw_sp_span_entry *entries;
- int entries_count;
- } span;
+ struct mlxsw_sp_span *span;
+ struct mlxsw_sp_trap *trap;
const struct mlxsw_fw_rev *req_rev;
const char *fw_filename;
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
@@ -282,8 +285,7 @@ struct mlxsw_sp_port {
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
struct mlxsw_sp_port_vlan *default_vlan;
- struct mlxsw_sp_qdisc *root_qdisc;
- struct mlxsw_sp_qdisc *tclass_qdiscs;
+ struct mlxsw_sp_qdisc_state *qdisc;
unsigned acl_rule_count;
struct mlxsw_sp_acl_block *ing_acl_block;
struct mlxsw_sp_acl_block *eg_acl_block;
@@ -313,9 +315,6 @@ struct mlxsw_sp_port_type_speed_ops {
u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd);
u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u8 width, u32 speed);
- u32 (*to_ptys_upper_speed)(struct mlxsw_sp *mlxsw_sp, u32 upper_speed);
- int (*port_speed_base)(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u32 *base_speed);
void (*reg_ptys_eth_pack)(struct mlxsw_sp *mlxsw_sp, char *payload,
u8 local_port, u32 proto_admin, bool autoneg);
void (*reg_ptys_eth_unpack)(struct mlxsw_sp *mlxsw_sp, char *payload,
@@ -468,10 +467,6 @@ int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack);
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
const struct net_device *vxlan_dev);
-struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
- u16 vid,
- struct netlink_ext_ack *extack);
extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
@@ -556,7 +551,7 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
struct netdev_notifier_changeupper_info *info);
bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
-bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
+bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev,
@@ -571,10 +566,10 @@ void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev);
-struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev);
+bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
+u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
-struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
enum mlxsw_sp_l3proto ul_proto,
const union mlxsw_sp_l3addr *ul_sip,
@@ -653,7 +648,9 @@ struct mlxsw_sp_acl_rule_info {
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
u8 action_created:1,
- egress_bind_blocker:1;
+ ingress_bind_blocker:1,
+ egress_bind_blocker:1,
+ counter_valid:1;
unsigned int counter_index;
};
@@ -672,16 +669,20 @@ struct mlxsw_sp_acl_block {
struct mlxsw_sp *mlxsw_sp;
unsigned int rule_count;
unsigned int disable_count;
+ unsigned int ingress_blocker_rule_count;
unsigned int egress_blocker_rule_count;
+ unsigned int ingress_binding_count;
+ unsigned int egress_binding_count;
struct net *net;
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
-unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
+unsigned int
+mlxsw_sp_acl_block_rule_count(const struct mlxsw_sp_acl_block *block);
void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block);
void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block);
-bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_disabled(const struct mlxsw_sp_acl_block *block);
struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
struct net *net);
void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
@@ -694,7 +695,9 @@ int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress);
-bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_is_egress_bound(const struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_is_ingress_bound(const struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_is_mixed_bound(const struct mlxsw_sp_acl_block *block);
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block, u32 chain_index,
@@ -726,7 +729,10 @@ int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
u16 group_id);
int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei);
-int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
+ bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
@@ -741,6 +747,14 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 action, u16 vid, u16 proto, u8 prio,
struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 prio, struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ enum flow_action_mangle_base htype,
+ u32 offset, u32 mask, u32 val,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct netlink_ext_ack *extack);
@@ -773,10 +787,17 @@ struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule,
- u64 *packets, u64 *bytes, u64 *last_use);
+ u64 *packets, u64 *bytes, u64 *last_use,
+ enum flow_action_hw_stats *used_hw_stats);
struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp);
+static inline const struct flow_action_cookie *
+mlxsw_sp_acl_act_cookie_lookup(struct mlxsw_sp *mlxsw_sp, u32 cookie_index)
+{
+ return mlxsw_afa_cookie_lookup(mlxsw_sp->afa, cookie_index);
+}
+
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp);
@@ -864,6 +885,8 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_ets_qopt_offload *p);
int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_tbf_qopt_offload *p);
+int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_fifo_qopt_offload *p);
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
@@ -974,9 +997,6 @@ void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid,
enum mlxsw_sp_l3proto proto,
union mlxsw_sp_l3addr *addr);
-u32 mlxsw_sp_nve_decap_tunnel_index_get(const struct mlxsw_sp *mlxsw_sp);
-bool mlxsw_sp_nve_ipv4_route_is_decap(const struct mlxsw_sp *mlxsw_sp,
- u32 tb_id, __be32 addr);
int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
struct mlxsw_sp_nve_params *params,
struct netlink_ext_ack *extack);
@@ -1003,6 +1023,22 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
enum devlink_trap_action action);
int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group);
+int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer);
+int
+mlxsw_sp_trap_policer_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer);
+void mlxsw_sp_trap_policer_fini(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer);
+int
+mlxsw_sp_trap_policer_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst, struct netlink_ext_ack *extack);
+int
+mlxsw_sp_trap_policer_counter_get(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops);
static inline struct net *mlxsw_sp_net(struct mlxsw_sp *mlxsw_sp)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
index 09ee0a807747..a9fff8adc75e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
@@ -60,7 +60,7 @@ static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = {
struct mlxsw_sp1_kvdl_part {
struct mlxsw_sp1_kvdl_part_info info;
- unsigned long usage[0]; /* Entries */
+ unsigned long usage[]; /* Entries */
};
struct mlxsw_sp1_kvdl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 8d14770766b4..3a73d654017f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -45,7 +45,7 @@ struct mlxsw_sp2_kvdl_part {
unsigned int usage_bit_count;
unsigned int indexes_per_usage_bit;
unsigned int last_allocated_bit;
- unsigned long usage[0]; /* Usage bits */
+ unsigned long usage[]; /* Usage bits */
};
struct mlxsw_sp2_kvdl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 3d3cca596116..67ee880a8727 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -58,7 +58,7 @@ struct mlxsw_sp_acl_ruleset {
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
unsigned int ref_count;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -71,7 +71,7 @@ struct mlxsw_sp_acl_rule {
u64 last_used;
u64 last_packets;
u64 last_bytes;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -99,7 +99,8 @@ struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
return block->mlxsw_sp;
}
-unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block)
+unsigned int
+mlxsw_sp_acl_block_rule_count(const struct mlxsw_sp_acl_block *block)
{
return block ? block->rule_count : 0;
}
@@ -116,20 +117,24 @@ void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
block->disable_count--;
}
-bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
+bool mlxsw_sp_acl_block_disabled(const struct mlxsw_sp_acl_block *block)
{
return block->disable_count;
}
-bool mlxsw_sp_acl_block_is_egress_bound(struct mlxsw_sp_acl_block *block)
+bool mlxsw_sp_acl_block_is_egress_bound(const struct mlxsw_sp_acl_block *block)
{
- struct mlxsw_sp_acl_block_binding *binding;
+ return block->egress_binding_count;
+}
- list_for_each_entry(binding, &block->binding_list, list) {
- if (!binding->ingress)
- return true;
- }
- return false;
+bool mlxsw_sp_acl_block_is_ingress_bound(const struct mlxsw_sp_acl_block *block)
+{
+ return block->ingress_binding_count;
+}
+
+bool mlxsw_sp_acl_block_is_mixed_bound(const struct mlxsw_sp_acl_block *block)
+{
+ return block->ingress_binding_count && block->egress_binding_count;
}
static bool
@@ -163,7 +168,8 @@ mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
binding->mlxsw_sp_port, binding->ingress);
}
-static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block)
+static bool
+mlxsw_sp_acl_ruleset_block_bound(const struct mlxsw_sp_acl_block *block)
{
return block->ruleset_zero;
}
@@ -250,6 +256,11 @@ int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
return -EEXIST;
+ if (ingress && block->ingress_blocker_rule_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
+ return -EOPNOTSUPP;
+ }
+
if (!ingress && block->egress_blocker_rule_count) {
NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
return -EOPNOTSUPP;
@@ -267,6 +278,10 @@ int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
goto err_ruleset_bind;
}
+ if (ingress)
+ block->ingress_binding_count++;
+ else
+ block->egress_binding_count++;
list_add(&binding->list, &block->binding_list);
return 0;
@@ -288,6 +303,11 @@ int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
list_del(&binding->list);
+ if (ingress)
+ block->ingress_binding_count--;
+ else
+ block->egress_binding_count--;
+
if (mlxsw_sp_acl_ruleset_block_bound(block))
mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
@@ -515,9 +535,13 @@ int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
return mlxsw_afa_block_terminate(rulei->act_block);
}
-int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
+int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei,
+ bool ingress,
+ const struct flow_action_cookie *fa_cookie,
+ struct netlink_ext_ack *extack)
{
- return mlxsw_afa_block_append_drop(rulei->act_block);
+ return mlxsw_afa_block_append_drop(rulei->act_block, ingress,
+ fa_cookie, extack);
}
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
@@ -614,12 +638,126 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
}
}
+int mlxsw_sp_acl_rulei_act_priority(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ /* Even though both Linux and Spectrum switches support 16 priorities,
+ * spectrum_qdisc only processes the first eight priomap elements, and
+ * the DCB and PFC features are tied to 8 priorities as well. Therefore,
+ * reject attempts to prioritize packets into the higher priorities.
+ */
+ if (prio >= IEEE_8021QAZ_MAX_TCS) {
+ NL_SET_ERR_MSG_MOD(extack, "Only priorities 0..7 are supported");
+ return -EINVAL;
+ }
+ return mlxsw_afa_block_append_qos_switch_prio(rulei->act_block, prio,
+ extack);
+}
+
+enum mlxsw_sp_acl_mangle_field {
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
+ MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
+};
+
+struct mlxsw_sp_acl_mangle_action {
+ enum flow_action_mangle_base htype;
+ /* Offset is u32-aligned. */
+ u32 offset;
+ /* Mask bits are unset for the modified field. */
+ u32 mask;
+ /* Shift required to extract the set value. */
+ u32 shift;
+ enum mlxsw_sp_acl_mangle_field field;
+};
+
+#define MLXSW_SP_ACL_MANGLE_ACTION(_htype, _offset, _mask, _shift, _field) \
+ { \
+ .htype = _htype, \
+ .offset = _offset, \
+ .mask = _mask, \
+ .shift = _shift, \
+ .field = MLXSW_SP_ACL_MANGLE_FIELD_##_field, \
+ }
+
+#define MLXSW_SP_ACL_MANGLE_ACTION_IP4(_offset, _mask, _shift, _field) \
+ MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP4, \
+ _offset, _mask, _shift, _field)
+
+#define MLXSW_SP_ACL_MANGLE_ACTION_IP6(_offset, _mask, _shift, _field) \
+ MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6, \
+ _offset, _mask, _shift, _field)
+
+static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP),
+ MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN),
+};
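+/* Worked example: in the first IPv4 row above, the header word is taken
+ * in host order (be32_to_cpu), so the zero bits 16..23 of mask 0xff00ffff
+ * cover the IPv4 TOS byte. A pedit that sets DSfield to 0x28 therefore
+ * arrives with val 0x00280000, and val >> 16 recovers 0x28.
+ */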
+
+static int
+mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_acl_mangle_action *mact,
+ u32 val, struct netlink_ext_ack *extack)
+{
+ switch (mact->field) {
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD:
+ return mlxsw_afa_block_append_qos_dsfield(rulei->act_block,
+ val, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP:
+ return mlxsw_afa_block_append_qos_dscp(rulei->act_block,
+ val, extack);
+ case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN:
+ return mlxsw_afa_block_append_qos_ecn(rulei->act_block,
+ val, extack);
+ }
+
+ /* We shouldn't have gotten a match in the first place! */
+ WARN_ONCE(1, "Unhandled mangle field");
+ return -EINVAL;
+}
+
+int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ enum flow_action_mangle_base htype,
+ u32 offset, u32 mask, u32 val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_acl_mangle_action *mact;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_acl_mangle_actions); ++i) {
+ mact = &mlxsw_sp_acl_mangle_actions[i];
+ if (mact->htype == htype &&
+ mact->offset == offset &&
+ mact->mask == mask) {
+ val >>= mact->shift;
+ return mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp,
+ rulei, mact,
+ val, extack);
+ }
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
+ return -EINVAL;
+}
+
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct netlink_ext_ack *extack)
{
- return mlxsw_afa_block_append_counter(rulei->act_block,
- &rulei->counter_index, extack);
+ int err;
+
+ err = mlxsw_afa_block_append_counter(rulei->act_block,
+ &rulei->counter_index, extack);
+ if (err)
+ return err;
+ rulei->counter_valid = true;
+ return 0;
}
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
@@ -707,6 +845,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
mutex_unlock(&mlxsw_sp->acl->rules_lock);
block->rule_count++;
+ block->ingress_blocker_rule_count += rule->rulei->ingress_bind_blocker;
block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
return 0;
@@ -726,6 +865,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
+ block->ingress_blocker_rule_count -= rule->rulei->ingress_bind_blocker;
ruleset->ht_key.block->rule_count--;
mutex_lock(&mlxsw_sp->acl->rules_lock);
list_del(&rule->list);
@@ -827,20 +967,24 @@ static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule,
- u64 *packets, u64 *bytes, u64 *last_use)
+ u64 *packets, u64 *bytes, u64 *last_use,
+ enum flow_action_hw_stats *used_hw_stats)
{
struct mlxsw_sp_acl_rule_info *rulei;
- u64 current_packets;
- u64 current_bytes;
+ u64 current_packets = 0;
+ u64 current_bytes = 0;
int err;
rulei = mlxsw_sp_acl_rule_rulei(rule);
- err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
- &current_packets, &current_bytes);
- if (err)
- return err;
-
+ if (rulei->counter_valid) {
+ err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
+ &current_packets,
+ &current_bytes);
+ if (err)
+ return err;
+ *used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
+ }
*packets = current_packets - rule->last_packets;
*bytes = current_bytes - rule->last_bytes;
*last_use = rule->last_used;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index 3a2de13fcb68..dbd3bebf11ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -13,7 +13,7 @@
struct mlxsw_sp_acl_bf {
struct mutex lock; /* Protects Bloom Filter updates. */
unsigned int bank_size;
- refcount_t refcnt[0];
+ refcount_t refcnt[];
};
/* Bloom filter uses a crc-16 hash over chunks of data which contain 4 key
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index e993159e8e4c..430da69003d8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -224,7 +224,7 @@ struct mlxsw_sp_acl_tcam_vchunk;
struct mlxsw_sp_acl_tcam_chunk {
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
struct mlxsw_sp_acl_tcam_region *region;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -243,7 +243,7 @@ struct mlxsw_sp_acl_tcam_vchunk {
struct mlxsw_sp_acl_tcam_entry {
struct mlxsw_sp_acl_tcam_ventry *ventry;
struct mlxsw_sp_acl_tcam_chunk *chunk;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 5965913565a5..96437992b102 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -20,7 +20,7 @@ struct mlxsw_sp_acl_tcam {
struct mutex lock; /* guards vregion list */
struct list_head vregion_list;
u32 vregion_rehash_intrvl; /* ms */
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -86,7 +86,7 @@ struct mlxsw_sp_acl_tcam_region {
char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
struct mlxsw_afk_key_info *key_info;
struct mlxsw_sp *mlxsw_sp;
- unsigned long priv[0];
+ unsigned long priv[];
/* priv has to be always the last item */
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 83c2e1e5f216..7974982533b5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -3,92 +3,147 @@
#include <linux/kernel.h>
#include <linux/bitops.h>
+#include <linux/spinlock.h>
#include "spectrum_cnt.h"
-#define MLXSW_SP_COUNTER_POOL_BANK_SIZE 4096
-
struct mlxsw_sp_counter_sub_pool {
+ u64 size;
unsigned int base_index;
- unsigned int size;
+ enum mlxsw_res_id entry_size_res_id;
+ const char *resource_name; /* devlink resource name */
+ u64 resource_id; /* devlink resource id */
unsigned int entry_size;
unsigned int bank_count;
+ atomic_t active_entries_count;
};
struct mlxsw_sp_counter_pool {
- unsigned int pool_size;
+ u64 pool_size;
unsigned long *usage; /* Usage bitmap */
- struct mlxsw_sp_counter_sub_pool *sub_pools;
+ spinlock_t counter_pool_lock; /* Protects counter pool allocations */
+ atomic_t active_entries_count;
+ unsigned int sub_pools_count;
+ struct mlxsw_sp_counter_sub_pool sub_pools[];
};
-static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
+static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
[MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
+ .entry_size_res_id = MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
+ .resource_name = MLXSW_SP_RESOURCE_NAME_COUNTERS_FLOW,
+ .resource_id = MLXSW_SP_RESOURCE_COUNTERS_FLOW,
.bank_count = 6,
},
[MLXSW_SP_COUNTER_SUB_POOL_RIF] = {
+ .entry_size_res_id = MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
+ .resource_name = MLXSW_SP_RESOURCE_NAME_COUNTERS_RIF,
+ .resource_id = MLXSW_SP_RESOURCE_COUNTERS_RIF,
.bank_count = 2,
}
};
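+/* For scale: with the 4096-entry banks that were previously hard-coded,
+ * this layout gave the flow sub-pool 6 banks (24K counters) and the RIF
+ * sub-pool 2; the bank size is now read from the COUNTER_BANK_SIZE
+ * resource instead.
+ */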
-static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
+static u64 mlxsw_sp_counter_sub_pool_occ_get(void *priv)
+{
+ const struct mlxsw_sp_counter_sub_pool *sub_pool = priv;
+
+ return atomic_read(&sub_pool->active_entries_count);
+}
+
+static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
{
- unsigned int total_bank_config = 0;
- unsigned int pool_size;
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_counter_sub_pool *sub_pool;
+ unsigned int base_index = 0;
+ enum mlxsw_res_id res_id;
+ int err;
int i;
- pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
- /* Check config is valid, no bank over subscription */
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++)
- total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count;
- if (total_bank_config > pool_size / MLXSW_SP_COUNTER_POOL_BANK_SIZE + 1)
- return -EINVAL;
+ for (i = 0; i < pool->sub_pools_count; i++) {
+ sub_pool = &pool->sub_pools[i];
+ res_id = sub_pool->entry_size_res_id;
+
+ if (!mlxsw_core_res_valid(mlxsw_sp->core, res_id))
+ return -EIO;
+ sub_pool->entry_size = mlxsw_core_res_get(mlxsw_sp->core,
+ res_id);
+ err = devlink_resource_size_get(devlink,
+ sub_pool->resource_id,
+ &sub_pool->size);
+ if (err)
+ goto err_resource_size_get;
+
+ devlink_resource_occ_get_register(devlink,
+ sub_pool->resource_id,
+ mlxsw_sp_counter_sub_pool_occ_get,
+ sub_pool);
+
+ sub_pool->base_index = base_index;
+ base_index += sub_pool->size;
+ atomic_set(&sub_pool->active_entries_count, 0);
+ }
return 0;
+
+err_resource_size_get:
+ for (i--; i >= 0; i--) {
+ sub_pool = &pool->sub_pools[i];
+
+ devlink_resource_occ_get_unregister(devlink,
+ sub_pool->resource_id);
+ }
+ return err;
}
-static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_counter_sub_pools_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_counter_sub_pool *sub_pool;
+ int i;
- /* Prepare generic flow pool*/
- sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES))
- return -EIO;
- sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- COUNTER_SIZE_PACKETS_BYTES);
- /* Prepare erif pool*/
- sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_RIF];
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_ROUTER_BASIC))
- return -EIO;
- sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- COUNTER_SIZE_ROUTER_BASIC);
- return 0;
+ for (i = 0; i < pool->sub_pools_count; i++) {
+ sub_pool = &pool->sub_pools[i];
+
+ WARN_ON(atomic_read(&sub_pool->active_entries_count));
+ devlink_resource_occ_get_unregister(devlink,
+ sub_pool->resource_id);
+ }
+}
+
+static u64 mlxsw_sp_counter_pool_occ_get(void *priv)
+{
+ const struct mlxsw_sp_counter_pool *pool = priv;
+
+ return atomic_read(&pool->active_entries_count);
}
int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
{
+ unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools);
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_counter_sub_pool *sub_pool;
struct mlxsw_sp_counter_pool *pool;
- unsigned int base_index;
unsigned int map_size;
- int i;
int err;
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_POOL_SIZE))
- return -EIO;
-
- err = mlxsw_sp_counter_pool_validate(mlxsw_sp);
- if (err)
- return err;
-
- err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp);
- if (err)
- return err;
-
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count),
+ GFP_KERNEL);
if (!pool)
return -ENOMEM;
+ mlxsw_sp->counter_pool = pool;
+ memcpy(pool->sub_pools, mlxsw_sp_counter_sub_pools,
+ sub_pools_count * sizeof(*sub_pool));
+ pool->sub_pools_count = sub_pools_count;
+ spin_lock_init(&pool->counter_pool_lock);
+ atomic_set(&pool->active_entries_count, 0);
+
+ err = devlink_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ &pool->pool_size);
+ if (err)
+ goto err_pool_resource_size_get;
+ devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ mlxsw_sp_counter_pool_occ_get, pool);
- pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
pool->usage = kzalloc(map_size, GFP_KERNEL);
@@ -97,26 +152,18 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
goto err_usage_alloc;
}
- pool->sub_pools = mlxsw_sp_counter_sub_pools;
- /* Allocation is based on bank count which should be
- * specified for each sub pool statically.
- */
- base_index = 0;
- for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
- sub_pool = &pool->sub_pools[i];
- sub_pool->size = sub_pool->bank_count *
- MLXSW_SP_COUNTER_POOL_BANK_SIZE;
- sub_pool->base_index = base_index;
- base_index += sub_pool->size;
- /* The last bank can't be fully used */
- if (sub_pool->base_index + sub_pool->size > pool->pool_size)
- sub_pool->size = pool->pool_size - sub_pool->base_index;
- }
+ err = mlxsw_sp_counter_sub_pools_init(mlxsw_sp);
+ if (err)
+ goto err_sub_pools_init;
- mlxsw_sp->counter_pool = pool;
return 0;
+err_sub_pools_init:
+ kfree(pool->usage);
err_usage_alloc:
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
+err_pool_resource_size_get:
kfree(pool);
return err;
}
@@ -124,10 +171,15 @@ err_usage_alloc:
void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ mlxsw_sp_counter_sub_pools_fini(mlxsw_sp);
WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
pool->pool_size);
+ WARN_ON(atomic_read(&pool->active_entries_count));
kfree(pool->usage);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
kfree(pool);
}
@@ -139,25 +191,37 @@ int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_counter_sub_pool *sub_pool;
unsigned int entry_index;
unsigned int stop_index;
- int i;
+ int i, err;
- sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+ sub_pool = &pool->sub_pools[sub_pool_id];
stop_index = sub_pool->base_index + sub_pool->size;
entry_index = sub_pool->base_index;
+ spin_lock(&pool->counter_pool_lock);
entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
- if (entry_index == stop_index)
- return -ENOBUFS;
+ if (entry_index == stop_index) {
+ err = -ENOBUFS;
+ goto err_alloc;
+ }
 /* The sub-pools can contain a non-integer number of entries,
 * so we must check for overflow.
 */
- if (entry_index + sub_pool->entry_size > stop_index)
- return -ENOBUFS;
+ if (entry_index + sub_pool->entry_size > stop_index) {
+ err = -ENOBUFS;
+ goto err_alloc;
+ }
for (i = 0; i < sub_pool->entry_size; i++)
__set_bit(entry_index + i, pool->usage);
+ spin_unlock(&pool->counter_pool_lock);
*p_counter_index = entry_index;
+ atomic_add(sub_pool->entry_size, &sub_pool->active_entries_count);
+ atomic_add(sub_pool->entry_size, &pool->active_entries_count);
return 0;
+
+err_alloc:
+ spin_unlock(&pool->counter_pool_lock);
+ return err;
}
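+/* Note: the usage bitmap guarded by counter_pool_lock is the allocator's
+ * source of truth; the atomic occupancy counters only back the devlink
+ * occ_get callbacks, so updating them outside the lock is acceptable.
+ */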
void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
@@ -170,7 +234,77 @@ void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(counter_index >= pool->pool_size))
return;
- sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+ sub_pool = &pool->sub_pools[sub_pool_id];
+ spin_lock(&pool->counter_pool_lock);
for (i = 0; i < sub_pool->entry_size; i++)
__clear_bit(counter_index + i, pool->usage);
+ spin_unlock(&pool->counter_pool_lock);
+ atomic_sub(sub_pool->entry_size, &sub_pool->active_entries_count);
+ atomic_sub(sub_pool->entry_size, &pool->active_entries_count);
+}
+
+int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ static struct devlink_resource_size_params size_params;
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ const struct mlxsw_sp_counter_sub_pool *sub_pool;
+ unsigned int total_bank_config;
+ u64 sub_pool_size;
+ u64 base_index;
+ u64 pool_size;
+ u64 bank_size;
+ int err;
+ int i;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, COUNTER_POOL_SIZE) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, COUNTER_BANK_SIZE))
+ return -EIO;
+
+ pool_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_POOL_SIZE);
+ bank_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_BANK_SIZE);
+
+ devlink_resource_size_params_init(&size_params, pool_size,
+ pool_size, bank_size,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink,
+ MLXSW_SP_RESOURCE_NAME_COUNTERS,
+ pool_size,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ return err;
+
+ /* Allocation is based on the bank count, which must be
+ * specified statically for each sub-pool.
+ */
+ total_bank_config = 0;
+ base_index = 0;
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
+ sub_pool = &mlxsw_sp_counter_sub_pools[i];
+ sub_pool_size = sub_pool->bank_count * bank_size;
+ /* The last bank can't be fully used */
+ if (base_index + sub_pool_size > pool_size)
+ sub_pool_size = pool_size - base_index;
+ base_index += sub_pool_size;
+
+ devlink_resource_size_params_init(&size_params, sub_pool_size,
+ sub_pool_size, bank_size,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink,
+ sub_pool->resource_name,
+ sub_pool_size,
+ sub_pool->resource_id,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ &size_params);
+ if (err)
+ return err;
+ total_bank_config += sub_pool->bank_count;
+ }
+
+ /* Check that the config is valid: no bank oversubscription */
+ if (WARN_ON(total_bank_config > div64_u64(pool_size, bank_size) + 1))
+ return -EINVAL;
+
+ return 0;
}
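+
+/* The resulting devlink resource hierarchy (illustrative):
+ *   counters         - COUNTER_POOL_SIZE entries in total
+ *     flow           - 6 banks' worth of entries
+ *     rif            - 2 banks' worth of entries
+ * with each node reporting occupancy through its registered occ_get.
+ */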
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
index 81465e267b10..a68d931090dd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
@@ -4,6 +4,7 @@
#ifndef _MLXSW_SPECTRUM_CNT_H
#define _MLXSW_SPECTRUM_CNT_H
+#include "core.h"
#include "spectrum.h"
enum mlxsw_sp_counter_sub_pool_id {
@@ -19,5 +20,6 @@ void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 2dc0978428e6..daf029931b5f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
+#include <linux/mutex.h>
#include <net/devlink.h>
#include "spectrum.h"
@@ -210,7 +211,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
return err;
rif_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
i = 0;
start_again:
err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
@@ -241,14 +242,14 @@ start_again:
devlink_dpipe_entry_ctx_close(dump_ctx);
if (i != rif_count)
goto start_again;
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
devlink_dpipe_entry_clear(&entry);
return 0;
err_entry_append:
err_entry_get:
err_ctx_prepare:
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
devlink_dpipe_entry_clear(&entry);
return err;
}
@@ -258,7 +259,7 @@ static int mlxsw_sp_dpipe_table_erif_counters_update(void *priv, bool enable)
struct mlxsw_sp *mlxsw_sp = priv;
int i;
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
@@ -271,7 +272,7 @@ static int mlxsw_sp_dpipe_table_erif_counters_update(void *priv, bool enable)
mlxsw_sp_rif_counter_free(mlxsw_sp, rif,
MLXSW_SP_RIF_COUNTER_EGRESS);
}
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return 0;
}
@@ -546,7 +547,7 @@ mlxsw_sp_dpipe_table_host_entries_get(struct mlxsw_sp *mlxsw_sp,
int i, j;
int err;
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
i = 0;
rif_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
start_again:
@@ -602,12 +603,12 @@ out:
if (i != rif_count)
goto start_again;
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return 0;
err_ctx_prepare:
err_entry_append:
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return err;
}
@@ -662,7 +663,7 @@ mlxsw_sp_dpipe_table_host_counters_update(struct mlxsw_sp *mlxsw_sp,
{
int i;
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
struct mlxsw_sp_neigh_entry *neigh_entry;
@@ -684,7 +685,7 @@ mlxsw_sp_dpipe_table_host_counters_update(struct mlxsw_sp *mlxsw_sp,
enable);
}
}
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
}
static int mlxsw_sp_dpipe_table_host4_counters_update(void *priv, bool enable)
@@ -701,7 +702,7 @@ mlxsw_sp_dpipe_table_host_size_get(struct mlxsw_sp *mlxsw_sp, int type)
u64 size = 0;
int i;
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
struct mlxsw_sp_neigh_entry *neigh_entry;
@@ -721,7 +722,7 @@ mlxsw_sp_dpipe_table_host_size_get(struct mlxsw_sp *mlxsw_sp, int type)
size++;
}
}
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return size;
}
@@ -1093,7 +1094,7 @@ mlxsw_sp_dpipe_table_adj_entries_get(struct mlxsw_sp *mlxsw_sp,
int j;
int err;
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
nh_count_max = mlxsw_sp_dpipe_table_adj_size(mlxsw_sp);
start_again:
err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
@@ -1130,13 +1131,13 @@ skip:
devlink_dpipe_entry_ctx_close(dump_ctx);
if (nh_count != nh_count_max)
goto start_again;
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return 0;
err_ctx_prepare:
err_entry_append:
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return err;
}
@@ -1206,9 +1207,9 @@ mlxsw_sp_dpipe_table_adj_size_get(void *priv)
struct mlxsw_sp *mlxsw_sp = priv;
u64 size;
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
size = mlxsw_sp_dpipe_table_adj_size(mlxsw_sp);
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return size;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 8df3cb21baa6..004c42274e48 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -8,6 +8,7 @@
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
+#include <linux/refcount.h>
#include "spectrum.h"
#include "reg.h"
@@ -24,7 +25,7 @@ struct mlxsw_sp_fid_core {
struct mlxsw_sp_fid {
struct list_head list;
struct mlxsw_sp_rif *rif;
- unsigned int ref_count;
+ refcount_t ref_count;
u16 fid_index;
struct mlxsw_sp_fid_family *fid_family;
struct rhash_head ht_node;
@@ -149,7 +150,7 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->fid_ht, &fid_index,
mlxsw_sp_fid_ht_params);
if (fid)
- fid->ref_count++;
+ refcount_inc(&fid->ref_count);
return fid;
}
@@ -183,7 +184,7 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->vni_ht, &vni,
mlxsw_sp_fid_vni_ht_params);
if (fid)
- fid->ref_count++;
+ refcount_inc(&fid->ref_count);
return fid;
}
@@ -437,16 +438,6 @@ static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
-static int mlxsw_sp_fid_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- u16 vid, bool valid)
-{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
- char svfa_pl[MLXSW_REG_SVFA_LEN];
-
- mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid_index, vid);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
-}
-
static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
u8 local_port, u16 vid, bool valid)
{
@@ -457,140 +448,6 @@ static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
-static int mlxsw_sp_fid_8021q_configure(struct mlxsw_sp_fid *fid)
-{
- struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
- struct mlxsw_sp_fid_8021q *fid_8021q;
- int err;
-
- err = mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, fid->fid_index, true);
- if (err)
- return err;
-
- fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
- err = mlxsw_sp_fid_vid_map(mlxsw_sp, fid->fid_index, fid_8021q->vid,
- true);
- if (err)
- goto err_fid_map;
-
- return 0;
-
-err_fid_map:
- mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, false);
- return err;
-}
-
-static void mlxsw_sp_fid_8021q_deconfigure(struct mlxsw_sp_fid *fid)
-{
- struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
- struct mlxsw_sp_fid_8021q *fid_8021q;
-
- fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
- mlxsw_sp_fid_vid_map(mlxsw_sp, fid->fid_index, fid_8021q->vid, false);
- mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, false);
-}
-
-static int mlxsw_sp_fid_8021q_index_alloc(struct mlxsw_sp_fid *fid,
- const void *arg, u16 *p_fid_index)
-{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
- u16 vid = *(u16 *) arg;
-
- /* Use 1:1 mapping for simplicity although not a must */
- if (vid < fid_family->start_index || vid > fid_family->end_index)
- return -EINVAL;
- *p_fid_index = vid;
-
- return 0;
-}
-
-static bool
-mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
-{
- u16 vid = *(u16 *) arg;
-
- return mlxsw_sp_fid_8021q_fid(fid)->vid == vid;
-}
-
-static u16 mlxsw_sp_fid_8021q_flood_index(const struct mlxsw_sp_fid *fid)
-{
- return fid->fid_index;
-}
-
-static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
- struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
-
- /* In case there are no {Port, VID} => FID mappings on the port,
- * we can use the global VID => FID mapping we created when the
- * FID was configured.
- */
- if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 0)
- return 0;
- return __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index, local_port,
- vid, true);
-}
-
-static void
-mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
- struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
-
- if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 0)
- return;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index, local_port, vid,
- false);
-}
-
-static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
- .setup = mlxsw_sp_fid_8021q_setup,
- .configure = mlxsw_sp_fid_8021q_configure,
- .deconfigure = mlxsw_sp_fid_8021q_deconfigure,
- .index_alloc = mlxsw_sp_fid_8021q_index_alloc,
- .compare = mlxsw_sp_fid_8021q_compare,
- .flood_index = mlxsw_sp_fid_8021q_flood_index,
- .port_vid_map = mlxsw_sp_fid_8021q_port_vid_map,
- .port_vid_unmap = mlxsw_sp_fid_8021q_port_vid_unmap,
-};
-
-static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021q_flood_tables[] = {
- {
- .packet_type = MLXSW_SP_FLOOD_TYPE_UC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
- .table_index = 0,
- },
- {
- .packet_type = MLXSW_SP_FLOOD_TYPE_MC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
- .table_index = 1,
- },
- {
- .packet_type = MLXSW_SP_FLOOD_TYPE_BC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
- .table_index = 2,
- },
-};
-
-/* Range and flood configuration must match mlxsw_config_profile */
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_family = {
- .type = MLXSW_SP_FID_TYPE_8021Q,
- .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
- .start_index = 1,
- .end_index = VLAN_VID_MASK,
- .flood_tables = mlxsw_sp_fid_8021q_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021q_flood_tables),
- .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
- .ops = &mlxsw_sp_fid_8021q_ops,
-};
-
static struct mlxsw_sp_fid_8021d *
mlxsw_sp_fid_8021d_fid(const struct mlxsw_sp_fid *fid)
{
@@ -845,6 +702,14 @@ static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = {
.lag_vid_valid = 1,
};
+static bool
+mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
+{
+ u16 vid = *(u16 *) arg;
+
+ return mlxsw_sp_fid_8021q_fid(fid)->vid == vid;
+}
+
static void
mlxsw_sp_fid_8021q_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
const struct net_device *nve_dev)
@@ -1030,7 +895,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry(fid, &fid_family->fids_list, list) {
if (!fid->fid_family->ops->compare(fid, arg))
continue;
- fid->ref_count++;
+ refcount_inc(&fid->ref_count);
return fid;
}
@@ -1075,7 +940,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
goto err_rhashtable_insert;
list_add(&fid->list, &fid_family->fids_list);
- fid->ref_count++;
+ refcount_set(&fid->ref_count, 1);
return fid;
err_rhashtable_insert:
@@ -1093,7 +958,7 @@ void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
- if (--fid->ref_count != 0)
+ if (!refcount_dec_and_test(&fid->ref_count))
return;
list_del(&fid->list);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index b607919c8ad0..2f76908cae73 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -26,11 +26,20 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (!flow_action_has_entries(flow_action))
return 0;
+ if (!flow_action_mixed_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
- /* Count action is inserted first */
- err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
- if (err)
- return err;
+ act = flow_action_first_entry_get(flow_action);
+ if (act->hw_stats == FLOW_ACTION_HW_STATS_ANY ||
+ act->hw_stats == FLOW_ACTION_HW_STATS_IMMEDIATE) {
+ /* Count action is inserted first */
+ err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
+ if (err)
+ return err;
+ } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
+ return -EOPNOTSUPP;
+ }
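+ /* E.g. an action installed with hw_stats "immediate" or "any" keeps
+ * the counter and mlxsw_sp_flower_stats() later reports it as
+ * IMMEDIATE; hw_stats "disabled" skips counter allocation entirely.
+ */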
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
@@ -41,12 +50,30 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return err;
}
break;
- case FLOW_ACTION_DROP:
- err = mlxsw_sp_acl_rulei_act_drop(rulei);
+ case FLOW_ACTION_DROP: {
+ bool ingress;
+
+ if (mlxsw_sp_acl_block_is_mixed_bound(block)) {
+ NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
+ return -EOPNOTSUPP;
+ }
+ ingress = mlxsw_sp_acl_block_is_ingress_bound(block);
+ err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
+ act->cookie, extack);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
return err;
}
+
+ /* Forbid the block holding this rulei from being bound
+ * to ingress/egress in the future. An ingress rule is
+ * a blocker for egress and vice versa.
+ */
+ if (ingress)
+ rulei->egress_bind_blocker = 1;
+ else
+ rulei->ingress_bind_blocker = 1;
+ }
break;
case FLOW_ACTION_TRAP:
err = mlxsw_sp_acl_rulei_act_trap(rulei);
@@ -127,6 +154,25 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
act->id, vid,
proto, prio, extack);
}
+ case FLOW_ACTION_PRIORITY:
+ return mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
+ act->priority,
+ extack);
+ case FLOW_ACTION_MANGLE: {
+ enum flow_action_mangle_base htype = act->mangle.htype;
+ __be32 be_mask = (__force __be32) act->mangle.mask;
+ __be32 be_val = (__force __be32) act->mangle.val;
+ u32 offset = act->mangle.offset;
+ u32 mask = be32_to_cpu(be_mask);
+ u32 val = be32_to_cpu(be_val);
+
+ err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
+ htype, offset,
+ mask, val, extack);
+ if (err)
+ return err;
+ break;
+ }
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
@@ -525,6 +571,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct flow_cls_offload *f)
{
+ enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
u64 packets;
@@ -543,11 +590,11 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
- &lastuse);
+ &lastuse, &used_hw_stats);
if (err)
goto err_rule_get_stats;
- flow_stats_update(&f->stats, bytes, packets, lastuse);
+ flow_stats_update(&f->stats, bytes, packets, lastuse, used_hw_stats);
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 1e4cdee7bcd7..20d72f1c0cee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -2,13 +2,15 @@
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
#include <linux/kernel.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include "spectrum.h"
struct mlxsw_sp_kvdl {
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
- unsigned long priv[0];
+ struct mutex kvdl_lock; /* Protects kvdl allocations */
+ unsigned long priv[];
/* priv has to be always the last item */
};
@@ -22,6 +24,7 @@ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
GFP_KERNEL);
if (!kvdl)
return -ENOMEM;
+ mutex_init(&kvdl->kvdl_lock);
kvdl->kvdl_ops = kvdl_ops;
mlxsw_sp->kvdl = kvdl;
@@ -31,6 +34,7 @@ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_init:
+ mutex_destroy(&kvdl->kvdl_lock);
kfree(kvdl);
return err;
}
@@ -40,6 +44,7 @@ void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv);
+ mutex_destroy(&kvdl->kvdl_lock);
kfree(kvdl);
}
@@ -48,9 +53,14 @@ int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count, u32 *p_entry_index)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
+ int err;
+
+ mutex_lock(&kvdl->kvdl_lock);
+ err = kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
+ entry_count, p_entry_index);
+ mutex_unlock(&kvdl->kvdl_lock);
- return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
- entry_count, p_entry_index);
+ return err;
}
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
@@ -59,8 +69,10 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
+ mutex_lock(&kvdl->kvdl_lock);
kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type,
entry_count, entry_index);
+ mutex_unlock(&kvdl->kvdl_lock);
}
int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 336e5ecc68f8..47eb751a2570 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
+#include <linux/mutex.h>
#include <linux/rhashtable.h>
#include <net/ipv6.h>
@@ -12,6 +13,7 @@ struct mlxsw_sp_mr {
void *catchall_route_priv;
struct delayed_work stats_update_dw;
struct list_head table_list;
+ struct mutex table_list_lock; /* Protects table_list */
#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
unsigned long priv[0];
/* priv has to be always the last item */
@@ -66,9 +68,10 @@ struct mlxsw_sp_mr_table {
u32 vr_id;
struct mlxsw_sp_mr_vif vifs[MAXVIFS];
struct list_head route_list;
+ struct mutex route_list_lock; /* Protects route_list */
struct rhashtable route_ht;
const struct mlxsw_sp_mr_table_ops *ops;
- char catchall_route_priv[0];
+ char catchall_route_priv[];
/* catchall_route_priv has to be always the last item */
};
@@ -370,11 +373,13 @@ static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
struct mlxsw_sp_mr_route *mr_route)
{
+ WARN_ON_ONCE(!mutex_is_locked(&mr_table->route_list_lock));
+
mlxsw_sp_mr_mfc_offload_set(mr_route, false);
- mlxsw_sp_mr_route_erase(mr_table, mr_route);
rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
mlxsw_sp_mr_route_ht_params);
list_del(&mr_route->node);
+ mlxsw_sp_mr_route_erase(mr_table, mr_route);
mlxsw_sp_mr_route_destroy(mr_table, mr_route);
}
@@ -415,19 +420,21 @@ int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
goto err_duplicate_route;
}
+ /* Write the route to the hardware */
+ err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
+ if (err)
+ goto err_mr_route_write;
+
/* Put it in the table data-structures */
+ mutex_lock(&mr_table->route_list_lock);
list_add_tail(&mr_route->node, &mr_table->route_list);
+ mutex_unlock(&mr_table->route_list_lock);
err = rhashtable_insert_fast(&mr_table->route_ht,
&mr_route->ht_node,
mlxsw_sp_mr_route_ht_params);
if (err)
goto err_rhashtable_insert;
- /* Write the route to the hardware */
- err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
- if (err)
- goto err_mr_route_write;
-
/* Destroy the original route */
if (replace) {
rhashtable_remove_fast(&mr_table->route_ht,
@@ -440,11 +447,12 @@ int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
mlxsw_sp_mr_mfc_offload_update(mr_route);
return 0;
-err_mr_route_write:
- rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
- mlxsw_sp_mr_route_ht_params);
err_rhashtable_insert:
+ mutex_lock(&mr_table->route_list_lock);
list_del(&mr_route->node);
+ mutex_unlock(&mr_table->route_list_lock);
+ mlxsw_sp_mr_route_erase(mr_table, mr_route);
+err_mr_route_write:
err_no_orig_route:
err_duplicate_route:
mlxsw_sp_mr_route_destroy(mr_table, mr_route);
@@ -460,8 +468,11 @@ void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
mr_table->ops->key_create(mr_table, &key, mfc);
mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
mlxsw_sp_mr_route_ht_params);
- if (mr_route)
+ if (mr_route) {
+ mutex_lock(&mr_table->route_list_lock);
__mlxsw_sp_mr_route_del(mr_table, mr_route);
+ mutex_unlock(&mr_table->route_list_lock);
+ }
}
/* Should be called after the VIF struct is updated */
@@ -910,6 +921,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
mr_table->proto = proto;
mr_table->ops = &mlxsw_sp_mr_table_ops_arr[proto];
INIT_LIST_HEAD(&mr_table->route_list);
+ mutex_init(&mr_table->route_list_lock);
err = rhashtable_init(&mr_table->route_ht,
&mlxsw_sp_mr_route_ht_params);
@@ -927,12 +939,15 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
&catchall_route_params);
if (err)
goto err_ops_route_create;
+ mutex_lock(&mr->table_list_lock);
list_add_tail(&mr_table->node, &mr->table_list);
+ mutex_unlock(&mr->table_list_lock);
return mr_table;
err_ops_route_create:
rhashtable_destroy(&mr_table->route_ht);
err_route_rhashtable_init:
+ mutex_destroy(&mr_table->route_list_lock);
kfree(mr_table);
return ERR_PTR(err);
}
@@ -943,10 +958,13 @@ void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
+ mutex_lock(&mr->table_list_lock);
list_del(&mr_table->node);
+ mutex_unlock(&mr->table_list_lock);
mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
&mr_table->catchall_route_priv);
rhashtable_destroy(&mr_table->route_ht);
+ mutex_destroy(&mr_table->route_list_lock);
kfree(mr_table);
}
@@ -955,8 +973,10 @@ void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
struct mlxsw_sp_mr_route *mr_route, *tmp;
int i;
+ mutex_lock(&mr_table->route_list_lock);
list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
__mlxsw_sp_mr_route_del(mr_table, mr_route);
+ mutex_unlock(&mr_table->route_list_lock);
for (i = 0; i < MAXVIFS; i++) {
mr_table->vifs[i].dev = NULL;
@@ -1000,12 +1020,15 @@ static void mlxsw_sp_mr_stats_update(struct work_struct *work)
struct mlxsw_sp_mr_route *mr_route;
unsigned long interval;
- rtnl_lock();
- list_for_each_entry(mr_table, &mr->table_list, node)
+ mutex_lock(&mr->table_list_lock);
+ list_for_each_entry(mr_table, &mr->table_list, node) {
+ mutex_lock(&mr_table->route_list_lock);
list_for_each_entry(mr_route, &mr_table->route_list, node)
mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
mr_route);
- rtnl_unlock();
+ mutex_unlock(&mr_table->route_list_lock);
+ }
+ mutex_unlock(&mr->table_list_lock);
interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
@@ -1024,6 +1047,7 @@ int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
mr->mr_ops = mr_ops;
mlxsw_sp->mr = mr;
INIT_LIST_HEAD(&mr->table_list);
+ mutex_init(&mr->table_list_lock);
err = mr_ops->init(mlxsw_sp, mr->priv);
if (err)
@@ -1035,6 +1059,7 @@ int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
return 0;
err:
+ mutex_destroy(&mr->table_list_lock);
kfree(mr);
return err;
}
@@ -1045,5 +1070,6 @@ void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
cancel_delayed_work_sync(&mr->stats_update_dw);
mr->mr_ops->fini(mlxsw_sp, mr->priv);
+ mutex_destroy(&mr->table_list_lock);
kfree(mr);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 2153bcc4b585..54d3e7dcd303 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -67,7 +67,7 @@ struct mlxsw_sp_nve_mc_record {
struct mlxsw_sp_nve_mc_list *mc_list;
const struct mlxsw_sp_nve_mc_record_ops *ops;
u32 kvdl_index;
- struct mlxsw_sp_nve_mc_entry entries[0];
+ struct mlxsw_sp_nve_mc_entry entries[];
};
struct mlxsw_sp_nve_mc_list {
@@ -713,27 +713,6 @@ static void mlxsw_sp_nve_flood_ip_flush(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
}
-u32 mlxsw_sp_nve_decap_tunnel_index_get(const struct mlxsw_sp *mlxsw_sp)
-{
- WARN_ON(mlxsw_sp->nve->num_nve_tunnels == 0);
-
- return mlxsw_sp->nve->tunnel_index;
-}
-
-bool mlxsw_sp_nve_ipv4_route_is_decap(const struct mlxsw_sp *mlxsw_sp,
- u32 tb_id, __be32 addr)
-{
- struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
- struct mlxsw_sp_nve_config *config = &nve->config;
-
- if (nve->num_nve_tunnels &&
- config->ul_proto == MLXSW_SP_L3_PROTO_IPV4 &&
- config->ul_sip.addr4 == addr && config->ul_tb_id == tb_id)
- return true;
-
- return false;
-}
-
static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nve_config *config)
{
@@ -744,6 +723,8 @@ static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp,
if (nve->num_nve_tunnels++ != 0)
return 0;
+ nve->config = *config;
+
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
&nve->tunnel_index);
if (err)
@@ -760,6 +741,7 @@ err_ops_init:
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
nve->tunnel_index);
err_kvdl_alloc:
+ memset(&nve->config, 0, sizeof(nve->config));
nve->num_nve_tunnels--;
return err;
}
@@ -840,8 +822,6 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
goto err_fid_vni_set;
}
- nve->config = config;
-
err = ops->fdb_replay(params->dev, params->vni, extack);
if (err)
goto err_fdb_replay;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 34f7c3501b08..9650562fc0ef 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -922,6 +922,8 @@ static int mlxsw_sp_ptp_get_message_types(const struct hwtstamp_config *config,
case HWTSTAMP_TX_ONESTEP_SYNC:
case HWTSTAMP_TX_ONESTEP_P2P:
return -ERANGE;
+ default:
+ return -EINVAL;
}
switch (rx_filter) {
@@ -952,6 +954,8 @@ static int mlxsw_sp_ptp_get_message_types(const struct hwtstamp_config *config,
case HWTSTAMP_FILTER_SOME:
case HWTSTAMP_FILTER_NTP_ALL:
return -ERANGE;
+ default:
+ return -EINVAL;
}
*p_ing_types = ing_types;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 02526c53d4f5..670a43fe2a00 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -20,14 +20,17 @@ enum mlxsw_sp_qdisc_type {
MLXSW_SP_QDISC_PRIO,
MLXSW_SP_QDISC_ETS,
MLXSW_SP_QDISC_TBF,
+ MLXSW_SP_QDISC_FIFO,
};
+struct mlxsw_sp_qdisc;
+
struct mlxsw_sp_qdisc_ops {
enum mlxsw_sp_qdisc_type type;
int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params);
- int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
+ int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
@@ -64,6 +67,25 @@ struct mlxsw_sp_qdisc {
struct mlxsw_sp_qdisc_ops *ops;
};
+struct mlxsw_sp_qdisc_state {
+ struct mlxsw_sp_qdisc root_qdisc;
+ struct mlxsw_sp_qdisc tclass_qdiscs[IEEE_8021QAZ_MAX_TCS];
+
+ /* When a PRIO or ETS is added, the invisible FIFOs in its bands are
+ * created first. When notifications for these FIFOs arrive, it is not
+ * known what qdisc their parent handle refers to. It could be a
+ * newly-created PRIO that will replace the currently-offloaded one, or
+ * it could be e.g. a RED that will be attached below it.
+ *
+ * As the notifications start to arrive, use them to note what the
+ * future parent handle is, and keep track of which child FIFOs were
+ * seen. Then when the parent is known, retroactively offload those
+ * FIFOs.
+ */
+ u32 future_handle;
+ bool future_fifos[IEEE_8021QAZ_MAX_TCS];
+};
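+
+/* Illustration: "tc qdisc replace dev swp1 root handle 1: prio" first
+ * emits TC_FIFO_REPLACE for the implicit band FIFOs (parents 1:1, 1:2,
+ * ...) with handle TC_H_UNSPEC; those mark future_fifos[], and the PRIO
+ * replace that follows offloads them retroactively.
+ */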
+
static bool
mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
enum mlxsw_sp_qdisc_type type)
@@ -77,36 +99,38 @@ static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
bool root_only)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
int tclass, child_index;
if (parent == TC_H_ROOT)
- return mlxsw_sp_port->root_qdisc;
+ return &qdisc_state->root_qdisc;
- if (root_only || !mlxsw_sp_port->root_qdisc ||
- !mlxsw_sp_port->root_qdisc->ops ||
- TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
+ if (root_only || !qdisc_state ||
+ !qdisc_state->root_qdisc.ops ||
+ TC_H_MAJ(parent) != qdisc_state->root_qdisc.handle ||
TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
return NULL;
child_index = TC_H_MIN(parent);
tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
- return &mlxsw_sp_port->tclass_qdiscs[tclass];
+ return &qdisc_state->tclass_qdiscs[tclass];
}
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
int i;
- if (mlxsw_sp_port->root_qdisc->handle == handle)
- return mlxsw_sp_port->root_qdisc;
+ if (qdisc_state->root_qdisc.handle == handle)
+ return &qdisc_state->root_qdisc;
- if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
+ if (qdisc_state->root_qdisc.handle == TC_H_UNSPEC)
return NULL;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
- return &mlxsw_sp_port->tclass_qdiscs[i];
+ if (qdisc_state->tclass_qdiscs[i].handle == handle)
+ return &qdisc_state->tclass_qdiscs[i];
return NULL;
}
@@ -147,11 +171,15 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
if (err)
goto err_bad_param;
- err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+ err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
if (err)
goto err_config;
- if (mlxsw_sp_qdisc->handle != handle) {
+ /* Check if the Qdisc changed. That includes a situation where an
+ * invisible Qdisc replaces another one, or is being added for the
+ * first time.
+ */
+ if (mlxsw_sp_qdisc->handle != handle || handle == TC_H_UNSPEC) {
mlxsw_sp_qdisc->ops = ops;
if (ops->clean_stats)
ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
@@ -295,7 +323,7 @@ mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
- u32 probability, bool is_ecn)
+ u32 probability, bool is_wred, bool is_ecn)
{
char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
char cwtp_cmd[MLXSW_REG_CWTP_LEN];
@@ -313,7 +341,7 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
- MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
+ MLXSW_REG_CWTP_DEFAULT_PROFILE, is_wred, is_ecn);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
@@ -347,7 +375,6 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_qdisc->prio_bitmap,
&stats_base->tx_packets,
&stats_base->tx_bytes);
- red_base->prob_mark = xstats->ecn;
red_base->prob_drop = xstats->wred_drop[tclass_num];
red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
@@ -361,7 +388,8 @@ static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
if (root_qdisc != mlxsw_sp_qdisc)
root_qdisc->stats_base.backlog -=
@@ -400,7 +428,7 @@ mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
@@ -417,8 +445,9 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
prob = DIV_ROUND_UP(prob, 1 << 16);
min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
- return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
- max, prob, p->is_ecn);
+ return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
+ min, max, prob,
+ !p->is_nodrop, p->is_ecn);
}
static void
@@ -453,22 +482,19 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
struct red_stats *res = xstats_ptr;
- int early_drops, marks, pdrops;
+ int early_drops, pdrops;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
- marks = xstats->ecn - xstats_base->prob_mark;
pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
xstats_base->pdrop;
res->pdrop += pdrops;
res->prob_drop += early_drops;
- res->prob_mark += marks;
xstats_base->pdrop += pdrops;
xstats_base->prob_drop += early_drops;
- xstats_base->prob_mark += marks;
return 0;
}
@@ -486,8 +512,7 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
stats_base = &mlxsw_sp_qdisc->stats_base;
mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
- overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
- stats_base->overlimits;
+ overlimits = xstats->wred_drop[tclass_num] - stats_base->overlimits;
stats_ptr->qstats->overlimits += overlimits;
stats_base->overlimits += overlimits;
@@ -564,7 +589,8 @@ static int
mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
if (root_qdisc != mlxsw_sp_qdisc)
root_qdisc->stats_base.backlog -=
@@ -651,7 +677,7 @@ mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
@@ -740,8 +766,121 @@ int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
+mlxsw_sp_qdisc_fifo_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
+
+ if (root_qdisc != mlxsw_sp_qdisc)
+ root_qdisc->stats_base.backlog -=
+ mlxsw_sp_qdisc->stats_base.backlog;
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *params)
+{
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_get_fifo_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_qopt_offload_stats *stats_ptr)
+{
+ mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ stats_ptr);
+ return 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
+ .type = MLXSW_SP_QDISC_FIFO,
+ .check_params = mlxsw_sp_qdisc_fifo_check_params,
+ .replace = mlxsw_sp_qdisc_fifo_replace,
+ .destroy = mlxsw_sp_qdisc_fifo_destroy,
+ .get_stats = mlxsw_sp_qdisc_get_fifo_stats,
+ .clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
+};
+
+int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_fifo_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+ int tclass, child_index;
+ u32 parent_handle;
+
+ /* Invisible FIFOs are tracked in future_handle and future_fifos. Make
+ * sure that no more than one qdisc is created for a port at a time.
+ * RTNL is a simple proxy for that.
+ */
+ ASSERT_RTNL();
+
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
+ parent_handle = TC_H_MAJ(p->parent);
+ if (parent_handle != qdisc_state->future_handle) {
+ /* This notification is for a different Qdisc than the
+ * previous one. Wipe the future cache.
+ */
+ memset(qdisc_state->future_fifos, 0,
+ sizeof(qdisc_state->future_fifos));
+ qdisc_state->future_handle = parent_handle;
+ }
+
+ child_index = TC_H_MIN(p->parent);
+ tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
+ if (tclass < IEEE_8021QAZ_MAX_TCS) {
+ if (p->command == TC_FIFO_REPLACE)
+ qdisc_state->future_fifos[tclass] = true;
+ else if (p->command == TC_FIFO_DESTROY)
+ qdisc_state->future_fifos[tclass] = false;
+ }
+ }
+ if (!mlxsw_sp_qdisc)
+ return -EOPNOTSUPP;
+
+ if (p->command == TC_FIFO_REPLACE) {
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc,
+ &mlxsw_sp_qdisc_ops_fifo, NULL);
+ }
+
+ if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+ MLXSW_SP_QDISC_FIFO))
+ return -EOPNOTSUPP;
+
+ switch (p->command) {
+ case TC_FIFO_DESTROY:
+ if (p->handle == mlxsw_sp_qdisc->handle)
+ return mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ return 0;
+ case TC_FIFO_STATS:
+ return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->stats);
+ case TC_FIFO_REPLACE: /* Handled above. */
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
@@ -751,8 +890,8 @@ __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0, false, 0);
mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- &mlxsw_sp_port->tclass_qdiscs[i]);
- mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
+ &qdisc_state->tclass_qdiscs[i]);
+ qdisc_state->tclass_qdiscs[i].prio_bitmap = 0;
}
return 0;
@@ -785,12 +924,13 @@ mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
unsigned int nbands,
const unsigned int *quanta,
const unsigned int *weights,
const u8 *priomap)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
struct mlxsw_sp_qdisc *child_qdisc;
int tclass, i, band, backlog;
u8 old_priomap;
@@ -798,7 +938,7 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
for (band = 0; band < nbands; band++) {
tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
- child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
+ child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
old_priomap = child_qdisc->prio_bitmap;
child_qdisc->prio_bitmap = 0;
@@ -827,28 +967,41 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
child_qdisc);
child_qdisc->stats_base.backlog = backlog;
}
+
+ if (handle == qdisc_state->future_handle &&
+ qdisc_state->future_fifos[tclass]) {
+ err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
+ child_qdisc,
+ &mlxsw_sp_qdisc_ops_fifo,
+ NULL);
+ if (err)
+ return err;
+ }
}
for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
- child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
+ child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
child_qdisc->prio_bitmap = 0;
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
tclass, 0, false, 0);
}
+
+ qdisc_state->future_handle = TC_H_UNSPEC;
+ memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
return 0;
}
static int
-mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_prio_qopt_offload_params *p = params;
unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
- return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
zeroes, zeroes, p->priomap);
}
@@ -880,6 +1033,7 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
struct mlxsw_sp_qdisc *tc_qdisc;
u64 tx_packets = 0;
u64 tx_bytes = 0;
@@ -888,7 +1042,7 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- tc_qdisc = &mlxsw_sp_port->tclass_qdiscs[i];
+ tc_qdisc = &qdisc_state->tclass_qdiscs[i];
mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
&tx_bytes, &tx_packets,
&drops, &backlog);
@@ -946,13 +1100,13 @@ mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
-mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct tc_ets_qopt_offload_replace_params *p = params;
- return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
+ return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
p->quanta, p->weights, p->priomap);
}
@@ -1014,11 +1168,12 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
u8 band, u32 child_handle)
{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
struct mlxsw_sp_qdisc *old_qdisc;
if (band < IEEE_8021QAZ_MAX_TCS &&
- mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == child_handle)
+ qdisc_state->tclass_qdiscs[tclass_num].handle == child_handle)
return 0;
if (!child_handle) {
@@ -1037,7 +1192,7 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
+ &qdisc_state->tclass_qdiscs[tclass_num]);
return -EOPNOTSUPP;
}
@@ -1119,37 +1274,23 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+ struct mlxsw_sp_qdisc_state *qdisc_state;
int i;
- mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
- if (!mlxsw_sp_qdisc)
- goto err_root_qdisc_init;
-
- mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
- mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
- mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+ qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
+ if (!qdisc_state)
+ return -ENOMEM;
- mlxsw_sp_qdisc = kcalloc(IEEE_8021QAZ_MAX_TCS,
- sizeof(*mlxsw_sp_qdisc),
- GFP_KERNEL);
- if (!mlxsw_sp_qdisc)
- goto err_tclass_qdiscs_init;
-
- mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
+ qdisc_state->root_qdisc.prio_bitmap = 0xff;
+ qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;
+ qdisc_state->tclass_qdiscs[i].tclass_num = i;
+ mlxsw_sp_port->qdisc = qdisc_state;
return 0;
-
-err_tclass_qdiscs_init:
- kfree(mlxsw_sp_port->root_qdisc);
-err_root_qdisc_init:
- return -ENOMEM;
}
void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
- kfree(mlxsw_sp_port->tclass_qdiscs);
- kfree(mlxsw_sp_port->root_qdisc);
+ kfree(mlxsw_sp_port->qdisc);
}
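The init/fini rework above replaces two allocations (a root qdisc plus a kcalloc'ed array of per-TC qdiscs) with a single kzalloc of one state structure, leaving a single pointer to manage and no partial-failure unwind. A sketch of the layout this implies — the field list is inferred from the accesses visible in this patch, not taken from the header:

    /* Inferred layout; see qdisc_state->root_qdisc, ->tclass_qdiscs[],
     * ->future_handle and ->future_fifos accesses in the hunks above.
     */
    struct mlxsw_sp_qdisc_state {
        struct mlxsw_sp_qdisc root_qdisc;
        struct mlxsw_sp_qdisc tclass_qdiscs[IEEE_8021QAZ_MAX_TCS];
        u32 future_handle;
        bool future_fifos[IEEE_8021QAZ_MAX_TCS];
    };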
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 4a77b511ead2..d5bca1be3ef5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -17,6 +17,7 @@
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
+#include <linux/mutex.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -48,39 +49,6 @@ struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;
-struct mlxsw_sp_router {
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_rif **rifs;
- struct mlxsw_sp_vr *vrs;
- struct rhashtable neigh_ht;
- struct rhashtable nexthop_group_ht;
- struct rhashtable nexthop_ht;
- struct list_head nexthop_list;
- struct {
- /* One tree for each protocol: IPv4 and IPv6 */
- struct mlxsw_sp_lpm_tree *proto_trees[2];
- struct mlxsw_sp_lpm_tree *trees;
- unsigned int tree_count;
- } lpm;
- struct {
- struct delayed_work dw;
- unsigned long interval; /* ms */
- } neighs_update;
- struct delayed_work nexthop_probe_dw;
-#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
- struct list_head nexthop_neighs_list;
- struct list_head ipip_list;
- bool aborted;
- struct notifier_block fib_nb;
- struct notifier_block netevent_nb;
- struct notifier_block inetaddr_nb;
- struct notifier_block inet6addr_nb;
- const struct mlxsw_sp_rif_ops **rif_ops_arr;
- const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
- u32 adj_discard_index;
- bool adj_discard_index_valid;
-};
-
struct mlxsw_sp_rif {
struct list_head nexthop_list;
struct list_head neigh_list;
@@ -145,6 +113,9 @@ struct mlxsw_sp_rif_ops {
void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
@@ -760,13 +731,18 @@ int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
u16 *vr_id)
{
struct mlxsw_sp_vr *vr;
+ int err = 0;
+ mutex_lock(&mlxsw_sp->router->lock);
vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
- if (!vr)
- return -ESRCH;
+ if (!vr) {
+ err = -ESRCH;
+ goto out;
+ }
*vr_id = vr->id;
-
- return 0;
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
}
static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
@@ -988,17 +964,23 @@ __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
struct ip_tunnel *tun = netdev_priv(ol_dev);
struct net *net = dev_net(ol_dev);
- return __dev_get_by_index(net, tun->parms.link);
+ return dev_get_by_index_rcu(net, tun->parms.link);
}
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
- struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ struct net_device *d;
+ u32 tb_id;
+ rcu_read_lock();
+ d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
if (d)
- return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
+ tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
else
- return RT_TABLE_MAIN;
+ tb_id = RT_TABLE_MAIN;
+ rcu_read_unlock();
+
+ return tb_id;
}
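Several hunks in this file swap __dev_get_by_index(), which requires RTNL, for dev_get_by_index_rcu() inside an RCU read-side critical section. The crucial detail is that the RCU variant takes no reference, so any data needed from the device must be copied out before rcu_read_unlock(). The pattern in isolation:

    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>

    /* Sketch of the lookup pattern used above: extract a scalar while the
     * pointer is still protected; never use the pointer after unlock.
     */
    static unsigned int dev_flags_by_ifindex(struct net *net, int ifindex)
    {
        struct net_device *dev;
        unsigned int flags = 0;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
            flags = dev->flags;   /* copy out under RCU protection */
        rcu_read_unlock();

        return flags;
    }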
static struct mlxsw_sp_rif *
@@ -1230,7 +1212,7 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
saddr_len = 4;
saddr_prefix_len = 32;
break;
- case MLXSW_SP_L3_PROTO_IPV6:
+ default:
WARN_ON(1);
return NULL;
}
@@ -1355,8 +1337,12 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
ipip_list_node);
list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
ipip_list_node) {
- struct net_device *ipip_ul_dev =
- __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
+ struct net_device *ol_dev = ipip_entry->ol_dev;
+ struct net_device *ipip_ul_dev;
+
+ rcu_read_lock();
+ ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ rcu_read_unlock();
if (ipip_ul_dev == ul_dev)
return ipip_entry;
@@ -1365,10 +1351,16 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
-bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
+bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
- return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
+ bool is_ipip_ul;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
+ mutex_unlock(&mlxsw_sp->router->lock);
+
+ return is_ipip_ul;
}
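mlxsw_sp_netdev_is_ipip_ul() illustrates the locking scheme applied throughout this patch: externally visible entry points take the new router mutex and delegate the real work to a helper, while internal callers that already hold the lock call the helper directly (often as a __-prefixed variant). A generic sketch of the shape:

    #include <linux/mutex.h>

    struct router {
        struct mutex lock;    /* protects shared router resources */
        /* ... */
    };

    /* Internal variant: caller must hold router->lock. */
    static bool __router_lookup(struct router *r, int key)
    {
        lockdep_assert_held(&r->lock);
        /* ... walk structures protected by r->lock ... */
        return false;
    }

    /* Public wrapper: safe to call from notifiers and other modules. */
    bool router_lookup(struct router *r, int key)
    {
        bool found;

        mutex_lock(&r->lock);
        found = __router_lookup(r, key);
        mutex_unlock(&r->lock);

        return found;
    }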
static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
@@ -1388,9 +1380,9 @@ static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *ol_dev)
{
+ enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
struct mlxsw_sp_ipip_entry *ipip_entry;
enum mlxsw_sp_l3proto ul_proto;
- enum mlxsw_sp_ipip_type ipipt;
union mlxsw_sp_l3addr saddr;
u32 ul_tb_id;
@@ -1543,13 +1535,17 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif);
/**
- * Update the offload related to an IPIP entry. This always updates decap, and
- * in addition to that it also:
- * @recreate_loopback: recreates the associated loopback RIF
- * @keep_encap: updates next hops that use the tunnel netdevice. This is only
+ * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
+ * @mlxsw_sp: mlxsw_sp.
+ * @ipip_entry: IPIP entry.
+ * @recreate_loopback: Recreates the associated loopback RIF.
+ * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
* relevant when recreate_loopback is true.
- * @update_nexthops: updates next hops, keeping the current loopback RIF. This
+ * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
* is only relevant when recreate_loopback is false.
+ * @extack: Netlink extended ack for error reporting.
+ *
+ * Return: Non-zero value on failure.
*/
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -1722,9 +1718,12 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
ipip_list_node) {
- struct net_device *ipip_ul_dev =
- __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
+ struct net_device *ol_dev = ipip_entry->ol_dev;
+ struct net_device *ipip_ul_dev;
+ rcu_read_lock();
+ ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ rcu_read_unlock();
if (ipip_ul_dev == ul_dev)
mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
}
@@ -1737,35 +1736,41 @@ int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
{
struct netdev_notifier_changeupper_info *chup;
struct netlink_ext_ack *extack;
+ int err = 0;
+ mutex_lock(&mlxsw_sp->router->lock);
switch (event) {
case NETDEV_REGISTER:
- return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
+ err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
+ break;
case NETDEV_UNREGISTER:
mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
- return 0;
+ break;
case NETDEV_UP:
mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
- return 0;
+ break;
case NETDEV_DOWN:
mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
- return 0;
+ break;
case NETDEV_CHANGEUPPER:
chup = container_of(info, typeof(*chup), info);
extack = info->extack;
if (netif_is_l3_master(chup->upper_dev))
- return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
- ol_dev,
- extack);
- return 0;
+ err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
+ ol_dev,
+ extack);
+ break;
case NETDEV_CHANGE:
extack = info->extack;
- return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
- ol_dev, extack);
+ err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
+ ol_dev, extack);
+ break;
case NETDEV_CHANGEMTU:
- return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
+ err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
+ break;
}
- return 0;
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
}
static int
@@ -1809,8 +1814,9 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
struct netdev_notifier_info *info)
{
struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
- int err;
+ int err = 0;
+ mutex_lock(&mlxsw_sp->router->lock);
while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
ul_dev,
ipip_entry))) {
@@ -1823,7 +1829,7 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
if (err) {
mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
ul_dev);
- return err;
+ break;
}
if (demote_this) {
@@ -1840,8 +1846,9 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
ipip_entry = prev;
}
}
+ mutex_unlock(&mlxsw_sp->router->lock);
- return 0;
+ return err;
}
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
@@ -1850,8 +1857,22 @@ int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
u32 tunnel_index)
{
enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
struct mlxsw_sp_fib_entry *fib_entry;
- int err;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+
+ if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ router->nve_decap_config.ul_tb_id = ul_tb_id;
+ router->nve_decap_config.tunnel_index = tunnel_index;
+ router->nve_decap_config.ul_proto = ul_proto;
+ router->nve_decap_config.ul_sip = *ul_sip;
+ router->nve_decap_config.valid = true;
/* It is valid to create a tunnel with a local IP and only later
* assign this IP address to a local interface
@@ -1860,7 +1881,7 @@ int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
ul_proto, ul_sip,
type);
if (!fib_entry)
- return 0;
+ goto out;
fib_entry->decap.tunnel_index = tunnel_index;
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
@@ -1869,11 +1890,13 @@ int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
if (err)
goto err_fib_entry_update;
- return 0;
+ goto out;
err_fib_entry_update:
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
return err;
}
@@ -1882,16 +1905,40 @@ void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
const union mlxsw_sp_l3addr *ul_sip)
{
enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
struct mlxsw_sp_fib_entry *fib_entry;
+ mutex_lock(&mlxsw_sp->router->lock);
+
+ if (WARN_ON_ONCE(!router->nve_decap_config.valid))
+ goto out;
+
+ router->nve_decap_config.valid = false;
+
fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
ul_proto, ul_sip,
type);
if (!fib_entry)
- return;
+ goto out;
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+}
+
+static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
+ u32 ul_tb_id,
+ enum mlxsw_sp_l3proto ul_proto,
+ const union mlxsw_sp_l3addr *ul_sip)
+{
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+
+ return router->nve_decap_config.valid &&
+ router->nve_decap_config.ul_tb_id == ul_tb_id &&
+ router->nve_decap_config.ul_proto == ul_proto &&
+ !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
+ sizeof(*ul_sip));
}
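With mlxsw_sp_router_nve_is_decap(), the router matches candidate routes against its locally cached decap parameters instead of calling back into the NVE module. A condensed standalone version of the match — names simplified; note that the memcmp() over a union only works because both sides are whole-union copies populated the same way:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    union l3addr {
        uint32_t addr4;
        uint8_t addr6[16];
    };

    struct nve_decap {
        uint32_t ul_tb_id;
        uint32_t tunnel_index;
        int ul_proto;
        union l3addr ul_sip;
        bool valid;
    };

    static bool nve_decap_matches(const struct nve_decap *cfg, uint32_t tb_id,
                                  int proto, const union l3addr *sip)
    {
        /* A route is a decap route only if the cached config is valid and
         * the table, protocol and source IP all match. memcmp() compares
         * the full union, so the unused bytes must agree on both sides.
         */
        return cfg->valid &&
               cfg->ul_tb_id == tb_id &&
               cfg->ul_proto == proto &&
               !memcmp(&cfg->ul_sip, sip, sizeof(*sip));
    }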
struct mlxsw_sp_neigh_key {
@@ -2264,10 +2311,8 @@ __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
int i, num_rec;
int err;
- /* Make sure the neighbour's netdev isn't removed in the
- * process.
- */
- rtnl_lock();
+ /* Ensure the RIF we read from the device does not change mid-dump. */
+ mutex_lock(&mlxsw_sp->router->lock);
do {
mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
@@ -2281,7 +2326,7 @@ __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
i);
} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
return err;
}
@@ -2312,15 +2357,14 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
- /* Take RTNL mutex here to prevent lists from changes */
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
nexthop_neighs_list_node)
/* If this neigh has nexthops, make the kernel think this neigh
* is active regardless of the traffic.
*/
neigh_event_send(neigh_entry->key.n, NULL);
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
}
static void
@@ -2360,15 +2404,13 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
* the nexthop wouldn't get offloaded until the neighbor is resolved
* but it wouldn't get resolved ever in case traffic is flowing in HW
* using different nexthop.
- *
- * Take RTNL mutex here to prevent lists from changes.
*/
- rtnl_lock();
+ mutex_lock(&router->lock);
list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
nexthop_neighs_list_node)
if (!neigh_entry->connected)
neigh_event_send(neigh_entry->key.n, NULL);
- rtnl_unlock();
+ mutex_unlock(&router->lock);
mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
@@ -2506,7 +2548,7 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
dead = n->dead;
read_unlock_bh(&n->lock);
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
entry_connected = nud_state & NUD_VALID && !dead;
@@ -2528,7 +2570,7 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
out:
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
neigh_release(n);
kfree(net_work);
}
@@ -3189,7 +3231,6 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
u32 adj_index = nh_grp->adj_index; /* base */
struct mlxsw_sp_nexthop *nh;
int i;
- int err;
for (i = 0; i < nh_grp->count; i++) {
nh = &nh_grp->nexthops[i];
@@ -3200,6 +3241,8 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
}
if (nh->update || reallocate) {
+ int err = 0;
+
switch (nh->type) {
case MLXSW_SP_NEXTHOP_TYPE_ETH:
err = mlxsw_sp_nexthop_update
@@ -3711,9 +3754,15 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
{
- struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ struct net_device *ul_dev;
+ bool is_up;
+
+ rcu_read_lock();
+ ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
+ rcu_read_unlock();
- return ul_dev ? (ul_dev->flags & IFF_UP) : true;
+ return is_up;
}
static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
@@ -3840,10 +3889,14 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
if (!dev)
return 0;
- in_dev = __in_dev_get_rtnl(dev);
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev);
if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
- fib_nh->fib_nh_flags & RTNH_F_LINKDOWN)
+ fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
+ rcu_read_unlock();
return 0;
+ }
+ rcu_read_unlock();
err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
if (err)
@@ -4473,6 +4526,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
{
struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
struct mlxsw_sp_ipip_entry *ipip_entry;
struct fib_info *fi = fen_info->fi;
@@ -4487,12 +4541,13 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
fib_entry,
ipip_entry);
}
- if (mlxsw_sp_nve_ipv4_route_is_decap(mlxsw_sp, tb_id,
- dip.addr4)) {
- u32 t_index;
+ if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
+ MLXSW_SP_L3_PROTO_IPV4,
+ &dip)) {
+ u32 tunnel_index;
- t_index = mlxsw_sp_nve_decap_tunnel_index_get(mlxsw_sp);
- fib_entry->decap.tunnel_index = t_index;
+ tunnel_index = router->nve_decap_config.tunnel_index;
+ fib_entry->decap.tunnel_index = tunnel_index;
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
return 0;
}
@@ -5923,8 +5978,7 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
- /* Protect internal structures from changes */
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
switch (fib_work->event) {
@@ -5946,7 +6000,7 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
break;
}
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
kfree(fib_work);
}
@@ -5957,7 +6011,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
- rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
switch (fib_work->event) {
@@ -5984,7 +6038,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
break;
}
- rtnl_unlock();
+ mutex_unlock(&mlxsw_sp->router->lock);
kfree(fib_work);
}
@@ -5997,6 +6051,7 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
int err;
rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_ADD:
@@ -6025,6 +6080,7 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
dev_put(fib_work->ven_info.dev);
break;
}
+ mutex_unlock(&mlxsw_sp->router->lock);
rtnl_unlock();
kfree(fib_work);
}
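Note the ordering in the multicast work item above: RTNL is taken first and the router mutex nests inside it, the same order used by the inet6addr work further down. Any path acquiring them in the opposite order would be an AB-BA deadlock. An illustrative helper capturing the convention (not a function from the driver):

    /* Illustrative only: the nesting convention is always RTNL first,
     * then the router mutex.
     */
    static void router_do_under_both_locks(struct mlxsw_sp *mlxsw_sp,
                                           void (*fn)(struct mlxsw_sp *))
    {
        rtnl_lock();
        mutex_lock(&mlxsw_sp->router->lock);
        fn(mlxsw_sp);
        mutex_unlock(&mlxsw_sp->router->lock);
        rtnl_unlock();
    }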
@@ -6233,7 +6289,7 @@ err_fib_event:
return NOTIFY_BAD;
}
-struct mlxsw_sp_rif *
+static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
@@ -6247,6 +6303,41 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
return NULL;
}
+bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ struct mlxsw_sp_rif *rif;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ mutex_unlock(&mlxsw_sp->router->lock);
+
+ return rif;
+}
+
+u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
+{
+ struct mlxsw_sp_rif *rif;
+ u16 vid = 0;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ goto out;
+
+ /* We only return the VID for VLAN RIFs. Otherwise we return an
+ * invalid value (0).
+ */
+ if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
+ goto out;
+
+ vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return vid;
+}
+
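mlxsw_sp_rif_exists() and mlxsw_sp_rif_vid() give other modules a locked view of RIF state without exporting mlxsw_sp_rif_find_by_dev() itself; since 0 is not a valid 802.1Q VID, callers can use it as a "no VLAN RIF" sentinel, as the switchdev hunk further down does. A hypothetical caller to show the contract:

    /* Hypothetical helper, not from the driver: reject a PVID change if
     * it disagrees with the VID of the bridge's VLAN RIF. A return of 0
     * from mlxsw_sp_rif_vid() means there is no VLAN RIF to conflict
     * with.
     */
    static int check_rif_pvid(struct mlxsw_sp *mlxsw_sp,
                              const struct net_device *br_dev, u16 new_pvid)
    {
        u16 vid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);

        if (!vid)
            return 0;
        return new_pvid == vid ? 0 : -EBUSY;
    }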
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -6281,7 +6372,8 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
case NETDEV_UP:
return rif == NULL;
case NETDEV_DOWN:
- idev = __in_dev_get_rtnl(dev);
+ rcu_read_lock();
+ idev = __in_dev_get_rcu(dev);
if (idev && idev->ifa_list)
addr_list_empty = false;
@@ -6289,6 +6381,7 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
if (addr_list_empty && inet6_dev &&
!list_empty(&inet6_dev->addr_list))
addr_list_empty = false;
+ rcu_read_unlock();
/* macvlans do not have a RIF, but rather piggyback on the
* RIF of their lower device.
@@ -6411,11 +6504,6 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
return rif->dev;
}
-struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
-{
- return rif->fid;
-}
-
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
@@ -6528,10 +6616,13 @@ void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_rif *rif;
+ mutex_lock(&mlxsw_sp->router->lock);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!rif)
- return;
+ goto out;
mlxsw_sp_rif_destroy(rif);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
}
static void
@@ -6631,8 +6722,8 @@ err_fid_port_vid_map:
return err;
}
-void
-mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+static void
+__mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
@@ -6650,6 +6741,16 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
mlxsw_sp_rif_subport_put(rif);
}
+void
+mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
+
+ mutex_lock(&mlxsw_sp->router->lock);
+ __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+ mutex_unlock(&mlxsw_sp->router->lock);
+}
+
static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
struct net_device *port_dev,
unsigned long event, u16 vid,
@@ -6667,7 +6768,7 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
l3_dev, extack);
case NETDEV_DOWN:
- mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+ __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
break;
}
@@ -6848,8 +6949,8 @@ err_rif_vrrp_add:
return err;
}
-void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *macvlan_dev)
+static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev)
{
struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
struct mlxsw_sp_rif *rif;
@@ -6866,6 +6967,14 @@ void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_index(rif->fid), false);
}
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *macvlan_dev)
+{
+ mutex_lock(&mlxsw_sp->router->lock);
+ __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+ mutex_unlock(&mlxsw_sp->router->lock);
+}
+
static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *macvlan_dev,
unsigned long event,
@@ -6875,7 +6984,7 @@ static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
case NETDEV_UP:
return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
case NETDEV_DOWN:
- mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+ __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
break;
}
@@ -6945,15 +7054,17 @@ static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
if (event == NETDEV_UP)
- goto out;
+ return NOTIFY_DONE;
router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
+ mutex_lock(&router->lock);
rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
out:
+ mutex_unlock(&router->lock);
return notifier_from_errno(err);
}
@@ -6968,8 +7079,9 @@ int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
- goto out;
+ return NOTIFY_DONE;
+ mutex_lock(&mlxsw_sp->router->lock);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
@@ -6981,6 +7093,7 @@ int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
out:
+ mutex_unlock(&mlxsw_sp->router->lock);
return notifier_from_errno(err);
}
@@ -7001,6 +7114,7 @@ static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
struct mlxsw_sp_rif *rif;
rtnl_lock();
+ mutex_lock(&mlxsw_sp->router->lock);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(rif, dev, event))
@@ -7008,6 +7122,7 @@ static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
out:
+ mutex_unlock(&mlxsw_sp->router->lock);
rtnl_unlock();
dev_put(dev);
kfree(inet6addr_work);
@@ -7052,8 +7167,9 @@ int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
- goto out;
+ return NOTIFY_DONE;
+ mutex_lock(&mlxsw_sp->router->lock);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
@@ -7065,6 +7181,7 @@ int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
out:
+ mutex_unlock(&mlxsw_sp->router->lock);
return notifier_from_errno(err);
}
@@ -7151,24 +7268,30 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
{
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *rif;
+ int err = 0;
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
return 0;
+ mutex_lock(&mlxsw_sp->router->lock);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!rif)
- return 0;
+ goto out;
switch (event) {
case NETDEV_CHANGEMTU: /* fall through */
case NETDEV_CHANGEADDR:
- return mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
+ err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
+ break;
case NETDEV_PRE_CHANGEADDR:
- return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
+ err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
+ break;
}
- return 0;
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
}
static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
@@ -7211,9 +7334,10 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
if (!mlxsw_sp || netif_is_macvlan(l3_dev))
return 0;
+ mutex_lock(&mlxsw_sp->router->lock);
switch (event) {
case NETDEV_PRECHANGEUPPER:
- return 0;
+ break;
case NETDEV_CHANGEUPPER:
if (info->linking) {
struct netlink_ext_ack *extack;
@@ -7225,6 +7349,7 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
}
break;
}
+ mutex_unlock(&mlxsw_sp->router->lock);
return err;
}
@@ -7351,13 +7476,14 @@ u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
-static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
+static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
- u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ u16 fid_index = mlxsw_sp_fid_index(rif->fid);
int err;
- err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
+ err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
+ true);
if (err)
return err;
@@ -7386,13 +7512,13 @@ err_fid_bc_flood_set:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
return err;
}
-static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
+static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
- u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ u16 fid_index = mlxsw_sp_fid_index(rif->fid);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_fid *fid = rif->fid;
@@ -7404,10 +7530,41 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
+ mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}
static struct mlxsw_sp_fid *
+mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
+}
+
+static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
+{
+ struct switchdev_notifier_fdb_info info;
+ struct net_device *dev;
+
+ dev = br_fdb_find_port(rif->dev, mac, 0);
+ if (!dev)
+ return;
+
+ info.addr = mac;
+ info.vid = 0;
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
+ NULL);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
+ .type = MLXSW_SP_RIF_TYPE_FID,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp_rif_fid_configure,
+ .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .fid_get = mlxsw_sp_rif_fid_fid_get,
+ .fdb_del = mlxsw_sp_rif_fid_fdb_del,
+};
+
+static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
@@ -7428,7 +7585,7 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
}
}
- return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, br_dev, vid, extack);
+ return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}
static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
@@ -7449,103 +7606,6 @@ static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
NULL);
}
-static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
- .type = MLXSW_SP_RIF_TYPE_VLAN,
- .rif_size = sizeof(struct mlxsw_sp_rif),
- .configure = mlxsw_sp_rif_vlan_configure,
- .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
- .fid_get = mlxsw_sp_rif_vlan_fid_get,
- .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
-};
-
-static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
-{
- struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
- u16 fid_index = mlxsw_sp_fid_index(rif->fid);
- int err;
-
- err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
- true);
- if (err)
- return err;
-
- err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
- mlxsw_sp_router_port(mlxsw_sp), true);
- if (err)
- goto err_fid_mc_flood_set;
-
- err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
- mlxsw_sp_router_port(mlxsw_sp), true);
- if (err)
- goto err_fid_bc_flood_set;
-
- err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
- mlxsw_sp_fid_index(rif->fid), true);
- if (err)
- goto err_rif_fdb_op;
-
- mlxsw_sp_fid_rif_set(rif->fid, rif);
- return 0;
-
-err_rif_fdb_op:
- mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
- mlxsw_sp_router_port(mlxsw_sp), false);
-err_fid_bc_flood_set:
- mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
- mlxsw_sp_router_port(mlxsw_sp), false);
-err_fid_mc_flood_set:
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
- return err;
-}
-
-static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
-{
- u16 fid_index = mlxsw_sp_fid_index(rif->fid);
- struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
- struct mlxsw_sp_fid *fid = rif->fid;
-
- mlxsw_sp_fid_rif_set(fid, NULL);
- mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
- mlxsw_sp_fid_index(fid), false);
- mlxsw_sp_rif_macvlan_flush(rif);
- mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
- mlxsw_sp_router_port(mlxsw_sp), false);
- mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
- mlxsw_sp_router_port(mlxsw_sp), false);
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
-}
-
-static struct mlxsw_sp_fid *
-mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
- struct netlink_ext_ack *extack)
-{
- return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, rif->dev, 0, extack);
-}
-
-static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
-{
- struct switchdev_notifier_fdb_info info;
- struct net_device *dev;
-
- dev = br_fdb_find_port(rif->dev, mac, 0);
- if (!dev)
- return;
-
- info.addr = mac;
- info.vid = 0;
- call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
- NULL);
-}
-
-static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
- .type = MLXSW_SP_RIF_TYPE_FID,
- .rif_size = sizeof(struct mlxsw_sp_rif),
- .configure = mlxsw_sp_rif_fid_configure,
- .deconfigure = mlxsw_sp_rif_fid_deconfigure,
- .fid_get = mlxsw_sp_rif_fid_fid_get,
- .fdb_del = mlxsw_sp_rif_fid_fdb_del,
-};
-
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
@@ -7733,28 +7793,32 @@ int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
u16 *ul_rif_index)
{
struct mlxsw_sp_rif *ul_rif;
+ int err = 0;
- ASSERT_RTNL();
-
+ mutex_lock(&mlxsw_sp->router->lock);
ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
- if (IS_ERR(ul_rif))
- return PTR_ERR(ul_rif);
+ if (IS_ERR(ul_rif)) {
+ err = PTR_ERR(ul_rif);
+ goto out;
+ }
*ul_rif_index = ul_rif->rif_index;
-
- return 0;
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
+ return err;
}
void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
{
struct mlxsw_sp_rif *ul_rif;
- ASSERT_RTNL();
-
+ mutex_lock(&mlxsw_sp->router->lock);
ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
if (WARN_ON(!ul_rif))
- return;
+ goto out;
mlxsw_sp_ul_rif_put(ul_rif);
+out:
+ mutex_unlock(&mlxsw_sp->router->lock);
}
static int
@@ -8004,6 +8068,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
if (!router)
return -ENOMEM;
+ mutex_init(&router->lock);
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
@@ -8107,6 +8172,7 @@ err_router_init:
err_register_inet6addr_notifier:
unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
+ mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
return err;
}
@@ -8127,5 +8193,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
__mlxsw_sp_router_fini(mlxsw_sp);
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
+ mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index c9b94f435cdd..8418dc3ae967 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -7,6 +7,49 @@
#include "spectrum.h"
#include "reg.h"
+struct mlxsw_sp_router_nve_decap {
+ u32 ul_tb_id;
+ u32 tunnel_index;
+ enum mlxsw_sp_l3proto ul_proto;
+ union mlxsw_sp_l3addr ul_sip;
+ u8 valid:1;
+};
+
+struct mlxsw_sp_router {
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif **rifs;
+ struct mlxsw_sp_vr *vrs;
+ struct rhashtable neigh_ht;
+ struct rhashtable nexthop_group_ht;
+ struct rhashtable nexthop_ht;
+ struct list_head nexthop_list;
+ struct {
+ /* One tree for each protocol: IPv4 and IPv6 */
+ struct mlxsw_sp_lpm_tree *proto_trees[2];
+ struct mlxsw_sp_lpm_tree *trees;
+ unsigned int tree_count;
+ } lpm;
+ struct {
+ struct delayed_work dw;
+ unsigned long interval; /* ms */
+ } neighs_update;
+ struct delayed_work nexthop_probe_dw;
+#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
+ struct list_head nexthop_neighs_list;
+ struct list_head ipip_list;
+ bool aborted;
+ struct notifier_block fib_nb;
+ struct notifier_block netevent_nb;
+ struct notifier_block inetaddr_nb;
+ struct notifier_block inet6addr_nb;
+ const struct mlxsw_sp_rif_ops **rif_ops_arr;
+ const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
+ u32 adj_discard_index;
+ bool adj_discard_index_valid;
+ struct mlxsw_sp_router_nve_decap nve_decap_config;
+ struct mutex lock; /* Protects shared router resources */
+};
+
struct mlxsw_sp_rif_ipip_lb;
struct mlxsw_sp_rif_ipip_lb_config {
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 0cdd7954a085..9fb2e9d93929 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -3,6 +3,8 @@
#include <linux/if_bridge.h>
#include <linux/list.h>
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
@@ -14,38 +16,43 @@
#include "spectrum_span.h"
#include "spectrum_switchdev.h"
+struct mlxsw_sp_span {
+ struct work_struct work;
+ struct mlxsw_sp *mlxsw_sp;
+ atomic_t active_entries_count;
+ int entries_count;
+ struct mlxsw_sp_span_entry entries[0];
+};
+
+static void mlxsw_sp_span_respin_work(struct work_struct *work);
+
static u64 mlxsw_sp_span_occ_get(void *priv)
{
const struct mlxsw_sp *mlxsw_sp = priv;
- u64 occ = 0;
- int i;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- if (mlxsw_sp->span.entries[i].ref_count)
- occ++;
- }
-
- return occ;
+ return atomic_read(&mlxsw_sp->span->active_entries_count);
}
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- int i;
+ struct mlxsw_sp_span *span;
+ int i, entries_count;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
return -EIO;
- mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- MAX_SPAN);
- mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
- sizeof(struct mlxsw_sp_span_entry),
- GFP_KERNEL);
- if (!mlxsw_sp->span.entries)
+ entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
+ span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
+ if (!span)
return -ENOMEM;
+ span->entries_count = entries_count;
+ atomic_set(&span->active_entries_count, 0);
+ span->mlxsw_sp = mlxsw_sp;
+ mlxsw_sp->span = span;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
INIT_LIST_HEAD(&curr->bound_ports_list);
curr->id = i;
@@ -53,6 +60,7 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
mlxsw_sp_span_occ_get, mlxsw_sp);
+ INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
return 0;
}
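struct mlxsw_sp_span now carries its entries as a trailing array sized at allocation time, and the kzalloc above uses struct_size() so the header-plus-array arithmetic is overflow-checked. The same pattern in isolation (shown with a modern flexible array member; the driver's entries[0] is the older spelling of the same construct):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct item {
        int id;
    };

    struct table {
        int count;
        struct item entries[];   /* flexible array member */
    };

    static struct table *table_alloc(int count)
    {
        /* struct_size(t, entries, count) ==
         * sizeof(struct table) + count * sizeof(struct item),
         * with integer-overflow checking folded in.
         */
        struct table *t = kzalloc(struct_size(t, entries, count), GFP_KERNEL);

        if (t)
            t->count = count;
        return t;
    }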
@@ -62,14 +70,15 @@ void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
+ cancel_work_sync(&mlxsw_sp->span->work);
devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
}
- kfree(mlxsw_sp->span.entries);
+ kfree(mlxsw_sp->span);
}
static int
@@ -645,15 +654,16 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
int i;
/* find a free entry to use */
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- if (!mlxsw_sp->span.entries[i].ref_count) {
- span_entry = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ if (!mlxsw_sp->span->entries[i].ref_count) {
+ span_entry = &mlxsw_sp->span->entries[i];
break;
}
}
if (!span_entry)
return NULL;
+ atomic_inc(&mlxsw_sp->span->active_entries_count);
span_entry->ops = ops;
span_entry->ref_count = 1;
span_entry->to_dev = to_dev;
@@ -662,9 +672,11 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
return span_entry;
}
-static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
+static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
{
mlxsw_sp_span_entry_deconfigure(span_entry);
+ atomic_dec(&mlxsw_sp->span->active_entries_count);
}
struct mlxsw_sp_span_entry *
@@ -673,8 +685,8 @@ mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
{
int i;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
if (curr->ref_count && curr->to_dev == to_dev)
return curr;
@@ -694,8 +706,8 @@ mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
int i;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
if (curr->ref_count && curr->id == span_id)
return curr;
@@ -726,7 +738,7 @@ static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
{
WARN_ON(!span_entry->ref_count);
if (--span_entry->ref_count == 0)
- mlxsw_sp_span_entry_destroy(span_entry);
+ mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
return 0;
}
@@ -736,8 +748,8 @@ static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
struct mlxsw_sp_span_inspected_port *p;
int i;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
list_for_each_entry(p, &curr->bound_ports_list, list)
if (p->local_port == port->local_port &&
@@ -842,9 +854,9 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
* so if a binding is requested, check for conflicts.
*/
if (bind)
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
struct mlxsw_sp_span_entry *curr =
- &mlxsw_sp->span.entries[i];
+ &mlxsw_sp->span->entries[i];
if (mlxsw_sp_span_entry_bound_port_find(curr, type,
port, bind))
@@ -988,14 +1000,18 @@ void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}
-void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
- int i;
- int err;
+ struct mlxsw_sp_span *span;
+ struct mlxsw_sp *mlxsw_sp;
+ int i, err;
- ASSERT_RTNL();
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ span = container_of(work, struct mlxsw_sp_span, work);
+ mlxsw_sp = span->mlxsw_sp;
+
+ rtnl_lock();
+ for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
struct mlxsw_sp_span_parms sparms = {NULL};
if (!curr->ref_count)
@@ -1010,4 +1026,12 @@ void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
}
}
+ rtnl_unlock();
+}
+
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+{
+ if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
+ return;
+ mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}
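Two things make the new mlxsw_sp_span_respin() cheap enough for hot notifier paths: the atomic counter of active entries lets it return immediately when nothing is mirrored, and the work item lives in struct mlxsw_sp_span itself, so the per-call GFP_ATOMIC allocation that spectrum_switchdev.c used to do (removed below) is gone. The gate in isolation:

    #include <linux/atomic.h>
    #include <linux/workqueue.h>

    struct span_ctx {
        struct work_struct work;  /* initialized once, at init time */
        atomic_t active;          /* number of in-use mirroring entries */
    };

    static void span_respin_request(struct span_ctx *ctx)
    {
        /* Fast path: no active session means nothing to respin, so no
         * work is queued and no allocation is needed.
         */
        if (atomic_read(&ctx->active) == 0)
            return;
        schedule_work(&ctx->work);  /* the driver uses mlxsw's own queue */
    }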
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index a3af171c6358..a26162b08b7d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -153,16 +153,64 @@ static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp);
}
+static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev, *stop_dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev) && netif_running(dev)) {
+ err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
+ br_dev, dev, 0,
+ extack);
+ if (err) {
+ stop_dev = dev;
+ goto err_vxlan_join;
+ }
+ }
+ }
+
+ return 0;
+
+err_vxlan_join:
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev) && netif_running(dev)) {
+ if (stop_dev == dev)
+ break;
+ mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
+ }
+ }
+ return err;
+}
+
+static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
+ struct net_device *br_dev)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ if (netif_is_vxlan(dev) && netif_running(dev))
+ mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
+ }
+}
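mlxsw_sp_bridge_device_vxlan_init() above unwinds a partial replay by walking the same lower-device list a second time and stopping at the device that failed, because netdev_for_each_lower_dev() can only be traversed forward. The idiom in a self-contained form:

    /* Stand-ins for the join/leave operations being replayed. */
    static int join(int idx) { return idx == 2 ? -1 : 0; }
    static void leave(int idx) { (void)idx; }

    static int join_all(int n)
    {
        int i, stop = 0, err = 0;

        for (i = 0; i < n; i++) {
            err = join(i);
            if (err) {
                stop = i;
                goto err_unwind;
            }
        }
        return 0;

    err_unwind:
        /* Re-walk the same sequence forward and undo only what
         * succeeded, stopping at the element that failed.
         */
        for (i = 0; i < n; i++) {
            if (i == stop)
                break;
            leave(i);
        }
        return err;
    }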
+
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
- struct net_device *br_dev)
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
{
struct device *dev = bridge->mlxsw_sp->bus_info->dev;
struct mlxsw_sp_bridge_device *bridge_device;
bool vlan_enabled = br_vlan_enabled(br_dev);
+ int err;
if (vlan_enabled && bridge->vlan_enabled_exists) {
dev_err(dev, "Only one VLAN-aware bridge is supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
return ERR_PTR(-EINVAL);
}
@@ -184,13 +232,29 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
INIT_LIST_HEAD(&bridge_device->mids_list);
list_add(&bridge_device->list, &bridge->bridges_list);
+ /* It is possible we already have VXLAN devices enslaved to the bridge,
+ * in which case we need to replay their configuration as if they were
+ * just now enslaved to the bridge.
+ */
+ err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
+ if (err)
+ goto err_vxlan_init;
+
return bridge_device;
+
+err_vxlan_init:
+ list_del(&bridge_device->list);
+ if (bridge_device->vlan_enabled)
+ bridge->vlan_enabled_exists = false;
+ kfree(bridge_device);
+ return ERR_PTR(err);
}
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
struct mlxsw_sp_bridge_device *bridge_device)
{
+ mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
bridge_device->dev);
list_del(&bridge_device->list);
@@ -203,7 +267,8 @@ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
- struct net_device *br_dev)
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_bridge_device *bridge_device;
@@ -211,7 +276,7 @@ mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
if (bridge_device)
return bridge_device;
- return mlxsw_sp_bridge_device_create(bridge, br_dev);
+ return mlxsw_sp_bridge_device_create(bridge, br_dev, extack);
}
static void
@@ -292,7 +357,8 @@ mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
- struct net_device *brport_dev)
+ struct net_device *brport_dev,
+ struct netlink_ext_ack *extack)
{
struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
struct mlxsw_sp_bridge_device *bridge_device;
@@ -305,7 +371,7 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
return bridge_port;
}
- bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
+ bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
if (IS_ERR(bridge_device))
return ERR_CAST(bridge_device);
@@ -1000,7 +1066,7 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
&bridge_vlan->port_vlan_list);
mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
- bridge_port->dev);
+ bridge_port->dev, extack);
mlxsw_sp_port_vlan->bridge_port = bridge_port;
return 0;
@@ -1107,16 +1173,12 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev,
const struct switchdev_obj_port_vlan *vlan)
{
- struct mlxsw_sp_rif *rif;
- struct mlxsw_sp_fid *fid;
u16 pvid;
u16 vid;
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
- if (!rif)
+ pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
+ if (!pvid)
return 0;
- fid = mlxsw_sp_rif_fid(rif);
- pvid = mlxsw_sp_fid_8021q_vid(fid);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
@@ -1712,36 +1774,6 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
-struct mlxsw_sp_span_respin_work {
- struct work_struct work;
- struct mlxsw_sp *mlxsw_sp;
-};
-
-static void mlxsw_sp_span_respin_work(struct work_struct *work)
-{
- struct mlxsw_sp_span_respin_work *respin_work =
- container_of(work, struct mlxsw_sp_span_respin_work, work);
-
- rtnl_lock();
- mlxsw_sp_span_respin(respin_work->mlxsw_sp);
- rtnl_unlock();
- kfree(respin_work);
-}
-
-static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_span_respin_work *respin_work;
-
- respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
- if (!respin_work)
- return;
-
- INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
- respin_work->mlxsw_sp = mlxsw_sp;
-
- mlxsw_core_schedule_work(&respin_work->work);
-}
-
static int mlxsw_sp_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct switchdev_trans *trans,
@@ -1763,7 +1795,7 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
* call for later, so that the respin logic sees the
* updated bridge state.
*/
- mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
}
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
@@ -1916,7 +1948,7 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
break;
}
- mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
return err;
}
@@ -1990,12 +2022,11 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
return err;
}
- /* If no other port is member in the VLAN, then the FID does not exist.
- * NVE will be enabled on the FID once a port joins the VLAN
- */
- fid = mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
- if (!fid)
- return 0;
+ fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
+ if (IS_ERR(fid)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
+ return PTR_ERR(fid);
+ }
if (mlxsw_sp_fid_vni_is_set(fid)) {
NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
@@ -2007,11 +2038,6 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
if (err)
goto err_nve_fid_enable;
- /* The tunnel port does not hold a reference on the FID. Only
- * local ports and the router port
- */
- mlxsw_sp_fid_put(fid);
-
return 0;
err_nve_fid_enable:
@@ -2048,38 +2074,8 @@ mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid, struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
- struct net_device *vxlan_dev;
- struct mlxsw_sp_fid *fid;
- int err;
-
- fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
- if (IS_ERR(fid))
- return fid;
-
- if (mlxsw_sp_fid_vni_is_set(fid))
- return fid;
-
- /* Find the VxLAN device that has the specified VLAN configured as
- * PVID and egress untagged. There can be at most one such device
- */
- vxlan_dev = mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev,
- vid);
- if (!vxlan_dev)
- return fid;
-
- if (!netif_running(vxlan_dev))
- return fid;
-
- err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
- extack);
- if (err)
- goto err_vxlan_join;
-
- return fid;
-err_vxlan_join:
- mlxsw_sp_fid_put(fid);
- return ERR_PTR(err);
+ return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
}
static struct mlxsw_sp_fid *
@@ -2184,9 +2180,9 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_fid *fid;
int err;
- fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
- if (!fid) {
- NL_SET_ERR_MSG_MOD(extack, "Did not find a corresponding FID");
+ fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
+ if (IS_ERR(fid)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
return -EINVAL;
}
@@ -2200,11 +2196,6 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
if (err)
goto err_nve_fid_enable;
- /* The tunnel port does not hold a reference on the FID. Only
- * local ports and the router port
- */
- mlxsw_sp_fid_put(fid);
-
return 0;
err_nve_fid_enable:
@@ -2218,34 +2209,8 @@ mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid, struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
- struct net_device *vxlan_dev;
- struct mlxsw_sp_fid *fid;
- int err;
- fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
- if (IS_ERR(fid))
- return fid;
-
- if (mlxsw_sp_fid_vni_is_set(fid))
- return fid;
-
- vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev);
- if (!vxlan_dev)
- return fid;
-
- if (!netif_running(vxlan_dev))
- return fid;
-
- err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, 0,
- extack);
- if (err)
- goto err_vxlan_join;
-
- return fid;
-
-err_vxlan_join:
- mlxsw_sp_fid_put(fid);
- return ERR_PTR(err);
+ return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
}
static struct mlxsw_sp_fid *
@@ -2287,7 +2252,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
+ bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
+ extack);
if (IS_ERR(bridge_port))
return PTR_ERR(bridge_port);
bridge_device = bridge_port->bridge_device;
@@ -2351,21 +2317,11 @@ void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
return;
mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
+ /* Drop both the reference we just took during lookup and the reference
+ * the VXLAN device took.
+ */
+ mlxsw_sp_fid_put(fid);
mlxsw_sp_fid_put(fid);
-}
-
-struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
- u16 vid,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_bridge_device *bridge_device;
-
- bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
- if (WARN_ON(!bridge_device))
- return ERR_PTR(-EINVAL);
-
- return bridge_device->ops->fid_get(bridge_device, vid, extack);
}
static void
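With the VXLAN join paths above no longer dropping the reference the tunnel device takes on the FID, the leave path is now responsible for two references: the one taken by the FID lookup and the one held on behalf of the VXLAN device. A minimal userspace sketch of that two-reference lifetime, with every name illustrative:

#include <assert.h>
#include <stdlib.h>

struct fid {
	int refcount;
};

static struct fid *fid_get(struct fid *f)
{
	f->refcount++;
	return f;
}

static void fid_put(struct fid *f)
{
	if (--f->refcount == 0)
		free(f);
}

int main(void)
{
	struct fid *f = calloc(1, sizeof(*f));

	fid_get(f);	/* creation reference (e.g. a bridge port) */
	fid_get(f);	/* vxlan_join keeps the reference it took */
	fid_get(f);	/* vxlan_leave: reference taken by the lookup */
	fid_put(f);	/* drop the lookup reference... */
	fid_put(f);	/* ...and the one the VXLAN device held */
	assert(f->refcount == 1);
	fid_put(f);	/* last owner frees the FID */
	return 0;
}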
@@ -2718,19 +2674,24 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
}
}
-static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
+ bool no_delay)
{
struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
+ unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
- msecs_to_jiffies(bridge->fdb_notify.interval));
+ msecs_to_jiffies(interval));
}
+#define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
+
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp *mlxsw_sp;
char *sfn_pl;
+ int queries;
u8 num_rec;
int i;
int err;
@@ -2743,20 +2704,26 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
mlxsw_sp = bridge->mlxsw_sp;
rtnl_lock();
- mlxsw_reg_sfn_pack(sfn_pl);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
- if (err) {
- dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
- goto out;
+ queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
+ while (queries > 0) {
+ mlxsw_reg_sfn_pack(sfn_pl);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
+ goto out;
+ }
+ num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
+ for (i = 0; i < num_rec; i++)
+ mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
+ if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
+ goto out;
+ queries--;
}
- num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
- for (i = 0; i < num_rec; i++)
- mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
out:
rtnl_unlock();
kfree(sfn_pl);
- mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+ mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
}
struct mlxsw_sp_switchdev_event_work {
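The reworked notification handler above bounds each work item to MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION SFN queries, stops early once a query returns less than a full record set, and asks to be rescheduled with no delay only when the whole budget was spent on full batches. A small userspace model of that policy, with the constants and names purely illustrative:

#include <stdbool.h>
#include <stdio.h>

#define QUERIES_PER_SESSION 10
#define REC_MAX_COUNT 64

/* Stand-in for the SFN query: how many records came back this time. */
static int query_records(int backlog)
{
	return backlog > REC_MAX_COUNT ? REC_MAX_COUNT : backlog;
}

/* One work session; returns true when the next session should run
 * with no delay because every query in the budget came back full.
 */
static bool run_session(int *backlog)
{
	int queries = QUERIES_PER_SESSION;

	while (queries > 0) {
		int num_rec = query_records(*backlog);

		*backlog -= num_rec;
		if (num_rec != REC_MAX_COUNT)
			return false;	/* drained; normal interval */
		queries--;
	}
	return true;	/* records may remain; poll again immediately */
}

int main(void)
{
	int backlog = 1000;
	bool no_delay;

	do {
		no_delay = run_session(&backlog);
		printf("no_delay=%d backlog=%d\n", no_delay, backlog);
	} while (backlog > 0);
	return 0;
}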
@@ -3502,7 +3469,7 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
- mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+ mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, false);
return 0;
err_register_switchdev_blocking_notifier:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 60205aa3f6a5..9096ffd89e50 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -1,13 +1,16 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+#include <linux/bitops.h>
#include <linux/kernel.h>
+#include <linux/netlink.h>
#include <net/devlink.h>
#include <uapi/linux/devlink.h>
#include "core.h"
#include "reg.h"
#include "spectrum.h"
+#include "spectrum_trap.h"
/* All driver-specific traps must be documented in
* Documentation/networking/devlink/mlxsw.rst
@@ -25,36 +28,166 @@ enum {
#define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+
+ if (unlikely(!mlxsw_sp_port)) {
+ dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ skb->dev = mlxsw_sp_port->dev;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ return 0;
+}
+
static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
- void *priv);
+ void *trap_ctx)
+{
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+ int err;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ err = mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port);
+ if (err)
+ return;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
+ skb_push(skb, ETH_HLEN);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port, NULL);
+ consume_skb(skb);
+}
+
+static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ u32 cookie_index = mlxsw_skb_cb(skb)->cookie_index;
+ const struct flow_action_cookie *fa_cookie;
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+ int err;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ err = mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port);
+ if (err)
+ return;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
+ skb_push(skb, ETH_HLEN);
+ rcu_read_lock();
+ fa_cookie = mlxsw_sp_acl_act_cookie_lookup(mlxsw_sp, cookie_index);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port, fa_cookie);
+ rcu_read_unlock();
+ consume_skb(skb);
+}
+
static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
- void *trap_ctx);
+ void *trap_ctx)
+{
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+ int err;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ err = mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port);
+ if (err)
+ return;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
+ skb_push(skb, ETH_HLEN);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port, NULL);
+ skb_pull(skb, ETH_HLEN);
+ skb->offload_fwd_mark = 1;
+ netif_receive_skb(skb);
+}
#define MLXSW_SP_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
- DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
MLXSW_SP_TRAP_METADATA)
+#define MLXSW_SP_TRAP_DROP_EXT(_id, _group_id, _metadata) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ MLXSW_SP_TRAP_METADATA | (_metadata))
+
#define MLXSW_SP_TRAP_DRIVER_DROP(_id, _group_id) \
DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_MLXSW_TRAP_ID_##_id, \
DEVLINK_MLXSW_TRAP_NAME_##_id, \
- DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
MLXSW_SP_TRAP_METADATA)
#define MLXSW_SP_TRAP_EXCEPTION(_id, _group_id) \
DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
- DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
MLXSW_SP_TRAP_METADATA)
#define MLXSW_SP_RXL_DISCARD(_id, _group_id) \
- MLXSW_RXL(mlxsw_sp_rx_drop_listener, DISCARD_##_id, SET_FW_DEFAULT, \
- false, SP_##_group_id, DISCARD)
+ MLXSW_RXL_DIS(mlxsw_sp_rx_drop_listener, DISCARD_##_id, \
+ TRAP_EXCEPTION_TO_CPU, false, SP_##_group_id, \
+ SET_FW_DEFAULT, SP_##_group_id)
+
+#define MLXSW_SP_RXL_ACL_DISCARD(_id, _en_group_id, _dis_group_id) \
+ MLXSW_RXL_DIS(mlxsw_sp_rx_acl_drop_listener, DISCARD_##_id, \
+ TRAP_EXCEPTION_TO_CPU, false, SP_##_en_group_id, \
+ SET_FW_DEFAULT, SP_##_dis_group_id)
#define MLXSW_SP_RXL_EXCEPTION(_id, _group_id, _action) \
MLXSW_RXL(mlxsw_sp_rx_exception_listener, _id, \
- _action, false, SP_##_group_id, DISCARD)
+ _action, false, SP_##_group_id, SET_FW_DEFAULT)
+
+#define MLXSW_SP_TRAP_POLICER(_id, _rate, _burst) \
+ DEVLINK_TRAP_POLICER(_id, _rate, _burst, \
+ MLXSW_REG_QPCR_HIGHEST_CIR, \
+ MLXSW_REG_QPCR_LOWEST_CIR, \
+ 1 << MLXSW_REG_QPCR_HIGHEST_CBS, \
+ 1 << MLXSW_REG_QPCR_LOWEST_CBS)
+
+/* Ordered by policer identifier */
+static const struct devlink_trap_policer mlxsw_sp_trap_policers_arr[] = {
+ MLXSW_SP_TRAP_POLICER(1, 10 * 1024, 128),
+};
-static struct devlink_trap mlxsw_sp_traps_arr[] = {
+static const struct devlink_trap_group mlxsw_sp_trap_groups_arr[] = {
+ DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 1),
+ DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 1),
+ DEVLINK_TRAP_GROUP_GENERIC(TUNNEL_DROPS, 1),
+ DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 1),
+};
+
+static const struct devlink_trap mlxsw_sp_traps_arr[] = {
MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS),
MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
MLXSW_SP_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
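The MLXSW_SP_TRAP_* macros rewritten above now paste the group name onto DEVLINK_TRAP_GROUP_GENERIC_ID_, so each trap records a group identifier and the group definitions live once in mlxsw_sp_trap_groups_arr. A minimal standalone demonstration of the token-pasting pattern, with all values invented:

#include <stdio.h>

enum {
	TRAP_GROUP_GENERIC_ID_L2_DROPS = 1,
	TRAP_GROUP_GENERIC_ID_L3_DROPS = 2,
};

struct trap {
	int id;
	int group_id;
};

/* _group_id is pasted onto the enum prefix, mirroring the reworked
 * driver macros.
 */
#define TRAP_DROP(_id, _group_id) \
	{ .id = (_id), .group_id = TRAP_GROUP_GENERIC_ID_##_group_id }

static const struct trap traps[] = {
	TRAP_DROP(10, L2_DROPS),
	TRAP_DROP(11, L3_DROPS),
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(traps) / sizeof(traps[0]); i++)
		printf("trap %d -> group %d\n", traps[i].id,
		       traps[i].group_id);
	return 0;
}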
@@ -83,9 +216,13 @@ static struct devlink_trap mlxsw_sp_traps_arr[] = {
MLXSW_SP_TRAP_DROP(NON_ROUTABLE, L3_DROPS),
MLXSW_SP_TRAP_EXCEPTION(DECAP_ERROR, TUNNEL_DROPS),
MLXSW_SP_TRAP_DROP(OVERLAY_SMAC_MC, TUNNEL_DROPS),
+ MLXSW_SP_TRAP_DROP_EXT(INGRESS_FLOW_ACTION_DROP, ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+ MLXSW_SP_TRAP_DROP_EXT(EGRESS_FLOW_ACTION_DROP, ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
};
-static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
+static const struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
MLXSW_SP_RXL_DISCARD(ING_PACKET_SMAC_MC, L2_DISCARDS),
MLXSW_SP_RXL_DISCARD(ING_SWITCH_VTAG_ALLOW, L2_DISCARDS),
MLXSW_SP_RXL_DISCARD(ING_SWITCH_VLAN, L2_DISCARDS),
@@ -103,34 +240,37 @@ static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
MLXSW_SP_RXL_DISCARD(ING_ROUTER_IPV4_SIP_BC, L3_DISCARDS),
MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_RESERVED_SCOPE, L3_DISCARDS),
MLXSW_SP_RXL_DISCARD(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, L3_DISCARDS),
- MLXSW_SP_RXL_EXCEPTION(MTUERROR, ROUTER_EXP, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(TTLERROR, ROUTER_EXP, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(RPF, RPF, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(RTR_INGRESS1, REMOTE_ROUTE, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV4, HOST_MISS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, HOST_MISS, TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, REMOTE_ROUTE,
+ MLXSW_SP_RXL_EXCEPTION(MTUERROR, L3_DISCARDS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(TTLERROR, L3_DISCARDS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(RPF, L3_DISCARDS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(RTR_INGRESS1, L3_DISCARDS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV4, L3_DISCARDS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, L3_DISCARDS, TRAP_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, L3_DISCARDS,
TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM4, ROUTER_EXP,
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM4, L3_DISCARDS,
TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6, ROUTER_EXP,
+ MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER_LPM6, L3_DISCARDS,
TRAP_EXCEPTION_TO_CPU),
MLXSW_SP_RXL_DISCARD(ROUTER_IRIF_EN, L3_DISCARDS),
MLXSW_SP_RXL_DISCARD(ROUTER_ERIF_EN, L3_DISCARDS),
MLXSW_SP_RXL_DISCARD(NON_ROUTABLE, L3_DISCARDS),
- MLXSW_SP_RXL_EXCEPTION(DECAP_ECN0, ROUTER_EXP, TRAP_EXCEPTION_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(IPIP_DECAP_ERROR, ROUTER_EXP,
+ MLXSW_SP_RXL_EXCEPTION(DECAP_ECN0, TUNNEL_DISCARDS,
+ TRAP_EXCEPTION_TO_CPU),
+ MLXSW_SP_RXL_EXCEPTION(IPIP_DECAP_ERROR, TUNNEL_DISCARDS,
TRAP_EXCEPTION_TO_CPU),
MLXSW_SP_RXL_EXCEPTION(DISCARD_DEC_PKT, TUNNEL_DISCARDS,
TRAP_EXCEPTION_TO_CPU),
MLXSW_SP_RXL_DISCARD(OVERLAY_SMAC_MC, TUNNEL_DISCARDS),
+ MLXSW_SP_RXL_ACL_DISCARD(INGRESS_ACL, ACL_DISCARDS, DUMMY),
+ MLXSW_SP_RXL_ACL_DISCARD(EGRESS_ACL, ACL_DISCARDS, DUMMY),
};
/* Mapping between hardware trap and devlink trap. Multiple hardware traps can
* be mapped to the same devlink trap. Order is according to
* 'mlxsw_sp_listeners_arr'.
*/
-static u16 mlxsw_sp_listener_devlink_map[] = {
+static const u16 mlxsw_sp_listener_devlink_map[] = {
DEVLINK_TRAP_GENERIC_ID_SMAC_MC,
DEVLINK_TRAP_GENERIC_ID_VLAN_TAG_MISMATCH,
DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
@@ -164,99 +304,168 @@ static u16 mlxsw_sp_listener_devlink_map[] = {
DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC,
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_FLOW_ACTION_DROP,
+ DEVLINK_TRAP_GENERIC_ID_EGRESS_FLOW_ACTION_DROP,
};
-static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port,
- struct mlxsw_sp_port *mlxsw_sp_port)
+#define MLXSW_SP_THIN_POLICER_ID (MLXSW_REG_HTGT_TRAP_GROUP_MAX + 1)
+
+static struct mlxsw_sp_trap_policer_item *
+mlxsw_sp_trap_policer_item_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
{
- struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+ struct mlxsw_sp_trap_policer_item *policer_item;
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
- if (unlikely(!mlxsw_sp_port)) {
- dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
- local_port);
- kfree_skb(skb);
- return -EINVAL;
+ list_for_each_entry(policer_item, &trap->policer_item_list, list) {
+ if (policer_item->id == id)
+ return policer_item;
}
- skb->dev = mlxsw_sp_port->dev;
+ return NULL;
+}
- pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
- u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->rx_packets++;
- pcpu_stats->rx_bytes += skb->len;
- u64_stats_update_end(&pcpu_stats->syncp);
+static int mlxsw_sp_trap_cpu_policers_set(struct mlxsw_sp *mlxsw_sp)
+{
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
- skb->protocol = eth_type_trans(skb, skb->dev);
+ /* The purpose of the "thin" policer is to drop as many packets
+ * as possible. The dummy group uses it.
+ */
+ __set_bit(MLXSW_SP_THIN_POLICER_ID, mlxsw_sp->trap->policers_usage);
+ mlxsw_reg_qpcr_pack(qpcr_pl, MLXSW_SP_THIN_POLICER_ID,
+ MLXSW_REG_QPCR_IR_UNITS_M, false, 1, 4);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
+}
- return 0;
+static int mlxsw_sp_trap_dummy_group_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SP_DUMMY,
+ MLXSW_SP_THIN_POLICER_ID, 0, 1);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
}
-static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
- void *trap_ctx)
+static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
{
- struct devlink_port *in_devlink_port;
- struct mlxsw_sp_port *mlxsw_sp_port;
- struct mlxsw_sp *mlxsw_sp;
- struct devlink *devlink;
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ u64 free_policers = 0;
+ u32 last_id = 0;
+ int err, i;
- mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
- mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ for_each_clear_bit(i, trap->policers_usage, trap->max_policers)
+ free_policers++;
- if (mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port))
- return;
+ if (ARRAY_SIZE(mlxsw_sp_trap_policers_arr) > free_policers) {
+ dev_err(mlxsw_sp->bus_info->dev, "Exceeded number of supported packet trap policers\n");
+ return -ENOBUFS;
+ }
- devlink = priv_to_devlink(mlxsw_sp->core);
- in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
- local_port);
- skb_push(skb, ETH_HLEN);
- devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
- consume_skb(skb);
-}
+ trap->policers_arr = kcalloc(free_policers,
+ sizeof(struct devlink_trap_policer),
+ GFP_KERNEL);
+ if (!trap->policers_arr)
+ return -ENOMEM;
+
+ trap->policers_count = free_policers;
+
+ for (i = 0; i < free_policers; i++) {
+ const struct devlink_trap_policer *policer;
+
+ if (i < ARRAY_SIZE(mlxsw_sp_trap_policers_arr)) {
+ policer = &mlxsw_sp_trap_policers_arr[i];
+ trap->policers_arr[i] = *policer;
+ last_id = policer->id;
+ } else {
+ /* Use the parameters set for the first policer and
+ * override the relevant ones.
+ */
+ policer = &mlxsw_sp_trap_policers_arr[0];
+ trap->policers_arr[i] = *policer;
+ trap->policers_arr[i].id = ++last_id;
+ trap->policers_arr[i].init_rate = 1;
+ trap->policers_arr[i].init_burst = 16;
+ }
+ }
-static void mlxsw_sp_rx_exception_listener(struct sk_buff *skb, u8 local_port,
- void *trap_ctx)
-{
- struct devlink_port *in_devlink_port;
- struct mlxsw_sp_port *mlxsw_sp_port;
- struct mlxsw_sp *mlxsw_sp;
- struct devlink *devlink;
+ INIT_LIST_HEAD(&trap->policer_item_list);
- mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
- mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ err = devlink_trap_policers_register(devlink, trap->policers_arr,
+ trap->policers_count);
+ if (err)
+ goto err_trap_policers_register;
- if (mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port))
- return;
+ return 0;
- devlink = priv_to_devlink(mlxsw_sp->core);
- in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
- local_port);
- skb_push(skb, ETH_HLEN);
- devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
- skb_pull(skb, ETH_HLEN);
- skb->offload_fwd_mark = 1;
- netif_receive_skb(skb);
+err_trap_policers_register:
+ kfree(trap->policers_arr);
+ return err;
+}
+
+static void mlxsw_sp_trap_policers_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+
+ devlink_trap_policers_unregister(devlink, trap->policers_arr,
+ trap->policers_count);
+ WARN_ON(!list_empty(&trap->policer_item_list));
+ kfree(trap->policers_arr);
}
int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
{
+ size_t groups_count = ARRAY_SIZE(mlxsw_sp_trap_groups_arr);
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ int err;
+
+ err = mlxsw_sp_trap_cpu_policers_set(mlxsw_sp);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_trap_dummy_group_init(mlxsw_sp);
+ if (err)
+ return err;
if (WARN_ON(ARRAY_SIZE(mlxsw_sp_listener_devlink_map) !=
ARRAY_SIZE(mlxsw_sp_listeners_arr)))
return -EINVAL;
- return devlink_traps_register(devlink, mlxsw_sp_traps_arr,
- ARRAY_SIZE(mlxsw_sp_traps_arr),
- mlxsw_sp);
+ err = mlxsw_sp_trap_policers_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ err = devlink_trap_groups_register(devlink, mlxsw_sp_trap_groups_arr,
+ groups_count);
+ if (err)
+ goto err_trap_groups_register;
+
+ err = devlink_traps_register(devlink, mlxsw_sp_traps_arr,
+ ARRAY_SIZE(mlxsw_sp_traps_arr), mlxsw_sp);
+ if (err)
+ goto err_traps_register;
+
+ return 0;
+
+err_traps_register:
+ devlink_trap_groups_unregister(devlink, mlxsw_sp_trap_groups_arr,
+ groups_count);
+err_trap_groups_register:
+ mlxsw_sp_trap_policers_fini(mlxsw_sp);
+ return err;
}
void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
+ size_t groups_count = ARRAY_SIZE(mlxsw_sp_trap_groups_arr);
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
devlink_traps_unregister(devlink, mlxsw_sp_traps_arr,
ARRAY_SIZE(mlxsw_sp_traps_arr));
+ devlink_trap_groups_unregister(devlink, mlxsw_sp_trap_groups_arr,
+ groups_count);
+ mlxsw_sp_trap_policers_fini(mlxsw_sp);
}
int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
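mlxsw_sp_devlink_traps_init() above layers three registrations (policers, then groups, then traps) and unwinds them in reverse order when a later step fails. The goto-based skeleton of that pattern, reduced to a runnable sketch with every helper stubbed out:

#include <stdio.h>

static int register_policers(void) { return 0; }
static int register_groups(void) { return 0; }
static int register_traps(void) { return -1; }	/* simulate failure */
static void unregister_groups(void) { puts("unregister groups"); }
static void unregister_policers(void) { puts("unregister policers"); }

static int traps_init(void)
{
	int err;

	err = register_policers();
	if (err)
		return err;

	err = register_groups();
	if (err)
		goto err_groups_register;

	err = register_traps();
	if (err)
		goto err_traps_register;

	return 0;

err_traps_register:
	unregister_groups();
err_groups_register:
	unregister_policers();
	return err;
}

int main(void)
{
	printf("traps_init() = %d\n", traps_init());
	return 0;
}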
@@ -265,7 +474,7 @@ int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
int i;
for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
- struct mlxsw_listener *listener;
+ const struct mlxsw_listener *listener;
int err;
if (mlxsw_sp_listener_devlink_map[i] != trap->id)
@@ -286,7 +495,7 @@ void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
int i;
for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
- struct mlxsw_listener *listener;
+ const struct mlxsw_listener *listener;
if (mlxsw_sp_listener_devlink_map[i] != trap->id)
continue;
@@ -303,27 +512,24 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
int i;
for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
- enum mlxsw_reg_hpkt_action hw_action;
- struct mlxsw_listener *listener;
+ const struct mlxsw_listener *listener;
+ bool enabled;
int err;
if (mlxsw_sp_listener_devlink_map[i] != trap->id)
continue;
listener = &mlxsw_sp_listeners_arr[i];
-
switch (action) {
case DEVLINK_TRAP_ACTION_DROP:
- hw_action = MLXSW_REG_HPKT_ACTION_SET_FW_DEFAULT;
+ enabled = false;
break;
case DEVLINK_TRAP_ACTION_TRAP:
- hw_action = MLXSW_REG_HPKT_ACTION_TRAP_EXCEPTION_TO_CPU;
+ enabled = true;
break;
default:
return -EINVAL;
}
-
- err = mlxsw_core_trap_action_set(mlxsw_core, listener,
- hw_action);
+ err = mlxsw_core_trap_state_set(mlxsw_core, listener, enabled);
if (err)
return err;
}
@@ -331,62 +537,34 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
return 0;
}
-#define MLXSW_SP_DISCARD_POLICER_ID (MLXSW_REG_HTGT_TRAP_GROUP_MAX + 1)
-
-static int
-mlxsw_sp_trap_group_policer_init(struct mlxsw_sp *mlxsw_sp,
- const struct devlink_trap_group *group)
-{
- enum mlxsw_reg_qpcr_ir_units ir_units;
- char qpcr_pl[MLXSW_REG_QPCR_LEN];
- u16 policer_id;
- u8 burst_size;
- bool is_bytes;
- u32 rate;
-
- switch (group->id) {
- case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS: /* fall through */
- case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS: /* fall through */
- case DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS:
- policer_id = MLXSW_SP_DISCARD_POLICER_ID;
- ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
- is_bytes = false;
- rate = 10 * 1024; /* 10Kpps */
- burst_size = 7;
- break;
- default:
- return -EINVAL;
- }
-
- mlxsw_reg_qpcr_pack(qpcr_pl, policer_id, ir_units, is_bytes, rate,
- burst_size);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
-}
-
static int
-__mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp,
- const struct devlink_trap_group *group)
+__mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group,
+ u32 policer_id)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u16 hw_policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
char htgt_pl[MLXSW_REG_HTGT_LEN];
u8 priority, tc, group_id;
- u16 policer_id;
switch (group->id) {
case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS;
- policer_id = MLXSW_SP_DISCARD_POLICER_ID;
priority = 0;
tc = 1;
break;
case DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS:
group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_DISCARDS;
- policer_id = MLXSW_SP_DISCARD_POLICER_ID;
priority = 0;
tc = 1;
break;
case DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS:
group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS;
- policer_id = MLXSW_SP_DISCARD_POLICER_ID;
+ priority = 0;
+ tc = 1;
+ break;
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS:
+ group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS;
priority = 0;
tc = 1;
break;
@@ -394,23 +572,179 @@ __mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
}
- mlxsw_reg_htgt_pack(htgt_pl, group_id, policer_id, priority, tc);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+ if (policer_id) {
+ struct mlxsw_sp_trap_policer_item *policer_item;
+
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp,
+ policer_id);
+ if (WARN_ON(!policer_item))
+ return -EINVAL;
+ hw_policer_id = policer_item->hw_id;
+ }
+
+ mlxsw_reg_htgt_pack(htgt_pl, group_id, hw_policer_id, priority, tc);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ return __mlxsw_sp_trap_group_init(mlxsw_core, group,
+ group->init_policer_id);
+}
+
+int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer)
+{
+ u32 policer_id = policer ? policer->id : 0;
+
+ return __mlxsw_sp_trap_group_init(mlxsw_core, group, policer_id);
+}
+
+static struct mlxsw_sp_trap_policer_item *
+mlxsw_sp_trap_policer_item_init(struct mlxsw_sp *mlxsw_sp, u32 id)
+{
+ struct mlxsw_sp_trap_policer_item *policer_item;
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ u16 hw_id;
+
+ /* We should be able to allocate a policer because the number of
+ * policers we registered with devlink is in accordance with the
+ * number of available policers.
+ */
+ hw_id = find_first_zero_bit(trap->policers_usage, trap->max_policers);
+ if (WARN_ON(hw_id == trap->max_policers))
+ return ERR_PTR(-ENOBUFS);
+
+ policer_item = kzalloc(sizeof(*policer_item), GFP_KERNEL);
+ if (!policer_item)
+ return ERR_PTR(-ENOMEM);
+
+ __set_bit(hw_id, trap->policers_usage);
+ policer_item->hw_id = hw_id;
+ policer_item->id = id;
+ list_add_tail(&policer_item->list, &trap->policer_item_list);
+
+ return policer_item;
+}
+
+static void
+mlxsw_sp_trap_policer_item_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_trap_policer_item *policer_item)
+{
+ list_del(&policer_item->list);
+ __clear_bit(policer_item->hw_id, mlxsw_sp->trap->policers_usage);
+ kfree(policer_item);
+}
+
+static int mlxsw_sp_trap_policer_bs(u64 burst, u8 *p_burst_size,
+ struct netlink_ext_ack *extack)
+{
+ int bs = fls64(burst) - 1;
+
+ if (burst != (1 << bs)) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer burst size is not power of two");
+ return -EINVAL;
+ }
+
+ *p_burst_size = bs;
+
+ return 0;
+}
+
+static int __mlxsw_sp_trap_policer_set(struct mlxsw_sp *mlxsw_sp, u16 hw_id,
+ u64 rate, u64 burst, bool clear_counter,
+ struct netlink_ext_ack *extack)
+{
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ u8 burst_size;
int err;
- err = mlxsw_sp_trap_group_policer_init(mlxsw_sp, group);
+ err = mlxsw_sp_trap_policer_bs(burst, &burst_size, extack);
if (err)
return err;
- err = __mlxsw_sp_trap_group_init(mlxsw_sp, group);
+ mlxsw_reg_qpcr_pack(qpcr_pl, hw_id, MLXSW_REG_QPCR_IR_UNITS_M, false,
+ rate, burst_size);
+ mlxsw_reg_qpcr_clear_counter_set(qpcr_pl, clear_counter);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
+}
+
+int mlxsw_sp_trap_policer_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_trap_policer_item *policer_item;
+ int err;
+
+ policer_item = mlxsw_sp_trap_policer_item_init(mlxsw_sp, policer->id);
+ if (IS_ERR(policer_item))
+ return PTR_ERR(policer_item);
+
+ err = __mlxsw_sp_trap_policer_set(mlxsw_sp, policer_item->hw_id,
+ policer->init_rate,
+ policer->init_burst, true, NULL);
+ if (err)
+ goto err_trap_policer_set;
+
+ return 0;
+
+err_trap_policer_set:
+ mlxsw_sp_trap_policer_item_fini(mlxsw_sp, policer_item);
+ return err;
+}
+
+void mlxsw_sp_trap_policer_fini(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_trap_policer_item *policer_item;
+
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, policer->id);
+ if (WARN_ON(!policer_item))
+ return;
+
+ mlxsw_sp_trap_policer_item_fini(mlxsw_sp, policer_item);
+}
+
+int mlxsw_sp_trap_policer_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_trap_policer_item *policer_item;
+
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, policer->id);
+ if (WARN_ON(!policer_item))
+ return -EINVAL;
+
+ return __mlxsw_sp_trap_policer_set(mlxsw_sp, policer_item->hw_id,
+ rate, burst, false, extack);
+}
+
+int
+mlxsw_sp_trap_policer_counter_get(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_trap_policer_item *policer_item;
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ int err;
+
+ policer_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, policer->id);
+ if (WARN_ON(!policer_item))
+ return -EINVAL;
+
+ mlxsw_reg_qpcr_pack(qpcr_pl, policer_item->hw_id,
+ MLXSW_REG_QPCR_IR_UNITS_M, false, 0, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
if (err)
return err;
+ *p_drops = mlxsw_reg_qpcr_violate_count_get(qpcr_pl);
+
return 0;
}
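Because the hardware encodes a policer burst as a power-of-two exponent, mlxsw_sp_trap_policer_bs() above rejects any burst value that is not exactly a power of two. The same validation as a standalone sketch, with fls64() reimplemented portably:

#include <stdio.h>

/* Portable stand-in for the kernel's fls64(): 1-based index of the
 * highest set bit, 0 when no bit is set.
 */
static int fls64_(unsigned long long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Returns the burst exponent, or -1 when @burst cannot be encoded. */
static int policer_burst_to_exp(unsigned long long burst)
{
	int bs;

	if (!burst)
		return -1;
	bs = fls64_(burst) - 1;
	if (burst != (1ULL << bs))
		return -1;	/* not a power of two */
	return bs;
}

int main(void)
{
	printf("%d\n", policer_burst_to_exp(128));	/* 7 */
	printf("%d\n", policer_burst_to_exp(100));	/* -1 */
	return 0;
}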
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
new file mode 100644
index 000000000000..8c54897ba173
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */
+
+#ifndef _MLXSW_SPECTRUM_TRAP_H
+#define _MLXSW_SPECTRUM_TRAP_H
+
+#include <linux/list.h>
+#include <net/devlink.h>
+
+struct mlxsw_sp_trap {
+ struct devlink_trap_policer *policers_arr; /* Registered policers */
+ u64 policers_count; /* Number of registered policers */
+ struct list_head policer_item_list;
+ u64 max_policers;
+ unsigned long policers_usage[]; /* Usage bitmap */
+};
+
+struct mlxsw_sp_trap_policer_item {
+ u16 hw_id;
+ u32 id;
+ struct list_head list; /* Member of policer_item_list */
+};
+
+#endif
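policers_usage[] above is a flexible array of unsigned long serving as a bitmap, and mlxsw_sp_trap_policer_item_init() claims the first clear bit as the hardware policer ID. A userspace sketch of that allocator shape; the kernel uses find_first_zero_bit() and __set_bit(), so the open-coded scan here is only illustrative:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct trap {
	unsigned int max_policers;
	unsigned long usage[];	/* one bit per hardware policer */
};

/* Claim the first clear bit; returns the bit index or -1 when full. */
static int alloc_hw_id(struct trap *t)
{
	unsigned int i;

	for (i = 0; i < t->max_policers; i++) {
		unsigned long *word = &t->usage[i / BITS_PER_LONG];
		unsigned long bit = 1UL << (i % BITS_PER_LONG);

		if (!(*word & bit)) {
			*word |= bit;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	unsigned int max = 80;
	struct trap *t = calloc(1, sizeof(*t) +
				BITS_TO_LONGS(max) * sizeof(unsigned long));

	t->max_policers = max;
	printf("%d %d\n", alloc_hw_id(t), alloc_hw_id(t));	/* 0 1 */
	free(t);
	return 0;
}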
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index f0e98ec8f1ee..90535820b559 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -180,7 +180,7 @@ static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
if (err)
return err;
oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
- *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
+ *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 12e1fa998d42..eaa521b7561b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -102,6 +102,8 @@ enum {
MLXSW_TRAP_ID_ACL1 = 0x1C1,
/* Multicast trap used for routes with trap-and-forward action */
MLXSW_TRAP_ID_ACL2 = 0x1C2,
+ MLXSW_TRAP_ID_DISCARD_INGRESS_ACL = 0x1C3,
+ MLXSW_TRAP_ID_DISCARD_EGRESS_ACL = 0x1C4,
MLXSW_TRAP_ID_MAX = 0x1FF
};
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index d1444ba36e10..4fe6aedca22f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5694,7 +5694,7 @@ static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
* from the bridge.
*/
if ((hw->features & STP_SUPPORT) && !promiscuous &&
- (dev->priv_flags & IFF_BRIDGE_PORT)) {
+ netif_is_bridge_port(dev)) {
struct ksz_switch *sw = hw->ksz_switch;
int port = priv->port.first_port;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index d3b7373c5961..b4731df186f4 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -442,8 +442,23 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port,
ocelot_port_writel(ocelot_port, DEV_MAC_MODE_CFG_FDX_ENA |
mode, DEV_MAC_MODE_CFG);
- if (ocelot->ops->pcs_init)
- ocelot->ops->pcs_init(ocelot, port);
+ /* Disable HDX fast control */
+ ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
+ DEV_PORT_MISC);
+
+ /* SGMII only for now */
+ ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ PCS1G_MODE_CFG);
+ ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
+
+ /* Enable PCS */
+ ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
+
+ /* No aneg on SGMII */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
+
+ /* No loopback */
+ ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
/* Enable MAC module */
ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
@@ -1398,7 +1413,7 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
* a source for the other ports.
*/
for (p = 0; p < ocelot->num_phys_ports; p++) {
- if (p == ocelot->cpu || (ocelot->bridge_fwd_mask & BIT(p))) {
+ if (ocelot->bridge_fwd_mask & BIT(p)) {
unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(p);
for (i = 0; i < ocelot->num_phys_ports; i++) {
@@ -1413,18 +1428,10 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
}
}
- /* Avoid the NPI port from looping back to itself */
- if (p != ocelot->cpu)
- mask |= BIT(ocelot->cpu);
-
ocelot_write_rix(ocelot, mask,
ANA_PGID_PGID, PGID_SRC + p);
} else {
- /* Only the CPU port, this is compatible with link
- * aggregation.
- */
- ocelot_write_rix(ocelot,
- BIT(ocelot->cpu),
+ ocelot_write_rix(ocelot, 0,
ANA_PGID_PGID, PGID_SRC + p);
}
}
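With the CPU port module now outside the physical port range, the PGID_SRC programming above reduces to: a bridged port may send to every other bridged port except itself, and a non-bridged port gets an empty source mask. A toy model of that computation, ignoring the per-port STP learning check in the inner loop (port count and mask invented):

#include <stdio.h>

int main(void)
{
	unsigned long bridge_fwd_mask = 0x6;	/* ports 1 and 2 bridged */
	int num_phys_ports = 4;
	int p;

	for (p = 0; p < num_phys_ports; p++) {
		unsigned long mask = 0;

		if (bridge_fwd_mask & (1UL << p))
			mask = bridge_fwd_mask & ~(1UL << p);
		printf("PGID_SRC[%d] = 0x%02lx\n", p, mask);
	}
	return 0;
}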
@@ -2178,13 +2185,25 @@ static int ocelot_init_timestamp(struct ocelot *ocelot)
/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
* The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
+ * In the special case that the port being configured is the NPI port,
+ * the length of the tag and optional prefix needs to be accounted for
+ * internally, in order to sustain communication at the requested @sdu.
*/
-static void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
+void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
int atop_wm;
+ if (port == ocelot->npi) {
+ maxlen += OCELOT_TAG_LEN;
+
+ if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_SHORT)
+ maxlen += OCELOT_SHORT_PREFIX_LEN;
+ else if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_LONG)
+ maxlen += OCELOT_LONG_PREFIX_LEN;
+ }
+
ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
/* Set Pause WM hysteresis
@@ -2202,6 +2221,24 @@ static void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
SYS_ATOP, port);
ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
}
+EXPORT_SYMBOL(ocelot_port_set_maxlen);
+
+int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
+{
+ int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN;
+
+ if (port == ocelot->npi) {
+ max_mtu -= OCELOT_TAG_LEN;
+
+ if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_SHORT)
+ max_mtu -= OCELOT_SHORT_PREFIX_LEN;
+ else if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_LONG)
+ max_mtu -= OCELOT_LONG_PREFIX_LEN;
+ }
+
+ return max_mtu;
+}
+EXPORT_SYMBOL(ocelot_get_max_mtu);
void ocelot_init_port(struct ocelot *ocelot, int port)
{
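Both ocelot_port_set_maxlen() and ocelot_get_max_mtu() above fold the injection tag, plus an optional short or long prefix, into the frame-length budget when the configured port is the NPI port. The arithmetic in isolation; the header lengths mirror the constants the driver references and should be treated as assumptions:

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN		14
#define ETH_FCS_LEN		4
#define OCELOT_TAG_LEN		16
#define OCELOT_SHORT_PREFIX_LEN	4
#define OCELOT_LONG_PREFIX_LEN	16

enum prefix { PREFIX_NONE, PREFIX_SHORT, PREFIX_LONG };

/* Extra bytes the NPI port must carry for the injection header. */
static int npi_overhead(enum prefix inj)
{
	int overhead = OCELOT_TAG_LEN;

	if (inj == PREFIX_SHORT)
		overhead += OCELOT_SHORT_PREFIX_LEN;
	else if (inj == PREFIX_LONG)
		overhead += OCELOT_LONG_PREFIX_LEN;
	return overhead;
}

int main(void)
{
	int sdu = 1500;		/* requested L2 payload */
	bool is_npi = true;
	int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
	int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN;

	if (is_npi) {
		maxlen += npi_overhead(PREFIX_SHORT);
		max_mtu -= npi_overhead(PREFIX_SHORT);
	}
	printf("maxlen=%d max_mtu=%d\n", maxlen, max_mtu);
	return 0;
}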
@@ -2299,42 +2336,57 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
}
EXPORT_SYMBOL(ocelot_probe_port);
-void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
- enum ocelot_tag_prefix injection,
- enum ocelot_tag_prefix extraction)
+/* Configure and enable the CPU port module, which is a set of queues.
+ * If @npi contains a valid port index, the CPU port module is connected
+ * to the Node Processor Interface (NPI). This is the mode through which
+ * frames can be injected from and extracted to an external CPU
+ * over Ethernet.
+ */
+void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction)
{
- /* Configure and enable the CPU port. */
+ int cpu = ocelot->num_phys_ports;
+
+ ocelot->npi = npi;
+ ocelot->inj_prefix = injection;
+ ocelot->xtr_prefix = extraction;
+
+ /* The unicast destination PGID for the CPU port module is unused */
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
+ /* Instead, set up a multicast destination PGID for traffic copied to
+ * the CPU. Whitelisted MAC addresses like the port netdevice MAC
+ * addresses will be copied to the CPU via this PGID.
+ */
ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
ANA_PORT_PORT_CFG, cpu);
- /* If the CPU port is a physical port, set up the port in Node
- * Processor Interface (NPI) mode. This is the mode through which
- * frames can be injected from and extracted to an external CPU.
- * Only one port can be an NPI at the same time.
- */
- if (cpu < ocelot->num_phys_ports) {
- int sdu = ETH_DATA_LEN + OCELOT_TAG_LEN;
-
+ if (npi >= 0 && npi < ocelot->num_phys_ports) {
ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
- QSYS_EXT_CPU_CFG_EXT_CPU_PORT(cpu),
+ QSYS_EXT_CPU_CFG_EXT_CPU_PORT(npi),
QSYS_EXT_CPU_CFG);
- if (injection == OCELOT_TAG_PREFIX_SHORT)
- sdu += OCELOT_SHORT_PREFIX_LEN;
- else if (injection == OCELOT_TAG_PREFIX_LONG)
- sdu += OCELOT_LONG_PREFIX_LEN;
-
- ocelot_port_set_maxlen(ocelot, cpu, sdu);
+ /* Enable NPI port */
+ ocelot_write_rix(ocelot,
+ QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE, npi);
+ /* NPI port Injection/Extraction configuration */
+ ocelot_write_rix(ocelot,
+ SYS_PORT_MODE_INCL_XTR_HDR(extraction) |
+ SYS_PORT_MODE_INCL_INJ_HDR(injection),
+ SYS_PORT_MODE, npi);
}
- /* CPU port Injection/Extraction configuration */
+ /* Enable CPU port module */
ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
QSYS_SWITCH_PORT_MODE_PORT_ENA,
QSYS_SWITCH_PORT_MODE, cpu);
+ /* CPU port Injection/Extraction configuration */
ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(extraction) |
SYS_PORT_MODE_INCL_INJ_HDR(injection),
SYS_PORT_MODE, cpu);
@@ -2344,10 +2396,8 @@ void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
ANA_PORT_VLAN_CFG, cpu);
-
- ocelot->cpu = cpu;
}
-EXPORT_SYMBOL(ocelot_set_cpu_port);
+EXPORT_SYMBOL(ocelot_configure_cpu);
int ocelot_init(struct ocelot *ocelot)
{
@@ -2499,7 +2549,6 @@ void ocelot_deinit(struct ocelot *ocelot)
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
- ocelot_ace_deinit();
if (ocelot->ptp_clock)
ptp_clock_unregister(ocelot->ptp_clock);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 04372ba72fec..e34ef8380eb3 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -28,16 +28,6 @@
#include "ocelot_tc.h"
#include "ocelot_ptp.h"
-#define PGID_AGGR 64
-#define PGID_SRC 80
-
-/* Reserved PGIDs */
-#define PGID_CPU (PGID_AGGR - 5)
-#define PGID_UC (PGID_AGGR - 4)
-#define PGID_MC (PGID_AGGR - 3)
-#define PGID_MCIPV4 (PGID_AGGR - 2)
-#define PGID_MCIPV6 (PGID_AGGR - 1)
-
#define OCELOT_BUFFER_CELL_SZ 60
#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c
index 86fc6e6b46dd..3bd286044480 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.c
+++ b/drivers/net/ethernet/mscc/ocelot_ace.c
@@ -6,60 +6,13 @@
#include <linux/iopoll.h>
#include <linux/proc_fs.h>
+#include <soc/mscc/ocelot_vcap.h>
+#include "ocelot_police.h"
#include "ocelot_ace.h"
-#include "ocelot_vcap.h"
#include "ocelot_s2.h"
#define OCELOT_POLICER_DISCARD 0x17f
-
-static struct ocelot_acl_block *acl_block;
-
-struct vcap_props {
- const char *name; /* Symbolic name */
- u16 tg_width; /* Type-group width (in bits) */
- u16 sw_count; /* Sub word count */
- u16 entry_count; /* Entry count */
- u16 entry_words; /* Number of entry words */
- u16 entry_width; /* Entry width (in bits) */
- u16 action_count; /* Action count */
- u16 action_words; /* Number of action words */
- u16 action_width; /* Action width (in bits) */
- u16 action_type_width; /* Action type width (in bits) */
- struct {
- u16 width; /* Action type width (in bits) */
- u16 count; /* Action type sub word count */
- } action_table[2];
- u16 counter_words; /* Number of counter words */
- u16 counter_width; /* Counter width (in bits) */
-};
-
#define ENTRY_WIDTH 32
-#define BITS_TO_32BIT(x) (1 + (((x) - 1) / ENTRY_WIDTH))
-
-static const struct vcap_props vcap_is2 = {
- .name = "IS2",
- .tg_width = 2,
- .sw_count = 4,
- .entry_count = VCAP_IS2_CNT,
- .entry_words = BITS_TO_32BIT(VCAP_IS2_ENTRY_WIDTH),
- .entry_width = VCAP_IS2_ENTRY_WIDTH,
- .action_count = (VCAP_IS2_CNT + VCAP_PORT_CNT + 2),
- .action_words = BITS_TO_32BIT(VCAP_IS2_ACTION_WIDTH),
- .action_width = (VCAP_IS2_ACTION_WIDTH),
- .action_type_width = 1,
- .action_table = {
- {
- .width = (IS2_AO_ACL_ID + IS2_AL_ACL_ID),
- .count = 2
- },
- {
- .width = 6,
- .count = 4
- },
- },
- .counter_words = BITS_TO_32BIT(4 * ENTRY_WIDTH),
- .counter_width = ENTRY_WIDTH,
-};
enum vcap_sel {
VCAP_SEL_ENTRY = 0x1,
@@ -95,18 +48,20 @@ struct vcap_data {
u32 tg_mask; /* Current type-group mask */
};
-static u32 vcap_s2_read_update_ctrl(struct ocelot *oc)
+static u32 vcap_s2_read_update_ctrl(struct ocelot *ocelot)
{
- return ocelot_read(oc, S2_CORE_UPDATE_CTRL);
+ return ocelot_read(ocelot, S2_CORE_UPDATE_CTRL);
}
-static void vcap_cmd(struct ocelot *oc, u16 ix, int cmd, int sel)
+static void vcap_cmd(struct ocelot *ocelot, u16 ix, int cmd, int sel)
{
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+
u32 value = (S2_CORE_UPDATE_CTRL_UPDATE_CMD(cmd) |
S2_CORE_UPDATE_CTRL_UPDATE_ADDR(ix) |
S2_CORE_UPDATE_CTRL_UPDATE_SHOT);
- if ((sel & VCAP_SEL_ENTRY) && ix >= vcap_is2.entry_count)
+ if ((sel & VCAP_SEL_ENTRY) && ix >= vcap_is2->entry_count)
return;
if (!(sel & VCAP_SEL_ENTRY))
@@ -118,83 +73,101 @@ static void vcap_cmd(struct ocelot *oc, u16 ix, int cmd, int sel)
if (!(sel & VCAP_SEL_COUNTER))
value |= S2_CORE_UPDATE_CTRL_UPDATE_CNT_DIS;
- ocelot_write(oc, value, S2_CORE_UPDATE_CTRL);
- readx_poll_timeout(vcap_s2_read_update_ctrl, oc, value,
+ ocelot_write(ocelot, value, S2_CORE_UPDATE_CTRL);
+ readx_poll_timeout(vcap_s2_read_update_ctrl, ocelot, value,
(value & S2_CORE_UPDATE_CTRL_UPDATE_SHOT) == 0,
10, 100000);
}
/* Convert from 0-based row to VCAP entry row and run command */
-static void vcap_row_cmd(struct ocelot *oc, u32 row, int cmd, int sel)
+static void vcap_row_cmd(struct ocelot *ocelot, u32 row, int cmd, int sel)
{
- vcap_cmd(oc, vcap_is2.entry_count - row - 1, cmd, sel);
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+
+ vcap_cmd(ocelot, vcap_is2->entry_count - row - 1, cmd, sel);
}
-static void vcap_entry2cache(struct ocelot *oc, struct vcap_data *data)
+static void vcap_entry2cache(struct ocelot *ocelot, struct vcap_data *data)
{
- u32 i;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 entry_words, i;
- for (i = 0; i < vcap_is2.entry_words; i++) {
- ocelot_write_rix(oc, data->entry[i], S2_CACHE_ENTRY_DAT, i);
- ocelot_write_rix(oc, ~data->mask[i], S2_CACHE_MASK_DAT, i);
+ entry_words = DIV_ROUND_UP(vcap_is2->entry_width, ENTRY_WIDTH);
+
+ for (i = 0; i < entry_words; i++) {
+ ocelot_write_rix(ocelot, data->entry[i], S2_CACHE_ENTRY_DAT, i);
+ ocelot_write_rix(ocelot, ~data->mask[i], S2_CACHE_MASK_DAT, i);
}
- ocelot_write(oc, data->tg, S2_CACHE_TG_DAT);
+ ocelot_write(ocelot, data->tg, S2_CACHE_TG_DAT);
}
-static void vcap_cache2entry(struct ocelot *oc, struct vcap_data *data)
+static void vcap_cache2entry(struct ocelot *ocelot, struct vcap_data *data)
{
- u32 i;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 entry_words, i;
+
+ entry_words = DIV_ROUND_UP(vcap_is2->entry_width, ENTRY_WIDTH);
- for (i = 0; i < vcap_is2.entry_words; i++) {
- data->entry[i] = ocelot_read_rix(oc, S2_CACHE_ENTRY_DAT, i);
+ for (i = 0; i < entry_words; i++) {
+ data->entry[i] = ocelot_read_rix(ocelot, S2_CACHE_ENTRY_DAT, i);
// Invert mask
- data->mask[i] = ~ocelot_read_rix(oc, S2_CACHE_MASK_DAT, i);
+ data->mask[i] = ~ocelot_read_rix(ocelot, S2_CACHE_MASK_DAT, i);
}
- data->tg = ocelot_read(oc, S2_CACHE_TG_DAT);
+ data->tg = ocelot_read(ocelot, S2_CACHE_TG_DAT);
}
-static void vcap_action2cache(struct ocelot *oc, struct vcap_data *data)
+static void vcap_action2cache(struct ocelot *ocelot, struct vcap_data *data)
{
- u32 i, width, mask;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 action_words, i, width, mask;
/* Encode action type */
- width = vcap_is2.action_type_width;
+ width = vcap_is2->action_type_width;
if (width) {
mask = GENMASK(width, 0);
data->action[0] = ((data->action[0] & ~mask) | data->type);
}
- for (i = 0; i < vcap_is2.action_words; i++)
- ocelot_write_rix(oc, data->action[i], S2_CACHE_ACTION_DAT, i);
+ action_words = DIV_ROUND_UP(vcap_is2->action_width, ENTRY_WIDTH);
- for (i = 0; i < vcap_is2.counter_words; i++)
- ocelot_write_rix(oc, data->counter[i], S2_CACHE_CNT_DAT, i);
+ for (i = 0; i < action_words; i++)
+ ocelot_write_rix(ocelot, data->action[i], S2_CACHE_ACTION_DAT,
+ i);
+
+ for (i = 0; i < vcap_is2->counter_words; i++)
+ ocelot_write_rix(ocelot, data->counter[i], S2_CACHE_CNT_DAT, i);
}
-static void vcap_cache2action(struct ocelot *oc, struct vcap_data *data)
+static void vcap_cache2action(struct ocelot *ocelot, struct vcap_data *data)
{
- u32 i, width;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 action_words, i, width;
+
+ action_words = DIV_ROUND_UP(vcap_is2->action_width, ENTRY_WIDTH);
- for (i = 0; i < vcap_is2.action_words; i++)
- data->action[i] = ocelot_read_rix(oc, S2_CACHE_ACTION_DAT, i);
+ for (i = 0; i < action_words; i++)
+ data->action[i] = ocelot_read_rix(ocelot, S2_CACHE_ACTION_DAT,
+ i);
- for (i = 0; i < vcap_is2.counter_words; i++)
- data->counter[i] = ocelot_read_rix(oc, S2_CACHE_CNT_DAT, i);
+ for (i = 0; i < vcap_is2->counter_words; i++)
+ data->counter[i] = ocelot_read_rix(ocelot, S2_CACHE_CNT_DAT, i);
/* Extract action type */
- width = vcap_is2.action_type_width;
+ width = vcap_is2->action_type_width;
data->type = (width ? (data->action[0] & GENMASK(width, 0)) : 0);
}
/* Calculate offsets for entry */
-static void is2_data_get(struct vcap_data *data, int ix)
+static void is2_data_get(struct ocelot *ocelot, struct vcap_data *data, int ix)
{
- u32 i, col, offset, count, cnt, base, width = vcap_is2.tg_width;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 i, col, offset, count, cnt, base;
+ u32 width = vcap_is2->tg_width;
count = (data->tg_sw == VCAP_TG_HALF ? 2 : 4);
col = (ix % 2);
- cnt = (vcap_is2.sw_count / count);
- base = (vcap_is2.sw_count - col * cnt - cnt);
+ cnt = (vcap_is2->sw_count / count);
+ base = (vcap_is2->sw_count - col * cnt - cnt);
data->tg_value = 0;
data->tg_mask = 0;
for (i = 0; i < cnt; i++) {
@@ -205,13 +178,13 @@ static void is2_data_get(struct vcap_data *data, int ix)
/* Calculate key/action/counter offsets */
col = (count - col - 1);
- data->key_offset = (base * vcap_is2.entry_width) / vcap_is2.sw_count;
- data->counter_offset = (cnt * col * vcap_is2.counter_width);
+ data->key_offset = (base * vcap_is2->entry_width) / vcap_is2->sw_count;
+ data->counter_offset = (cnt * col * vcap_is2->counter_width);
i = data->type;
- width = vcap_is2.action_table[i].width;
- cnt = vcap_is2.action_table[i].count;
+ width = vcap_is2->action_table[i].width;
+ cnt = vcap_is2->action_table[i].count;
data->action_offset =
- (((cnt * col * width) / count) + vcap_is2.action_type_width);
+ (((cnt * col * width) / count) + vcap_is2->action_type_width);
}
static void vcap_data_set(u32 *data, u32 offset, u32 len, u32 value)
@@ -242,22 +215,39 @@ static u32 vcap_data_get(u32 *data, u32 offset, u32 len)
return value;
}
-static void vcap_key_set(struct vcap_data *data, u32 offset, u32 width,
- u32 value, u32 mask)
+static void vcap_key_field_set(struct vcap_data *data, u32 offset, u32 width,
+ u32 value, u32 mask)
{
vcap_data_set(data->entry, offset + data->key_offset, width, value);
vcap_data_set(data->mask, offset + data->key_offset, width, mask);
}
-static void vcap_key_bytes_set(struct vcap_data *data, u32 offset, u8 *val,
- u8 *msk, u32 count)
+static void vcap_key_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_half_key_field field,
+ u32 value, u32 mask)
{
+ u32 offset = ocelot->vcap_is2_keys[field].offset;
+ u32 length = ocelot->vcap_is2_keys[field].length;
+
+ vcap_key_field_set(data, offset, length, value, mask);
+}
+
+static void vcap_key_bytes_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_half_key_field field,
+ u8 *val, u8 *msk)
+{
+ u32 offset = ocelot->vcap_is2_keys[field].offset;
+ u32 count = ocelot->vcap_is2_keys[field].length;
u32 i, j, n = 0, value = 0, mask = 0;
+ WARN_ON(count % 8);
+
/* Data wider than 32 bits are split up in chunks of maximum 32 bits.
* The 32 LSB of the data are written to the 32 MSB of the TCAM.
*/
- offset += (count * 8);
+ offset += count;
+ count /= 8;
+
for (i = 0; i < count; i++) {
j = (count - i - 1);
value += (val[j] << n);
@@ -265,7 +255,7 @@ static void vcap_key_bytes_set(struct vcap_data *data, u32 offset, u8 *val,
n += 8;
if (n == ENTRY_WIDTH || (i + 1) == count) {
offset -= n;
- vcap_key_set(data, offset, n, value, mask);
+ vcap_key_field_set(data, offset, n, value, mask);
n = 0;
value = 0;
mask = 0;
@@ -273,55 +263,71 @@ static void vcap_key_bytes_set(struct vcap_data *data, u32 offset, u8 *val,
}
}
-static void vcap_key_l4_port_set(struct vcap_data *data, u32 offset,
+static void vcap_key_l4_port_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_half_key_field field,
struct ocelot_vcap_udp_tcp *port)
{
- vcap_key_set(data, offset, 16, port->value, port->mask);
+ u32 offset = ocelot->vcap_is2_keys[field].offset;
+ u32 length = ocelot->vcap_is2_keys[field].length;
+
+ WARN_ON(length != 16);
+
+ vcap_key_field_set(data, offset, length, port->value, port->mask);
}
-static void vcap_key_bit_set(struct vcap_data *data, u32 offset,
+static void vcap_key_bit_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_half_key_field field,
enum ocelot_vcap_bit val)
{
- vcap_key_set(data, offset, 1, val == OCELOT_VCAP_BIT_1 ? 1 : 0,
- val == OCELOT_VCAP_BIT_ANY ? 0 : 1);
-}
+ u32 offset = ocelot->vcap_is2_keys[field].offset;
+ u32 length = ocelot->vcap_is2_keys[field].length;
+ u32 value = (val == OCELOT_VCAP_BIT_1 ? 1 : 0);
+ u32 msk = (val == OCELOT_VCAP_BIT_ANY ? 0 : 1);
-#define VCAP_KEY_SET(fld, val, msk) \
- vcap_key_set(&data, IS2_HKO_##fld, IS2_HKL_##fld, val, msk)
-#define VCAP_KEY_ANY_SET(fld) \
- vcap_key_set(&data, IS2_HKO_##fld, IS2_HKL_##fld, 0, 0)
-#define VCAP_KEY_BIT_SET(fld, val) vcap_key_bit_set(&data, IS2_HKO_##fld, val)
-#define VCAP_KEY_BYTES_SET(fld, val, msk) \
- vcap_key_bytes_set(&data, IS2_HKO_##fld, val, msk, IS2_HKL_##fld / 8)
+ WARN_ON(length != 1);
-static void vcap_action_set(struct vcap_data *data, u32 offset, u32 width,
- u32 value)
-{
- vcap_data_set(data->action, offset + data->action_offset, width, value);
+ vcap_key_field_set(data, offset, length, value, msk);
}
-#define VCAP_ACT_SET(fld, val) \
- vcap_action_set(data, IS2_AO_##fld, IS2_AL_##fld, val)
+static void vcap_action_set(struct ocelot *ocelot, struct vcap_data *data,
+ enum vcap_is2_action_field field, u32 value)
+{
+ int offset = ocelot->vcap_is2_actions[field].offset;
+ int length = ocelot->vcap_is2_actions[field].length;
+
+ vcap_data_set(data->action, offset + data->action_offset, length,
+ value);
+}
-static void is2_action_set(struct vcap_data *data,
- enum ocelot_ace_action action)
+static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
+ struct ocelot_ace_rule *ace)
{
- switch (action) {
+ switch (ace->action) {
case OCELOT_ACL_ACTION_DROP:
- VCAP_ACT_SET(PORT_MASK, 0x0);
- VCAP_ACT_SET(MASK_MODE, 0x1);
- VCAP_ACT_SET(POLICE_ENA, 0x1);
- VCAP_ACT_SET(POLICE_IDX, OCELOT_POLICER_DISCARD);
- VCAP_ACT_SET(CPU_QU_NUM, 0x0);
- VCAP_ACT_SET(CPU_COPY_ENA, 0x0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX,
+ OCELOT_POLICER_DISCARD);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0);
break;
case OCELOT_ACL_ACTION_TRAP:
- VCAP_ACT_SET(PORT_MASK, 0x0);
- VCAP_ACT_SET(MASK_MODE, 0x1);
- VCAP_ACT_SET(POLICE_ENA, 0x0);
- VCAP_ACT_SET(POLICE_IDX, 0x0);
- VCAP_ACT_SET(CPU_QU_NUM, 0x0);
- VCAP_ACT_SET(CPU_COPY_ENA, 0x1);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 1);
+ break;
+ case OCELOT_ACL_ACTION_POLICE:
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX,
+ ace->pol_ix);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
+ vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0);
break;
}
}
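The VCAP helpers above stop hard-coding field positions through the IS2_HKO_x/IS2_HKL_x macros and instead look up per-field offset/length pairs from tables hung off struct ocelot, so different switch generations can supply different key layouts. A standalone model of that table-driven bit packing, with the field set and widths invented:

#include <stdint.h>
#include <stdio.h>

enum field { FLD_FIRST, FLD_VID, FLD_MAX };

struct field_props {
	uint32_t offset;	/* bit offset into the key */
	uint32_t length;	/* field width in bits */
};

/* Per-chip table; a different SoC would register different values. */
static const struct field_props keys[FLD_MAX] = {
	[FLD_FIRST] = { .offset = 0,  .length = 1 },
	[FLD_VID]   = { .offset = 10, .length = 12 },
};

static void data_set(uint32_t *words, uint32_t off, uint32_t len,
		     uint32_t val)
{
	uint32_t i;

	for (i = 0; i < len; i++, off++) {
		uint32_t bit = 1u << (off % 32);

		if (val & (1u << i))
			words[off / 32] |= bit;
		else
			words[off / 32] &= ~bit;
	}
}

/* Generic setter: writes both the value and its care mask. */
static void key_set(uint32_t *entry, uint32_t *mask, enum field f,
		    uint32_t value, uint32_t msk)
{
	data_set(entry, keys[f].offset, keys[f].length, value);
	data_set(mask, keys[f].offset, keys[f].length, msk);
}

int main(void)
{
	uint32_t entry[2] = { 0 }, mask[2] = { 0 };

	key_set(entry, mask, FLD_VID, 100, 0xfff);
	printf("entry0=0x%08x mask0=0x%08x\n", entry[0], mask[0]);
	return 0;
}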
@@ -329,6 +335,7 @@ static void is2_action_set(struct vcap_data *data,
static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_ace_rule *ace)
{
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
u32 val, msk, type, type_mask = 0xf, i, count;
struct ocelot_ace_vlan *tag = &ace->vlan;
struct ocelot_vcap_u64 payload;
@@ -344,60 +351,76 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
vcap_cache2action(ocelot, &data);
data.tg_sw = VCAP_TG_HALF;
- is2_data_get(&data, ix);
+ is2_data_get(ocelot, &data, ix);
data.tg = (data.tg & ~data.tg_mask);
if (ace->prio != 0)
data.tg |= data.tg_value;
data.type = IS2_ACTION_TYPE_NORMAL;
- VCAP_KEY_ANY_SET(PAG);
- VCAP_KEY_SET(IGR_PORT_MASK, 0, ~BIT(ace->chip_port));
- VCAP_KEY_BIT_SET(FIRST, OCELOT_VCAP_BIT_1);
- VCAP_KEY_BIT_SET(HOST_MATCH, OCELOT_VCAP_BIT_ANY);
- VCAP_KEY_BIT_SET(L2_MC, ace->dmac_mc);
- VCAP_KEY_BIT_SET(L2_BC, ace->dmac_bc);
- VCAP_KEY_BIT_SET(VLAN_TAGGED, tag->tagged);
- VCAP_KEY_SET(VID, tag->vid.value, tag->vid.mask);
- VCAP_KEY_SET(PCP, tag->pcp.value[0], tag->pcp.mask[0]);
- VCAP_KEY_BIT_SET(DEI, tag->dei);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_PAG, 0, 0);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
+ ~ace->ingress_port_mask);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_1);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_HOST_MATCH,
+ OCELOT_VCAP_BIT_ANY);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_MC, ace->dmac_mc);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_BC, ace->dmac_bc);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_VLAN_TAGGED, tag->tagged);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_VID,
+ tag->vid.value, tag->vid.mask);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_PCP,
+ tag->pcp.value[0], tag->pcp.mask[0]);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_DEI, tag->dei);
switch (ace->type) {
case OCELOT_ACE_TYPE_ETYPE: {
struct ocelot_ace_frame_etype *etype = &ace->frame.etype;
type = IS2_TYPE_ETYPE;
- VCAP_KEY_BYTES_SET(L2_DMAC, etype->dmac.value,
- etype->dmac.mask);
- VCAP_KEY_BYTES_SET(L2_SMAC, etype->smac.value,
- etype->smac.mask);
- VCAP_KEY_BYTES_SET(MAC_ETYPE_ETYPE, etype->etype.value,
- etype->etype.mask);
- VCAP_KEY_ANY_SET(MAC_ETYPE_L2_PAYLOAD); // Clear unused bits
- vcap_key_bytes_set(&data, IS2_HKO_MAC_ETYPE_L2_PAYLOAD,
- etype->data.value, etype->data.mask, 2);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ etype->dmac.value, etype->dmac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ etype->smac.value, etype->smac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_ETYPE,
+ etype->etype.value, etype->etype.mask);
+ /* Clear unused bits */
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
+ 0, 0);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1,
+ 0, 0);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2,
+ 0, 0);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
+ etype->data.value, etype->data.mask);
break;
}
case OCELOT_ACE_TYPE_LLC: {
struct ocelot_ace_frame_llc *llc = &ace->frame.llc;
type = IS2_TYPE_LLC;
- VCAP_KEY_BYTES_SET(L2_DMAC, llc->dmac.value, llc->dmac.mask);
- VCAP_KEY_BYTES_SET(L2_SMAC, llc->smac.value, llc->smac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ llc->dmac.value, llc->dmac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ llc->smac.value, llc->smac.mask);
for (i = 0; i < 4; i++) {
payload.value[i] = llc->llc.value[i];
payload.mask[i] = llc->llc.mask[i];
}
- VCAP_KEY_BYTES_SET(MAC_LLC_L2_LLC, payload.value, payload.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_LLC_L2_LLC,
+ payload.value, payload.mask);
break;
}
case OCELOT_ACE_TYPE_SNAP: {
struct ocelot_ace_frame_snap *snap = &ace->frame.snap;
type = IS2_TYPE_SNAP;
- VCAP_KEY_BYTES_SET(L2_DMAC, snap->dmac.value, snap->dmac.mask);
- VCAP_KEY_BYTES_SET(L2_SMAC, snap->smac.value, snap->smac.mask);
- VCAP_KEY_BYTES_SET(MAC_SNAP_L2_SNAP,
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ snap->dmac.value, snap->dmac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ snap->smac.value, snap->smac.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_SNAP_L2_SNAP,
ace->frame.snap.snap.value,
ace->frame.snap.snap.mask);
break;
@@ -406,26 +429,42 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_ace_frame_arp *arp = &ace->frame.arp;
type = IS2_TYPE_ARP;
- VCAP_KEY_BYTES_SET(MAC_ARP_L2_SMAC, arp->smac.value,
- arp->smac.mask);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_ADDR_SPACE_OK, arp->ethernet);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_PROTO_SPACE_OK, arp->ip);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_LEN_OK, arp->length);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_TGT_MATCH, arp->dmac_match);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_SENDER_MATCH, arp->smac_match);
- VCAP_KEY_BIT_SET(MAC_ARP_ARP_OPCODE_UNKNOWN, arp->unknown);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_SMAC,
+ arp->smac.value, arp->smac.mask);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK,
+ arp->ethernet);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK,
+ arp->ip);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_LEN_OK,
+ arp->length);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_TARGET_MATCH,
+ arp->dmac_match);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_SENDER_MATCH,
+ arp->smac_match);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN,
+ arp->unknown);
/* OPCODE is inverse, bit 0 is reply flag, bit 1 is RARP flag */
val = ((arp->req == OCELOT_VCAP_BIT_0 ? 1 : 0) |
(arp->arp == OCELOT_VCAP_BIT_0 ? 2 : 0));
msk = ((arp->req == OCELOT_VCAP_BIT_ANY ? 0 : 1) |
(arp->arp == OCELOT_VCAP_BIT_ANY ? 0 : 2));
- VCAP_KEY_SET(MAC_ARP_ARP_OPCODE, val, msk);
- vcap_key_bytes_set(&data, IS2_HKO_MAC_ARP_L3_IP4_DIP,
- arp->dip.value.addr, arp->dip.mask.addr, 4);
- vcap_key_bytes_set(&data, IS2_HKO_MAC_ARP_L3_IP4_SIP,
- arp->sip.value.addr, arp->sip.mask.addr, 4);
- VCAP_KEY_ANY_SET(MAC_ARP_DIP_EQ_SIP);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_OPCODE,
+ val, msk);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP,
+ arp->dip.value.addr, arp->dip.mask.addr);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP,
+ arp->sip.value.addr, arp->sip.mask.addr);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP,
+ 0, 0);
break;
}
case OCELOT_ACE_TYPE_IPV4:
@@ -493,18 +532,23 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
seq_zero = ipv6->seq_zero;
}
- VCAP_KEY_BIT_SET(IP4,
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_IP4,
ipv4 ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
- VCAP_KEY_BIT_SET(L3_FRAGMENT, fragment);
- VCAP_KEY_ANY_SET(L3_FRAG_OFS_GT0);
- VCAP_KEY_BIT_SET(L3_OPTIONS, options);
- VCAP_KEY_BIT_SET(L3_TTL_GT0, ttl);
- VCAP_KEY_BYTES_SET(L3_TOS, ds.value, ds.mask);
- vcap_key_bytes_set(&data, IS2_HKO_L3_IP4_DIP, dip.value.addr,
- dip.mask.addr, 4);
- vcap_key_bytes_set(&data, IS2_HKO_L3_IP4_SIP, sip.value.addr,
- sip.mask.addr, 4);
- VCAP_KEY_BIT_SET(DIP_EQ_SIP, sip_eq_dip);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L3_FRAGMENT,
+ fragment);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_L3_FRAG_OFS_GT0, 0, 0);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L3_OPTIONS,
+ options);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_IP4_L3_TTL_GT0,
+ ttl);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_TOS,
+ ds.value, ds.mask);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_IP4_DIP,
+ dip.value.addr, dip.mask.addr);
+ vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_IP4_SIP,
+ sip.value.addr, sip.mask.addr);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_DIP_EQ_SIP,
+ sip_eq_dip);
val = proto.value[0];
msk = proto.mask[0];
type = IS2_TYPE_IP_UDP_TCP;
@@ -512,25 +556,34 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
/* UDP/TCP protocol match */
tcp = (val == 6 ?
OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_TCP, tcp);
- vcap_key_l4_port_set(&data,
- IS2_HKO_IP4_TCP_UDP_L4_DPORT,
- dport);
- vcap_key_l4_port_set(&data,
- IS2_HKO_IP4_TCP_UDP_L4_SPORT,
- sport);
- VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_RNG);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_SPORT_EQ_DPORT,
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_TCP, tcp);
+ vcap_key_l4_port_set(ocelot, &data,
+ VCAP_IS2_HK_L4_DPORT, dport);
+ vcap_key_l4_port_set(ocelot, &data,
+ VCAP_IS2_HK_L4_SPORT, sport);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_RNG, 0, 0);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_L4_SPORT_EQ_DPORT,
sport_eq_dport);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_SEQUENCE_EQ0, seq_zero);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_FIN, tcp_fin);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_SYN, tcp_syn);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_RST, tcp_rst);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_PSH, tcp_psh);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_ACK, tcp_ack);
- VCAP_KEY_BIT_SET(IP4_TCP_UDP_L4_URG, tcp_urg);
- VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_1588_DOM);
- VCAP_KEY_ANY_SET(IP4_TCP_UDP_L4_1588_VER);
+ vcap_key_bit_set(ocelot, &data,
+ VCAP_IS2_HK_L4_SEQUENCE_EQ0,
+ seq_zero);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_FIN,
+ tcp_fin);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_SYN,
+ tcp_syn);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_RST,
+ tcp_rst);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_PSH,
+ tcp_psh);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_ACK,
+ tcp_ack);
+ vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_URG,
+ tcp_urg);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_1588_DOM,
+ 0, 0);
+ vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_1588_VER,
+ 0, 0);
} else {
if (msk == 0) {
/* Any IP protocol match */
@@ -543,10 +596,12 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
payload.mask[i] = ip_data->mask[i];
}
}
- VCAP_KEY_BYTES_SET(IP4_OTHER_L3_PROTO, proto.value,
- proto.mask);
- VCAP_KEY_BYTES_SET(IP4_OTHER_L3_PAYLOAD, payload.value,
- payload.mask);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_IP4_L3_PROTO,
+ proto.value, proto.mask);
+ vcap_key_bytes_set(ocelot, &data,
+ VCAP_IS2_HK_L3_PAYLOAD,
+ payload.value, payload.mask);
}
break;
}
@@ -554,19 +609,21 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
default:
type = 0;
type_mask = 0;
- count = (vcap_is2.entry_width / 2);
- for (i = (IS2_HKO_PCP + IS2_HKL_PCP); i < count;
- i += ENTRY_WIDTH) {
- /* Clear entry data */
- vcap_key_set(&data, i, min(32u, count - i), 0, 0);
+ count = vcap_is2->entry_width / 2;
+ /* Iterate over the non-common part of the key and
+ * clear entry data
+ */
+ for (i = ocelot->vcap_is2_keys[VCAP_IS2_HK_L2_DMAC].offset;
+ i < count; i += ENTRY_WIDTH) {
+ vcap_key_field_set(&data, i, min(32u, count - i), 0, 0);
}
break;
}
- VCAP_KEY_SET(TYPE, type, type_mask);
- is2_action_set(&data, ace->action);
- vcap_data_set(data.counter, data.counter_offset, vcap_is2.counter_width,
- ace->stats.pkts);
+ vcap_key_set(ocelot, &data, VCAP_IS2_TYPE, type, type_mask);
+ is2_action_set(ocelot, &data, ace);
+ vcap_data_set(data.counter, data.counter_offset,
+ vcap_is2->counter_width, ace->stats.pkts);
/* Write row */
vcap_entry2cache(ocelot, &data);
@@ -574,29 +631,37 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
vcap_row_cmd(ocelot, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
}
-static void is2_entry_get(struct ocelot_ace_rule *rule, int ix)
+static void is2_entry_get(struct ocelot *ocelot, struct ocelot_ace_rule *rule,
+ int ix)
{
- struct ocelot *op = rule->port->ocelot;
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
struct vcap_data data;
int row = (ix / 2);
u32 cnt;
- vcap_row_cmd(op, row, VCAP_CMD_READ, VCAP_SEL_COUNTER);
- vcap_cache2action(op, &data);
+ vcap_row_cmd(ocelot, row, VCAP_CMD_READ, VCAP_SEL_COUNTER);
+ vcap_cache2action(ocelot, &data);
data.tg_sw = VCAP_TG_HALF;
- is2_data_get(&data, ix);
+ is2_data_get(ocelot, &data, ix);
cnt = vcap_data_get(data.counter, data.counter_offset,
- vcap_is2.counter_width);
+ vcap_is2->counter_width);
rule->stats.pkts = cnt;
}
-static void ocelot_ace_rule_add(struct ocelot_acl_block *block,
+static void ocelot_ace_rule_add(struct ocelot *ocelot,
+ struct ocelot_acl_block *block,
struct ocelot_ace_rule *rule)
{
struct ocelot_ace_rule *tmp;
struct list_head *pos, *n;
+ if (rule->action == OCELOT_ACL_ACTION_POLICE) {
+ block->pol_lpr--;
+ rule->pol_ix = block->pol_lpr;
+ ocelot_ace_policer_add(ocelot, rule->pol_ix, &rule->pol);
+ }
+
block->count++;
if (list_empty(&block->rules)) {
@@ -641,29 +706,57 @@ ocelot_ace_rule_get_rule_index(struct ocelot_acl_block *block, int index)
return NULL;
}
-int ocelot_ace_rule_offload_add(struct ocelot_ace_rule *rule)
+int ocelot_ace_rule_offload_add(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule)
{
+ struct ocelot_acl_block *block = &ocelot->acl_block;
struct ocelot_ace_rule *ace;
int i, index;
/* Add rule to the linked list */
- ocelot_ace_rule_add(acl_block, rule);
+ ocelot_ace_rule_add(ocelot, block, rule);
/* Get the index of the inserted rule */
- index = ocelot_ace_rule_get_index_id(acl_block, rule);
+ index = ocelot_ace_rule_get_index_id(block, rule);
	/* Move down the rules to make room for the new rule */
- for (i = acl_block->count - 1; i > index; i--) {
- ace = ocelot_ace_rule_get_rule_index(acl_block, i);
- is2_entry_set(rule->port->ocelot, i, ace);
+ for (i = block->count - 1; i > index; i--) {
+ ace = ocelot_ace_rule_get_rule_index(block, i);
+ is2_entry_set(ocelot, i, ace);
}
/* Now insert the new rule */
- is2_entry_set(rule->port->ocelot, index, rule);
+ is2_entry_set(ocelot, index, rule);
return 0;
}
-static void ocelot_ace_rule_del(struct ocelot_acl_block *block,
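+/* ACE policers are allocated downwards from OCELOT_POLICER_DISCARD - 1.
+ * When one is freed, renumber the policers of the remaining rules so the
+ * allocated range stays contiguous, then release the lowest index in use.
+ */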
+static void ocelot_ace_police_del(struct ocelot *ocelot,
+ struct ocelot_acl_block *block,
+ u32 ix)
+{
+ struct ocelot_ace_rule *ace;
+ int index = -1;
+
+ if (ix < block->pol_lpr)
+ return;
+
+ list_for_each_entry(ace, &block->rules, list) {
+ index++;
+ if (ace->action == OCELOT_ACL_ACTION_POLICE &&
+ ace->pol_ix < ix) {
+ ace->pol_ix += 1;
+ ocelot_ace_policer_add(ocelot, ace->pol_ix,
+ &ace->pol);
+ is2_entry_set(ocelot, index, ace);
+ }
+ }
+
+ ocelot_ace_policer_del(ocelot, block->pol_lpr);
+ block->pol_lpr++;
+}
+
+static void ocelot_ace_rule_del(struct ocelot *ocelot,
+ struct ocelot_acl_block *block,
struct ocelot_ace_rule *rule)
{
struct ocelot_ace_rule *tmp;
@@ -672,6 +765,10 @@ static void ocelot_ace_rule_del(struct ocelot_acl_block *block,
list_for_each_safe(pos, q, &block->rules) {
tmp = list_entry(pos, struct ocelot_ace_rule, list);
if (tmp->id == rule->id) {
+ if (tmp->action == OCELOT_ACL_ACTION_POLICE)
+ ocelot_ace_police_del(ocelot, block,
+ tmp->pol_ix);
+
list_del(pos);
kfree(tmp);
}
@@ -680,8 +777,10 @@ static void ocelot_ace_rule_del(struct ocelot_acl_block *block,
block->count--;
}
-int ocelot_ace_rule_offload_del(struct ocelot_ace_rule *rule)
+int ocelot_ace_rule_offload_del(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule)
{
+ struct ocelot_acl_block *block = &ocelot->acl_block;
struct ocelot_ace_rule del_ace;
struct ocelot_ace_rule *ace;
int i, index;
@@ -689,70 +788,55 @@ int ocelot_ace_rule_offload_del(struct ocelot_ace_rule *rule)
memset(&del_ace, 0, sizeof(del_ace));
	/* Get the index of the rule */
- index = ocelot_ace_rule_get_index_id(acl_block, rule);
+ index = ocelot_ace_rule_get_index_id(block, rule);
/* Delete rule */
- ocelot_ace_rule_del(acl_block, rule);
+ ocelot_ace_rule_del(ocelot, block, rule);
	/* Move up all the rules that follow the deleted one */
- for (i = index; i < acl_block->count; i++) {
- ace = ocelot_ace_rule_get_rule_index(acl_block, i);
- is2_entry_set(rule->port->ocelot, i, ace);
+ for (i = index; i < block->count; i++) {
+ ace = ocelot_ace_rule_get_rule_index(block, i);
+ is2_entry_set(ocelot, i, ace);
}
/* Now delete the last rule, because it is duplicated */
- is2_entry_set(rule->port->ocelot, acl_block->count, &del_ace);
+ is2_entry_set(ocelot, block->count, &del_ace);
return 0;
}
-int ocelot_ace_rule_stats_update(struct ocelot_ace_rule *rule)
+int ocelot_ace_rule_stats_update(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule)
{
+ struct ocelot_acl_block *block = &ocelot->acl_block;
struct ocelot_ace_rule *tmp;
int index;
- index = ocelot_ace_rule_get_index_id(acl_block, rule);
- is2_entry_get(rule, index);
+ index = ocelot_ace_rule_get_index_id(block, rule);
+ is2_entry_get(ocelot, rule, index);
	/* After we get the result, we need to clear the counters */
- tmp = ocelot_ace_rule_get_rule_index(acl_block, index);
+ tmp = ocelot_ace_rule_get_rule_index(block, index);
tmp->stats.pkts = 0;
- is2_entry_set(rule->port->ocelot, index, tmp);
+ is2_entry_set(ocelot, index, tmp);
return 0;
}
-static struct ocelot_acl_block *ocelot_acl_block_create(struct ocelot *ocelot)
-{
- struct ocelot_acl_block *block;
-
- block = kzalloc(sizeof(*block), GFP_KERNEL);
- if (!block)
- return NULL;
-
- INIT_LIST_HEAD(&block->rules);
- block->count = 0;
- block->ocelot = ocelot;
-
- return block;
-}
-
-static void ocelot_acl_block_destroy(struct ocelot_acl_block *block)
-{
- kfree(block);
-}
-
int ocelot_ace_init(struct ocelot *ocelot)
{
+ const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ struct ocelot_acl_block *block = &ocelot->acl_block;
struct vcap_data data;
memset(&data, 0, sizeof(data));
+
vcap_entry2cache(ocelot, &data);
- ocelot_write(ocelot, vcap_is2.entry_count, S2_CORE_MV_CFG);
+ ocelot_write(ocelot, vcap_is2->entry_count, S2_CORE_MV_CFG);
vcap_cmd(ocelot, 0, VCAP_CMD_INITIALIZE, VCAP_SEL_ENTRY);
vcap_action2cache(ocelot, &data);
- ocelot_write(ocelot, vcap_is2.action_count, S2_CORE_MV_CFG);
+ ocelot_write(ocelot, vcap_is2->action_count, S2_CORE_MV_CFG);
vcap_cmd(ocelot, 0, VCAP_CMD_INITIALIZE,
VCAP_SEL_ACTION | VCAP_SEL_COUNTER);
@@ -771,12 +855,9 @@ int ocelot_ace_init(struct ocelot *ocelot)
ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_CIR_STATE,
OCELOT_POLICER_DISCARD);
- acl_block = ocelot_acl_block_create(ocelot);
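+	/* ACE policer indices are handed out downwards, starting just
+	 * below the discard policer.
+	 */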
+ block->pol_lpr = OCELOT_POLICER_DISCARD - 1;
- return 0;
-}
+ INIT_LIST_HEAD(&ocelot->acl_block.rules);
-void ocelot_ace_deinit(void)
-{
- ocelot_acl_block_destroy(acl_block);
+ return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.h b/drivers/net/ethernet/mscc/ocelot_ace.h
index c08e3e8482e7..29d22c566786 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.h
+++ b/drivers/net/ethernet/mscc/ocelot_ace.h
@@ -7,6 +7,7 @@
#define _MSCC_OCELOT_ACE_H_
#include "ocelot.h"
+#include "ocelot_police.h"
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
@@ -176,6 +177,7 @@ struct ocelot_ace_frame_ipv6 {
enum ocelot_ace_action {
OCELOT_ACL_ACTION_DROP,
OCELOT_ACL_ACTION_TRAP,
+ OCELOT_ACL_ACTION_POLICE,
};
struct ocelot_ace_stats {
@@ -186,14 +188,13 @@ struct ocelot_ace_stats {
struct ocelot_ace_rule {
struct list_head list;
- struct ocelot_port *port;
u16 prio;
u32 id;
enum ocelot_ace_action action;
struct ocelot_ace_stats stats;
- int chip_port;
+ u16 ingress_port_mask;
enum ocelot_vcap_bit dmac_mc;
enum ocelot_vcap_bit dmac_bc;
@@ -209,24 +210,21 @@ struct ocelot_ace_rule {
struct ocelot_ace_frame_ipv4 ipv4;
struct ocelot_ace_frame_ipv6 ipv6;
} frame;
+ struct ocelot_policer pol;
+ u32 pol_ix;
};
-struct ocelot_acl_block {
- struct list_head rules;
- struct ocelot *ocelot;
- int count;
-};
-
-int ocelot_ace_rule_offload_add(struct ocelot_ace_rule *rule);
-int ocelot_ace_rule_offload_del(struct ocelot_ace_rule *rule);
-int ocelot_ace_rule_stats_update(struct ocelot_ace_rule *rule);
+int ocelot_ace_rule_offload_add(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule);
+int ocelot_ace_rule_offload_del(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule);
+int ocelot_ace_rule_stats_update(struct ocelot *ocelot,
+ struct ocelot_ace_rule *rule);
int ocelot_ace_init(struct ocelot *ocelot);
-void ocelot_ace_deinit(void);
-int ocelot_setup_tc_block_flower_bind(struct ocelot_port_private *priv,
- struct flow_block_offload *f);
-void ocelot_setup_tc_block_flower_unbind(struct ocelot_port_private *priv,
- struct flow_block_offload *f);
+int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
+ struct flow_cls_offload *f,
+ bool ingress);
#endif /* _MSCC_OCELOT_ACE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 1135a18019c7..0ac9fbf77a01 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -14,9 +14,14 @@
#include <linux/skbuff.h>
#include <net/switchdev.h>
+#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0))
+#define VSC7514_VCAP_IS2_CNT 64
+#define VSC7514_VCAP_IS2_ENTRY_WIDTH 376
+#define VSC7514_VCAP_IS2_ACTION_WIDTH 99
+#define VSC7514_VCAP_PORT_CNT 11
static int ocelot_parse_ifh(u32 *_ifh, struct frame_info *info)
{
@@ -211,29 +216,6 @@ static const struct of_device_id mscc_ocelot_match[] = {
};
MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
-static void ocelot_port_pcs_init(struct ocelot *ocelot, int port)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
-
- /* Disable HDX fast control */
- ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
- DEV_PORT_MISC);
-
- /* SGMII only for now */
- ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
- PCS1G_MODE_CFG);
- ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
-
- /* Enable PCS */
- ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
-
- /* No aneg on SGMII */
- ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
-
- /* No loopback */
- ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
-}
-
static int ocelot_reset(struct ocelot *ocelot)
{
int retries = 100;
@@ -258,10 +240,132 @@ static int ocelot_reset(struct ocelot *ocelot)
}
static const struct ocelot_ops ocelot_ops = {
- .pcs_init = ocelot_port_pcs_init,
.reset = ocelot_reset,
};
+static const struct vcap_field vsc7514_vcap_is2_keys[] = {
+ /* Common: 46 bits */
+ [VCAP_IS2_TYPE] = { 0, 4},
+ [VCAP_IS2_HK_FIRST] = { 4, 1},
+ [VCAP_IS2_HK_PAG] = { 5, 8},
+ [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12},
+ [VCAP_IS2_HK_RSV2] = { 25, 1},
+ [VCAP_IS2_HK_HOST_MATCH] = { 26, 1},
+ [VCAP_IS2_HK_L2_MC] = { 27, 1},
+ [VCAP_IS2_HK_L2_BC] = { 28, 1},
+ [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1},
+ [VCAP_IS2_HK_VID] = { 30, 12},
+ [VCAP_IS2_HK_DEI] = { 42, 1},
+ [VCAP_IS2_HK_PCP] = { 43, 3},
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ [VCAP_IS2_HK_L2_DMAC] = { 46, 48},
+ [VCAP_IS2_HK_L2_SMAC] = { 94, 48},
+ /* MAC_ETYPE (TYPE=000) */
+ [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {142, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {158, 16},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {174, 8},
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {182, 3},
+ /* MAC_LLC (TYPE=001) */
+ [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {142, 40},
+ /* MAC_SNAP (TYPE=010) */
+ [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {142, 40},
+ /* MAC_ARP (TYPE=011) */
+ [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48},
+ [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1},
+ [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1},
+ [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1},
+ [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1},
+ [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1},
+ [VCAP_IS2_HK_MAC_ARP_OPCODE] = {100, 2},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = {102, 32},
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {134, 32},
+ [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {166, 1},
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ [VCAP_IS2_HK_IP4] = { 46, 1},
+ [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1},
+ [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1},
+ [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1},
+ [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1},
+ [VCAP_IS2_HK_L3_TOS] = { 51, 8},
+ [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32},
+ [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32},
+ [VCAP_IS2_HK_DIP_EQ_SIP] = {123, 1},
+ /* IP4_TCP_UDP (TYPE=100) */
+ [VCAP_IS2_HK_TCP] = {124, 1},
+ [VCAP_IS2_HK_L4_SPORT] = {125, 16},
+ [VCAP_IS2_HK_L4_DPORT] = {141, 16},
+ [VCAP_IS2_HK_L4_RNG] = {157, 8},
+ [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {165, 1},
+ [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {166, 1},
+ [VCAP_IS2_HK_L4_URG] = {167, 1},
+ [VCAP_IS2_HK_L4_ACK] = {168, 1},
+ [VCAP_IS2_HK_L4_PSH] = {169, 1},
+ [VCAP_IS2_HK_L4_RST] = {170, 1},
+ [VCAP_IS2_HK_L4_SYN] = {171, 1},
+ [VCAP_IS2_HK_L4_FIN] = {172, 1},
+ [VCAP_IS2_HK_L4_1588_DOM] = {173, 8},
+ [VCAP_IS2_HK_L4_1588_VER] = {181, 4},
+ /* IP4_OTHER (TYPE=101) */
+ [VCAP_IS2_HK_IP4_L3_PROTO] = {124, 8},
+ [VCAP_IS2_HK_L3_PAYLOAD] = {132, 56},
+ /* IP6_STD (TYPE=110) */
+ [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1},
+ [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128},
+ [VCAP_IS2_HK_IP6_L3_PROTO] = {175, 8},
+ /* OAM (TYPE=111) */
+ [VCAP_IS2_HK_OAM_MEL_FLAGS] = {142, 7},
+ [VCAP_IS2_HK_OAM_VER] = {149, 5},
+ [VCAP_IS2_HK_OAM_OPCODE] = {154, 8},
+ [VCAP_IS2_HK_OAM_FLAGS] = {162, 8},
+ [VCAP_IS2_HK_OAM_MEPID] = {170, 16},
+ [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {186, 1},
+ [VCAP_IS2_HK_OAM_IS_Y1731] = {187, 1},
+};
+
+static const struct vcap_field vsc7514_vcap_is2_actions[] = {
+ [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1},
+ [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1},
+ [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3},
+ [VCAP_IS2_ACT_MASK_MODE] = { 5, 2},
+ [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1},
+ [VCAP_IS2_ACT_LRN_DIS] = { 8, 1},
+ [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
+ [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9},
+ [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1},
+ [VCAP_IS2_ACT_PORT_MASK] = { 20, 11},
+ [VCAP_IS2_ACT_REW_OP] = { 31, 9},
+ [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1},
+ [VCAP_IS2_ACT_RSV] = { 41, 2},
+ [VCAP_IS2_ACT_ACL_ID] = { 43, 6},
+ [VCAP_IS2_ACT_HIT_CNT] = { 49, 32},
+};
+
+static const struct vcap_props vsc7514_vcap_props[] = {
+ [VCAP_IS2] = {
+ .tg_width = 2,
+ .sw_count = 4,
+ .entry_count = VSC7514_VCAP_IS2_CNT,
+ .entry_width = VSC7514_VCAP_IS2_ENTRY_WIDTH,
+ .action_count = VSC7514_VCAP_IS2_CNT +
+ VSC7514_VCAP_PORT_CNT + 2,
+		.action_width = VSC7514_VCAP_IS2_ACTION_WIDTH,
+ .action_type_width = 1,
+ .action_table = {
+ [IS2_ACTION_TYPE_NORMAL] = {
+ .width = 49,
+ .count = 2
+ },
+ [IS2_ACTION_TYPE_SMAC_SIP] = {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .counter_words = 4,
+ .counter_width = 32,
+ },
+};
+
static int mscc_ocelot_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -349,8 +453,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->ptp = 1;
}
- ocelot->num_cpu_ports = 1; /* 1 port on the switch, two groups */
-
ports = of_get_child_by_name(np, "ethernet-ports");
if (!ports) {
dev_err(&pdev->dev, "no ethernet-ports child node found\n");
@@ -362,9 +464,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports,
sizeof(struct ocelot_port *), GFP_KERNEL);
+ ocelot->vcap_is2_keys = vsc7514_vcap_is2_keys;
+ ocelot->vcap_is2_actions = vsc7514_vcap_is2_actions;
+ ocelot->vcap = vsc7514_vcap_props;
+
ocelot_init(ocelot);
- ocelot_set_cpu_port(ocelot, ocelot->num_phys_ports,
- OCELOT_TAG_PREFIX_NONE, OCELOT_TAG_PREFIX_NONE);
+ /* No NPI port */
+ ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE,
+ OCELOT_TAG_PREFIX_NONE);
for_each_available_child_of_node(ports, portnp) {
struct ocelot_port_private *priv;
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 3d65b99b9734..341923311fec 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -8,27 +8,35 @@
#include "ocelot_ace.h"
-struct ocelot_port_block {
- struct ocelot_acl_block *block;
- struct ocelot_port_private *priv;
-};
-
static int ocelot_flower_parse_action(struct flow_cls_offload *f,
- struct ocelot_ace_rule *rule)
+ struct ocelot_ace_rule *ace)
{
const struct flow_action_entry *a;
+ s64 burst;
+ u64 rate;
int i;
- if (f->rule->action.num_entries != 1)
+ if (!flow_offload_has_one_action(&f->rule->action))
+ return -EOPNOTSUPP;
+
+ if (!flow_action_basic_hw_stats_check(&f->rule->action,
+ f->common.extack))
return -EOPNOTSUPP;
flow_action_for_each(i, a, &f->rule->action) {
switch (a->id) {
case FLOW_ACTION_DROP:
- rule->action = OCELOT_ACL_ACTION_DROP;
+ ace->action = OCELOT_ACL_ACTION_DROP;
break;
case FLOW_ACTION_TRAP:
- rule->action = OCELOT_ACL_ACTION_TRAP;
+ ace->action = OCELOT_ACL_ACTION_TRAP;
+ break;
+ case FLOW_ACTION_POLICE:
+ ace->action = OCELOT_ACL_ACTION_POLICE;
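+			/* struct ocelot_policer wants the rate in kbps and
+			 * the burst in bytes, so convert from bytes per
+			 * second and from a burst duration (rate * time).
+			 */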
+ rate = a->police.rate_bytes_ps;
+ ace->pol.rate = div_u64(rate, 1000) * 8;
+ burst = rate * PSCHED_NS2TICKS(a->police.burst);
+ ace->pol.burst = div_u64(burst, PSCHED_TICKS_PER_SEC);
break;
default:
return -EOPNOTSUPP;
@@ -39,7 +47,7 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f,
}
static int ocelot_flower_parse(struct flow_cls_offload *f,
- struct ocelot_ace_rule *ocelot_rule)
+ struct ocelot_ace_rule *ace)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
@@ -84,14 +92,14 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
return -EOPNOTSUPP;
flow_rule_match_eth_addrs(rule, &match);
- ocelot_rule->type = OCELOT_ACE_TYPE_ETYPE;
- ether_addr_copy(ocelot_rule->frame.etype.dmac.value,
+ ace->type = OCELOT_ACE_TYPE_ETYPE;
+ ether_addr_copy(ace->frame.etype.dmac.value,
match.key->dst);
- ether_addr_copy(ocelot_rule->frame.etype.smac.value,
+ ether_addr_copy(ace->frame.etype.smac.value,
match.key->src);
- ether_addr_copy(ocelot_rule->frame.etype.dmac.mask,
+ ether_addr_copy(ace->frame.etype.dmac.mask,
match.mask->dst);
- ether_addr_copy(ocelot_rule->frame.etype.smac.mask,
+ ether_addr_copy(ace->frame.etype.smac.mask,
match.mask->src);
goto finished_key_parsing;
}
@@ -101,17 +109,17 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
flow_rule_match_basic(rule, &match);
if (ntohs(match.key->n_proto) == ETH_P_IP) {
- ocelot_rule->type = OCELOT_ACE_TYPE_IPV4;
- ocelot_rule->frame.ipv4.proto.value[0] =
+ ace->type = OCELOT_ACE_TYPE_IPV4;
+ ace->frame.ipv4.proto.value[0] =
match.key->ip_proto;
- ocelot_rule->frame.ipv4.proto.mask[0] =
+ ace->frame.ipv4.proto.mask[0] =
match.mask->ip_proto;
}
if (ntohs(match.key->n_proto) == ETH_P_IPV6) {
- ocelot_rule->type = OCELOT_ACE_TYPE_IPV6;
- ocelot_rule->frame.ipv6.proto.value[0] =
+ ace->type = OCELOT_ACE_TYPE_IPV6;
+ ace->frame.ipv6.proto.value[0] =
match.key->ip_proto;
- ocelot_rule->frame.ipv6.proto.mask[0] =
+ ace->frame.ipv6.proto.mask[0] =
match.mask->ip_proto;
}
}
@@ -122,16 +130,16 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
u8 *tmp;
flow_rule_match_ipv4_addrs(rule, &match);
- tmp = &ocelot_rule->frame.ipv4.sip.value.addr[0];
+ tmp = &ace->frame.ipv4.sip.value.addr[0];
memcpy(tmp, &match.key->src, 4);
- tmp = &ocelot_rule->frame.ipv4.sip.mask.addr[0];
+ tmp = &ace->frame.ipv4.sip.mask.addr[0];
memcpy(tmp, &match.mask->src, 4);
- tmp = &ocelot_rule->frame.ipv4.dip.value.addr[0];
+ tmp = &ace->frame.ipv4.dip.value.addr[0];
memcpy(tmp, &match.key->dst, 4);
- tmp = &ocelot_rule->frame.ipv4.dip.mask.addr[0];
+ tmp = &ace->frame.ipv4.dip.mask.addr[0];
memcpy(tmp, &match.mask->dst, 4);
}
@@ -144,213 +152,111 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
struct flow_match_ports match;
flow_rule_match_ports(rule, &match);
- ocelot_rule->frame.ipv4.sport.value = ntohs(match.key->src);
- ocelot_rule->frame.ipv4.sport.mask = ntohs(match.mask->src);
- ocelot_rule->frame.ipv4.dport.value = ntohs(match.key->dst);
- ocelot_rule->frame.ipv4.dport.mask = ntohs(match.mask->dst);
+ ace->frame.ipv4.sport.value = ntohs(match.key->src);
+ ace->frame.ipv4.sport.mask = ntohs(match.mask->src);
+ ace->frame.ipv4.dport.value = ntohs(match.key->dst);
+ ace->frame.ipv4.dport.mask = ntohs(match.mask->dst);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
- ocelot_rule->type = OCELOT_ACE_TYPE_ANY;
- ocelot_rule->vlan.vid.value = match.key->vlan_id;
- ocelot_rule->vlan.vid.mask = match.mask->vlan_id;
- ocelot_rule->vlan.pcp.value[0] = match.key->vlan_priority;
- ocelot_rule->vlan.pcp.mask[0] = match.mask->vlan_priority;
+ ace->type = OCELOT_ACE_TYPE_ANY;
+ ace->vlan.vid.value = match.key->vlan_id;
+ ace->vlan.vid.mask = match.mask->vlan_id;
+ ace->vlan.pcp.value[0] = match.key->vlan_priority;
+ ace->vlan.pcp.mask[0] = match.mask->vlan_priority;
}
finished_key_parsing:
- ocelot_rule->prio = f->common.prio;
- ocelot_rule->id = f->cookie;
- return ocelot_flower_parse_action(f, ocelot_rule);
+ ace->prio = f->common.prio;
+ ace->id = f->cookie;
+ return ocelot_flower_parse_action(f, ace);
}
static
-struct ocelot_ace_rule *ocelot_ace_rule_create(struct flow_cls_offload *f,
- struct ocelot_port_block *block)
+struct ocelot_ace_rule *ocelot_ace_rule_create(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f)
{
- struct ocelot_ace_rule *rule;
+ struct ocelot_ace_rule *ace;
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
- if (!rule)
+ ace = kzalloc(sizeof(*ace), GFP_KERNEL);
+ if (!ace)
return NULL;
- rule->port = &block->priv->port;
- rule->chip_port = block->priv->chip_port;
- return rule;
+ ace->ingress_port_mask = BIT(port);
+ return ace;
}
-static int ocelot_flower_replace(struct flow_cls_offload *f,
- struct ocelot_port_block *port_block)
+int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress)
{
- struct ocelot_ace_rule *rule;
+ struct ocelot_ace_rule *ace;
int ret;
- rule = ocelot_ace_rule_create(f, port_block);
- if (!rule)
+ ace = ocelot_ace_rule_create(ocelot, port, f);
+ if (!ace)
return -ENOMEM;
- ret = ocelot_flower_parse(f, rule);
+ ret = ocelot_flower_parse(f, ace);
if (ret) {
- kfree(rule);
+ kfree(ace);
return ret;
}
- ret = ocelot_ace_rule_offload_add(rule);
- if (ret)
- return ret;
-
- port_block->priv->tc.offload_cnt++;
- return 0;
+ return ocelot_ace_rule_offload_add(ocelot, ace);
}
+EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
-static int ocelot_flower_destroy(struct flow_cls_offload *f,
- struct ocelot_port_block *port_block)
+int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress)
{
- struct ocelot_ace_rule rule;
- int ret;
-
- rule.prio = f->common.prio;
- rule.port = &port_block->priv->port;
- rule.id = f->cookie;
+ struct ocelot_ace_rule ace;
- ret = ocelot_ace_rule_offload_del(&rule);
- if (ret)
- return ret;
+ ace.prio = f->common.prio;
+ ace.id = f->cookie;
- port_block->priv->tc.offload_cnt--;
- return 0;
+ return ocelot_ace_rule_offload_del(ocelot, &ace);
}
+EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy);
-static int ocelot_flower_stats_update(struct flow_cls_offload *f,
- struct ocelot_port_block *port_block)
+int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress)
{
- struct ocelot_ace_rule rule;
+ struct ocelot_ace_rule ace;
int ret;
- rule.prio = f->common.prio;
- rule.port = &port_block->priv->port;
- rule.id = f->cookie;
- ret = ocelot_ace_rule_stats_update(&rule);
+ ace.prio = f->common.prio;
+ ace.id = f->cookie;
+ ret = ocelot_ace_rule_stats_update(ocelot, &ace);
if (ret)
return ret;
- flow_stats_update(&f->stats, 0x0, rule.stats.pkts, 0x0);
+ flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0x0,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
return 0;
}
+EXPORT_SYMBOL_GPL(ocelot_cls_flower_stats);
-static int ocelot_setup_tc_cls_flower(struct flow_cls_offload *f,
- struct ocelot_port_block *port_block)
+int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
+ struct flow_cls_offload *f,
+ bool ingress)
{
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ if (!ingress)
+ return -EOPNOTSUPP;
+
switch (f->command) {
case FLOW_CLS_REPLACE:
- return ocelot_flower_replace(f, port_block);
+ return ocelot_cls_flower_replace(ocelot, port, f, ingress);
case FLOW_CLS_DESTROY:
- return ocelot_flower_destroy(f, port_block);
+ return ocelot_cls_flower_destroy(ocelot, port, f, ingress);
case FLOW_CLS_STATS:
- return ocelot_flower_stats_update(f, port_block);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int ocelot_setup_tc_block_cb_flower(enum tc_setup_type type,
- void *type_data, void *cb_priv)
-{
- struct ocelot_port_block *port_block = cb_priv;
-
- if (!tc_cls_can_offload_and_chain0(port_block->priv->dev, type_data))
- return -EOPNOTSUPP;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return ocelot_setup_tc_cls_flower(type_data, cb_priv);
- case TC_SETUP_CLSMATCHALL:
- return 0;
+ return ocelot_cls_flower_stats(ocelot, port, f, ingress);
default:
return -EOPNOTSUPP;
}
}
-
-static struct ocelot_port_block*
-ocelot_port_block_create(struct ocelot_port_private *priv)
-{
- struct ocelot_port_block *port_block;
-
- port_block = kzalloc(sizeof(*port_block), GFP_KERNEL);
- if (!port_block)
- return NULL;
-
- port_block->priv = priv;
-
- return port_block;
-}
-
-static void ocelot_port_block_destroy(struct ocelot_port_block *block)
-{
- kfree(block);
-}
-
-static void ocelot_tc_block_unbind(void *cb_priv)
-{
- struct ocelot_port_block *port_block = cb_priv;
-
- ocelot_port_block_destroy(port_block);
-}
-
-int ocelot_setup_tc_block_flower_bind(struct ocelot_port_private *priv,
- struct flow_block_offload *f)
-{
- struct ocelot_port_block *port_block;
- struct flow_block_cb *block_cb;
- int ret;
-
- if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
- return -EOPNOTSUPP;
-
- block_cb = flow_block_cb_lookup(f->block,
- ocelot_setup_tc_block_cb_flower, priv);
- if (!block_cb) {
- port_block = ocelot_port_block_create(priv);
- if (!port_block)
- return -ENOMEM;
-
- block_cb = flow_block_cb_alloc(ocelot_setup_tc_block_cb_flower,
- priv, port_block,
- ocelot_tc_block_unbind);
- if (IS_ERR(block_cb)) {
- ret = PTR_ERR(block_cb);
- goto err_cb_register;
- }
- flow_block_cb_add(block_cb, f);
- list_add_tail(&block_cb->driver_list, f->driver_block_list);
- } else {
- port_block = flow_block_cb_priv(block_cb);
- }
-
- flow_block_cb_incref(block_cb);
- return 0;
-
-err_cb_register:
- ocelot_port_block_destroy(port_block);
-
- return ret;
-}
-
-void ocelot_setup_tc_block_flower_unbind(struct ocelot_port_private *priv,
- struct flow_block_offload *f)
-{
- struct flow_block_cb *block_cb;
-
- block_cb = flow_block_cb_lookup(f->block,
- ocelot_setup_tc_block_cb_flower, priv);
- if (!block_cb)
- return;
-
- if (!flow_block_cb_decref(block_cb)) {
- flow_block_cb_remove(block_cb, f);
- list_del(&block_cb->driver_list);
- }
-}
diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c
index faddce43f2e3..2e1d8e187332 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.c
+++ b/drivers/net/ethernet/mscc/ocelot_police.c
@@ -4,6 +4,7 @@
* Copyright (c) 2019 Microsemi Corporation
*/
+#include <soc/mscc/ocelot.h>
#include "ocelot_police.h"
enum mscc_qos_rate_mode {
@@ -203,6 +204,7 @@ int ocelot_port_policer_add(struct ocelot *ocelot, int port,
return 0;
}
+EXPORT_SYMBOL(ocelot_port_policer_add);
int ocelot_port_policer_del(struct ocelot *ocelot, int port)
{
@@ -225,3 +227,28 @@ int ocelot_port_policer_del(struct ocelot *ocelot, int port)
return 0;
}
+EXPORT_SYMBOL(ocelot_port_policer_del);
+
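+/* Program the shared policer at index pol_ix on behalf of an ACL rule */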
+int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix,
+ struct ocelot_policer *pol)
+{
+ struct qos_policer_conf pp = { 0 };
+
+ if (!pol)
+ return -EINVAL;
+
+ pp.mode = MSCC_QOS_RATE_MODE_DATA;
+ pp.pir = pol->rate;
+ pp.pbs = pol->burst;
+
+ return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+}
+
+int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix)
+{
+ struct qos_policer_conf pp = { 0 };
+
+ pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
+
+ return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h
index ae9509229463..792abd28010a 100644
--- a/drivers/net/ethernet/mscc/ocelot_police.h
+++ b/drivers/net/ethernet/mscc/ocelot_police.h
@@ -9,14 +9,9 @@
#include "ocelot.h"
-struct ocelot_policer {
- u32 rate; /* kilobit per second */
- u32 burst; /* bytes */
-};
+int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix,
+ struct ocelot_policer *pol);
-int ocelot_port_policer_add(struct ocelot *ocelot, int port,
- struct ocelot_policer *pol);
-
-int ocelot_port_policer_del(struct ocelot *ocelot, int port);
+int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix);
#endif /* _MSCC_OCELOT_POLICE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
index a4f7fbd76507..d326e231f0ad 100644
--- a/drivers/net/ethernet/mscc/ocelot_tc.c
+++ b/drivers/net/ethernet/mscc/ocelot_tc.c
@@ -4,8 +4,8 @@
* Copyright (c) 2019 Microsemi Corporation
*/
+#include <soc/mscc/ocelot.h>
#include "ocelot_tc.h"
-#include "ocelot_police.h"
#include "ocelot_ace.h"
#include <net/pkt_cls.h>
@@ -20,9 +20,6 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
int port = priv->chip_port;
int err;
- netdev_dbg(priv->dev, "%s: port %u command %d cookie %lu\n",
- __func__, port, f->command, f->cookie);
-
if (!ingress) {
NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported");
return -EOPNOTSUPP;
@@ -99,17 +96,10 @@ static int ocelot_setup_tc_block_cb(enum tc_setup_type type,
switch (type) {
case TC_SETUP_CLSMATCHALL:
- netdev_dbg(priv->dev, "tc_block_cb: TC_SETUP_CLSMATCHALL %s\n",
- ingress ? "ingress" : "egress");
-
return ocelot_setup_tc_cls_matchall(priv, type_data, ingress);
case TC_SETUP_CLSFLOWER:
- return 0;
+ return ocelot_setup_tc_cls_flower(priv, type_data, ingress);
default:
- netdev_dbg(priv->dev, "tc_block_cb: type %d %s\n",
- type,
- ingress ? "ingress" : "egress");
-
return -EOPNOTSUPP;
}
}
@@ -137,10 +127,6 @@ static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
{
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
- int err;
-
- netdev_dbg(priv->dev, "tc_block command %d, binder_type %d\n",
- f->command, f->binder_type);
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
cb = ocelot_setup_tc_block_cb_ig;
@@ -162,11 +148,6 @@ static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
- err = ocelot_setup_tc_block_flower_bind(priv, f);
- if (err < 0) {
- flow_block_cb_free(block_cb);
- return err;
- }
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, f->driver_block_list);
return 0;
@@ -175,7 +156,6 @@ static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
if (!block_cb)
return -ENOENT;
- ocelot_setup_tc_block_flower_unbind(priv, f);
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h
deleted file mode 100644
index e22eac1da783..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_vcap.h
+++ /dev/null
@@ -1,403 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
- * Microsemi Ocelot Switch driver
- * Copyright (c) 2019 Microsemi Corporation
- */
-
-#ifndef _OCELOT_VCAP_H_
-#define _OCELOT_VCAP_H_
-
-/* =================================================================
- * VCAP Common
- * =================================================================
- */
-
-/* VCAP Type-Group values */
-#define VCAP_TG_NONE 0 /* Entry is invalid */
-#define VCAP_TG_FULL 1 /* Full entry */
-#define VCAP_TG_HALF 2 /* Half entry */
-#define VCAP_TG_QUARTER 3 /* Quarter entry */
-
-/* =================================================================
- * VCAP IS2
- * =================================================================
- */
-
-#define VCAP_IS2_CNT 64
-#define VCAP_IS2_ENTRY_WIDTH 376
-#define VCAP_IS2_ACTION_WIDTH 99
-#define VCAP_PORT_CNT 11
-
-/* IS2 half key types */
-#define IS2_TYPE_ETYPE 0
-#define IS2_TYPE_LLC 1
-#define IS2_TYPE_SNAP 2
-#define IS2_TYPE_ARP 3
-#define IS2_TYPE_IP_UDP_TCP 4
-#define IS2_TYPE_IP_OTHER 5
-#define IS2_TYPE_IPV6 6
-#define IS2_TYPE_OAM 7
-#define IS2_TYPE_SMAC_SIP6 8
-#define IS2_TYPE_ANY 100 /* Pseudo type */
-
-/* IS2 half key type mask for matching any IP */
-#define IS2_TYPE_MASK_IP_ANY 0xe
-
-/* IS2 action types */
-#define IS2_ACTION_TYPE_NORMAL 0
-#define IS2_ACTION_TYPE_SMAC_SIP 1
-
-/* IS2 MASK_MODE values */
-#define IS2_ACT_MASK_MODE_NONE 0
-#define IS2_ACT_MASK_MODE_FILTER 1
-#define IS2_ACT_MASK_MODE_POLICY 2
-#define IS2_ACT_MASK_MODE_REDIR 3
-
-/* IS2 REW_OP values */
-#define IS2_ACT_REW_OP_NONE 0
-#define IS2_ACT_REW_OP_PTP_ONE 2
-#define IS2_ACT_REW_OP_PTP_TWO 3
-#define IS2_ACT_REW_OP_SPECIAL 8
-#define IS2_ACT_REW_OP_PTP_ORG 9
-#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_1 (IS2_ACT_REW_OP_PTP_ONE | (1 << 3))
-#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_2 (IS2_ACT_REW_OP_PTP_ONE | (2 << 3))
-#define IS2_ACT_REW_OP_PTP_ONE_ADD_DELAY (IS2_ACT_REW_OP_PTP_ONE | (1 << 5))
-#define IS2_ACT_REW_OP_PTP_ONE_ADD_SUB BIT(7)
-
-#define VCAP_PORT_WIDTH 4
-
-/* IS2 quarter key - SMAC_SIP4 */
-#define IS2_QKO_IGR_PORT 0
-#define IS2_QKL_IGR_PORT VCAP_PORT_WIDTH
-#define IS2_QKO_L2_SMAC (IS2_QKO_IGR_PORT + IS2_QKL_IGR_PORT)
-#define IS2_QKL_L2_SMAC 48
-#define IS2_QKO_L3_IP4_SIP (IS2_QKO_L2_SMAC + IS2_QKL_L2_SMAC)
-#define IS2_QKL_L3_IP4_SIP 32
-
-/* IS2 half key - common */
-#define IS2_HKO_TYPE 0
-#define IS2_HKL_TYPE 4
-#define IS2_HKO_FIRST (IS2_HKO_TYPE + IS2_HKL_TYPE)
-#define IS2_HKL_FIRST 1
-#define IS2_HKO_PAG (IS2_HKO_FIRST + IS2_HKL_FIRST)
-#define IS2_HKL_PAG 8
-#define IS2_HKO_IGR_PORT_MASK (IS2_HKO_PAG + IS2_HKL_PAG)
-#define IS2_HKL_IGR_PORT_MASK (VCAP_PORT_CNT + 1)
-#define IS2_HKO_SERVICE_FRM (IS2_HKO_IGR_PORT_MASK + IS2_HKL_IGR_PORT_MASK)
-#define IS2_HKL_SERVICE_FRM 1
-#define IS2_HKO_HOST_MATCH (IS2_HKO_SERVICE_FRM + IS2_HKL_SERVICE_FRM)
-#define IS2_HKL_HOST_MATCH 1
-#define IS2_HKO_L2_MC (IS2_HKO_HOST_MATCH + IS2_HKL_HOST_MATCH)
-#define IS2_HKL_L2_MC 1
-#define IS2_HKO_L2_BC (IS2_HKO_L2_MC + IS2_HKL_L2_MC)
-#define IS2_HKL_L2_BC 1
-#define IS2_HKO_VLAN_TAGGED (IS2_HKO_L2_BC + IS2_HKL_L2_BC)
-#define IS2_HKL_VLAN_TAGGED 1
-#define IS2_HKO_VID (IS2_HKO_VLAN_TAGGED + IS2_HKL_VLAN_TAGGED)
-#define IS2_HKL_VID 12
-#define IS2_HKO_DEI (IS2_HKO_VID + IS2_HKL_VID)
-#define IS2_HKL_DEI 1
-#define IS2_HKO_PCP (IS2_HKO_DEI + IS2_HKL_DEI)
-#define IS2_HKL_PCP 3
-
-/* IS2 half key - MAC_ETYPE/MAC_LLC/MAC_SNAP/OAM common */
-#define IS2_HKO_L2_DMAC (IS2_HKO_PCP + IS2_HKL_PCP)
-#define IS2_HKL_L2_DMAC 48
-#define IS2_HKO_L2_SMAC (IS2_HKO_L2_DMAC + IS2_HKL_L2_DMAC)
-#define IS2_HKL_L2_SMAC 48
-
-/* IS2 half key - MAC_ETYPE */
-#define IS2_HKO_MAC_ETYPE_ETYPE (IS2_HKO_L2_SMAC + IS2_HKL_L2_SMAC)
-#define IS2_HKL_MAC_ETYPE_ETYPE 16
-#define IS2_HKO_MAC_ETYPE_L2_PAYLOAD \
- (IS2_HKO_MAC_ETYPE_ETYPE + IS2_HKL_MAC_ETYPE_ETYPE)
-#define IS2_HKL_MAC_ETYPE_L2_PAYLOAD 27
-
-/* IS2 half key - MAC_LLC */
-#define IS2_HKO_MAC_LLC_L2_LLC IS2_HKO_MAC_ETYPE_ETYPE
-#define IS2_HKL_MAC_LLC_L2_LLC 40
-
-/* IS2 half key - MAC_SNAP */
-#define IS2_HKO_MAC_SNAP_L2_SNAP IS2_HKO_MAC_ETYPE_ETYPE
-#define IS2_HKL_MAC_SNAP_L2_SNAP 40
-
-/* IS2 half key - ARP */
-#define IS2_HKO_MAC_ARP_L2_SMAC IS2_HKO_L2_DMAC
-#define IS2_HKL_MAC_ARP_L2_SMAC 48
-#define IS2_HKO_MAC_ARP_ARP_ADDR_SPACE_OK \
- (IS2_HKO_MAC_ARP_L2_SMAC + IS2_HKL_MAC_ARP_L2_SMAC)
-#define IS2_HKL_MAC_ARP_ARP_ADDR_SPACE_OK 1
-#define IS2_HKO_MAC_ARP_ARP_PROTO_SPACE_OK \
- (IS2_HKO_MAC_ARP_ARP_ADDR_SPACE_OK + IS2_HKL_MAC_ARP_ARP_ADDR_SPACE_OK)
-#define IS2_HKL_MAC_ARP_ARP_PROTO_SPACE_OK 1
-#define IS2_HKO_MAC_ARP_ARP_LEN_OK \
- (IS2_HKO_MAC_ARP_ARP_PROTO_SPACE_OK + \
- IS2_HKL_MAC_ARP_ARP_PROTO_SPACE_OK)
-#define IS2_HKL_MAC_ARP_ARP_LEN_OK 1
-#define IS2_HKO_MAC_ARP_ARP_TGT_MATCH \
- (IS2_HKO_MAC_ARP_ARP_LEN_OK + IS2_HKL_MAC_ARP_ARP_LEN_OK)
-#define IS2_HKL_MAC_ARP_ARP_TGT_MATCH 1
-#define IS2_HKO_MAC_ARP_ARP_SENDER_MATCH \
- (IS2_HKO_MAC_ARP_ARP_TGT_MATCH + IS2_HKL_MAC_ARP_ARP_TGT_MATCH)
-#define IS2_HKL_MAC_ARP_ARP_SENDER_MATCH 1
-#define IS2_HKO_MAC_ARP_ARP_OPCODE_UNKNOWN \
- (IS2_HKO_MAC_ARP_ARP_SENDER_MATCH + IS2_HKL_MAC_ARP_ARP_SENDER_MATCH)
-#define IS2_HKL_MAC_ARP_ARP_OPCODE_UNKNOWN 1
-#define IS2_HKO_MAC_ARP_ARP_OPCODE \
- (IS2_HKO_MAC_ARP_ARP_OPCODE_UNKNOWN + \
- IS2_HKL_MAC_ARP_ARP_OPCODE_UNKNOWN)
-#define IS2_HKL_MAC_ARP_ARP_OPCODE 2
-#define IS2_HKO_MAC_ARP_L3_IP4_DIP \
- (IS2_HKO_MAC_ARP_ARP_OPCODE + IS2_HKL_MAC_ARP_ARP_OPCODE)
-#define IS2_HKL_MAC_ARP_L3_IP4_DIP 32
-#define IS2_HKO_MAC_ARP_L3_IP4_SIP \
- (IS2_HKO_MAC_ARP_L3_IP4_DIP + IS2_HKL_MAC_ARP_L3_IP4_DIP)
-#define IS2_HKL_MAC_ARP_L3_IP4_SIP 32
-#define IS2_HKO_MAC_ARP_DIP_EQ_SIP \
- (IS2_HKO_MAC_ARP_L3_IP4_SIP + IS2_HKL_MAC_ARP_L3_IP4_SIP)
-#define IS2_HKL_MAC_ARP_DIP_EQ_SIP 1
-
-/* IS2 half key - IP4_TCP_UDP/IP4_OTHER common */
-#define IS2_HKO_IP4 IS2_HKO_L2_DMAC
-#define IS2_HKL_IP4 1
-#define IS2_HKO_L3_FRAGMENT (IS2_HKO_IP4 + IS2_HKL_IP4)
-#define IS2_HKL_L3_FRAGMENT 1
-#define IS2_HKO_L3_FRAG_OFS_GT0 (IS2_HKO_L3_FRAGMENT + IS2_HKL_L3_FRAGMENT)
-#define IS2_HKL_L3_FRAG_OFS_GT0 1
-#define IS2_HKO_L3_OPTIONS (IS2_HKO_L3_FRAG_OFS_GT0 + IS2_HKL_L3_FRAG_OFS_GT0)
-#define IS2_HKL_L3_OPTIONS 1
-#define IS2_HKO_L3_TTL_GT0 (IS2_HKO_L3_OPTIONS + IS2_HKL_L3_OPTIONS)
-#define IS2_HKL_L3_TTL_GT0 1
-#define IS2_HKO_L3_TOS (IS2_HKO_L3_TTL_GT0 + IS2_HKL_L3_TTL_GT0)
-#define IS2_HKL_L3_TOS 8
-#define IS2_HKO_L3_IP4_DIP (IS2_HKO_L3_TOS + IS2_HKL_L3_TOS)
-#define IS2_HKL_L3_IP4_DIP 32
-#define IS2_HKO_L3_IP4_SIP (IS2_HKO_L3_IP4_DIP + IS2_HKL_L3_IP4_DIP)
-#define IS2_HKL_L3_IP4_SIP 32
-#define IS2_HKO_DIP_EQ_SIP (IS2_HKO_L3_IP4_SIP + IS2_HKL_L3_IP4_SIP)
-#define IS2_HKL_DIP_EQ_SIP 1
-
-/* IS2 half key - IP4_TCP_UDP */
-#define IS2_HKO_IP4_TCP_UDP_TCP (IS2_HKO_DIP_EQ_SIP + IS2_HKL_DIP_EQ_SIP)
-#define IS2_HKL_IP4_TCP_UDP_TCP 1
-#define IS2_HKO_IP4_TCP_UDP_L4_DPORT \
- (IS2_HKO_IP4_TCP_UDP_TCP + IS2_HKL_IP4_TCP_UDP_TCP)
-#define IS2_HKL_IP4_TCP_UDP_L4_DPORT 16
-#define IS2_HKO_IP4_TCP_UDP_L4_SPORT \
- (IS2_HKO_IP4_TCP_UDP_L4_DPORT + IS2_HKL_IP4_TCP_UDP_L4_DPORT)
-#define IS2_HKL_IP4_TCP_UDP_L4_SPORT 16
-#define IS2_HKO_IP4_TCP_UDP_L4_RNG \
- (IS2_HKO_IP4_TCP_UDP_L4_SPORT + IS2_HKL_IP4_TCP_UDP_L4_SPORT)
-#define IS2_HKL_IP4_TCP_UDP_L4_RNG 8
-#define IS2_HKO_IP4_TCP_UDP_SPORT_EQ_DPORT \
- (IS2_HKO_IP4_TCP_UDP_L4_RNG + IS2_HKL_IP4_TCP_UDP_L4_RNG)
-#define IS2_HKL_IP4_TCP_UDP_SPORT_EQ_DPORT 1
-#define IS2_HKO_IP4_TCP_UDP_SEQUENCE_EQ0 \
- (IS2_HKO_IP4_TCP_UDP_SPORT_EQ_DPORT + \
- IS2_HKL_IP4_TCP_UDP_SPORT_EQ_DPORT)
-#define IS2_HKL_IP4_TCP_UDP_SEQUENCE_EQ0 1
-#define IS2_HKO_IP4_TCP_UDP_L4_FIN \
- (IS2_HKO_IP4_TCP_UDP_SEQUENCE_EQ0 + IS2_HKL_IP4_TCP_UDP_SEQUENCE_EQ0)
-#define IS2_HKL_IP4_TCP_UDP_L4_FIN 1
-#define IS2_HKO_IP4_TCP_UDP_L4_SYN \
- (IS2_HKO_IP4_TCP_UDP_L4_FIN + IS2_HKL_IP4_TCP_UDP_L4_FIN)
-#define IS2_HKL_IP4_TCP_UDP_L4_SYN 1
-#define IS2_HKO_IP4_TCP_UDP_L4_RST \
- (IS2_HKO_IP4_TCP_UDP_L4_SYN + IS2_HKL_IP4_TCP_UDP_L4_SYN)
-#define IS2_HKL_IP4_TCP_UDP_L4_RST 1
-#define IS2_HKO_IP4_TCP_UDP_L4_PSH \
- (IS2_HKO_IP4_TCP_UDP_L4_RST + IS2_HKL_IP4_TCP_UDP_L4_RST)
-#define IS2_HKL_IP4_TCP_UDP_L4_PSH 1
-#define IS2_HKO_IP4_TCP_UDP_L4_ACK \
- (IS2_HKO_IP4_TCP_UDP_L4_PSH + IS2_HKL_IP4_TCP_UDP_L4_PSH)
-#define IS2_HKL_IP4_TCP_UDP_L4_ACK 1
-#define IS2_HKO_IP4_TCP_UDP_L4_URG \
- (IS2_HKO_IP4_TCP_UDP_L4_ACK + IS2_HKL_IP4_TCP_UDP_L4_ACK)
-#define IS2_HKL_IP4_TCP_UDP_L4_URG 1
-#define IS2_HKO_IP4_TCP_UDP_L4_1588_DOM \
- (IS2_HKO_IP4_TCP_UDP_L4_URG + IS2_HKL_IP4_TCP_UDP_L4_URG)
-#define IS2_HKL_IP4_TCP_UDP_L4_1588_DOM 8
-#define IS2_HKO_IP4_TCP_UDP_L4_1588_VER \
- (IS2_HKO_IP4_TCP_UDP_L4_1588_DOM + IS2_HKL_IP4_TCP_UDP_L4_1588_DOM)
-#define IS2_HKL_IP4_TCP_UDP_L4_1588_VER 4
-
-/* IS2 half key - IP4_OTHER */
-#define IS2_HKO_IP4_OTHER_L3_PROTO IS2_HKO_IP4_TCP_UDP_TCP
-#define IS2_HKL_IP4_OTHER_L3_PROTO 8
-#define IS2_HKO_IP4_OTHER_L3_PAYLOAD \
- (IS2_HKO_IP4_OTHER_L3_PROTO + IS2_HKL_IP4_OTHER_L3_PROTO)
-#define IS2_HKL_IP4_OTHER_L3_PAYLOAD 56
-
-/* IS2 half key - IP6_STD */
-#define IS2_HKO_IP6_STD_L3_TTL_GT0 IS2_HKO_L2_DMAC
-#define IS2_HKL_IP6_STD_L3_TTL_GT0 1
-#define IS2_HKO_IP6_STD_L3_IP6_SIP \
- (IS2_HKO_IP6_STD_L3_TTL_GT0 + IS2_HKL_IP6_STD_L3_TTL_GT0)
-#define IS2_HKL_IP6_STD_L3_IP6_SIP 128
-#define IS2_HKO_IP6_STD_L3_PROTO \
- (IS2_HKO_IP6_STD_L3_IP6_SIP + IS2_HKL_IP6_STD_L3_IP6_SIP)
-#define IS2_HKL_IP6_STD_L3_PROTO 8
-
-/* IS2 half key - OAM */
-#define IS2_HKO_OAM_OAM_MEL_FLAGS IS2_HKO_MAC_ETYPE_ETYPE
-#define IS2_HKL_OAM_OAM_MEL_FLAGS 7
-#define IS2_HKO_OAM_OAM_VER \
- (IS2_HKO_OAM_OAM_MEL_FLAGS + IS2_HKL_OAM_OAM_MEL_FLAGS)
-#define IS2_HKL_OAM_OAM_VER 5
-#define IS2_HKO_OAM_OAM_OPCODE (IS2_HKO_OAM_OAM_VER + IS2_HKL_OAM_OAM_VER)
-#define IS2_HKL_OAM_OAM_OPCODE 8
-#define IS2_HKO_OAM_OAM_FLAGS (IS2_HKO_OAM_OAM_OPCODE + IS2_HKL_OAM_OAM_OPCODE)
-#define IS2_HKL_OAM_OAM_FLAGS 8
-#define IS2_HKO_OAM_OAM_MEPID (IS2_HKO_OAM_OAM_FLAGS + IS2_HKL_OAM_OAM_FLAGS)
-#define IS2_HKL_OAM_OAM_MEPID 16
-#define IS2_HKO_OAM_OAM_CCM_CNTS_EQ0 \
- (IS2_HKO_OAM_OAM_MEPID + IS2_HKL_OAM_OAM_MEPID)
-#define IS2_HKL_OAM_OAM_CCM_CNTS_EQ0 1
-
-/* IS2 half key - SMAC_SIP6 */
-#define IS2_HKO_SMAC_SIP6_IGR_PORT IS2_HKL_TYPE
-#define IS2_HKL_SMAC_SIP6_IGR_PORT VCAP_PORT_WIDTH
-#define IS2_HKO_SMAC_SIP6_L2_SMAC \
- (IS2_HKO_SMAC_SIP6_IGR_PORT + IS2_HKL_SMAC_SIP6_IGR_PORT)
-#define IS2_HKL_SMAC_SIP6_L2_SMAC 48
-#define IS2_HKO_SMAC_SIP6_L3_IP6_SIP \
- (IS2_HKO_SMAC_SIP6_L2_SMAC + IS2_HKL_SMAC_SIP6_L2_SMAC)
-#define IS2_HKL_SMAC_SIP6_L3_IP6_SIP 128
-
-/* IS2 full key - common */
-#define IS2_FKO_TYPE 0
-#define IS2_FKL_TYPE 2
-#define IS2_FKO_FIRST (IS2_FKO_TYPE + IS2_FKL_TYPE)
-#define IS2_FKL_FIRST 1
-#define IS2_FKO_PAG (IS2_FKO_FIRST + IS2_FKL_FIRST)
-#define IS2_FKL_PAG 8
-#define IS2_FKO_IGR_PORT_MASK (IS2_FKO_PAG + IS2_FKL_PAG)
-#define IS2_FKL_IGR_PORT_MASK (VCAP_PORT_CNT + 1)
-#define IS2_FKO_SERVICE_FRM (IS2_FKO_IGR_PORT_MASK + IS2_FKL_IGR_PORT_MASK)
-#define IS2_FKL_SERVICE_FRM 1
-#define IS2_FKO_HOST_MATCH (IS2_FKO_SERVICE_FRM + IS2_FKL_SERVICE_FRM)
-#define IS2_FKL_HOST_MATCH 1
-#define IS2_FKO_L2_MC (IS2_FKO_HOST_MATCH + IS2_FKL_HOST_MATCH)
-#define IS2_FKL_L2_MC 1
-#define IS2_FKO_L2_BC (IS2_FKO_L2_MC + IS2_FKL_L2_MC)
-#define IS2_FKL_L2_BC 1
-#define IS2_FKO_VLAN_TAGGED (IS2_FKO_L2_BC + IS2_FKL_L2_BC)
-#define IS2_FKL_VLAN_TAGGED 1
-#define IS2_FKO_VID (IS2_FKO_VLAN_TAGGED + IS2_FKL_VLAN_TAGGED)
-#define IS2_FKL_VID 12
-#define IS2_FKO_DEI (IS2_FKO_VID + IS2_FKL_VID)
-#define IS2_FKL_DEI 1
-#define IS2_FKO_PCP (IS2_FKO_DEI + IS2_FKL_DEI)
-#define IS2_FKL_PCP 3
-
-/* IS2 full key - IP6_TCP_UDP/IP6_OTHER common */
-#define IS2_FKO_L3_TTL_GT0 (IS2_FKO_PCP + IS2_FKL_PCP)
-#define IS2_FKL_L3_TTL_GT0 1
-#define IS2_FKO_L3_TOS (IS2_FKO_L3_TTL_GT0 + IS2_FKL_L3_TTL_GT0)
-#define IS2_FKL_L3_TOS 8
-#define IS2_FKO_L3_IP6_DIP (IS2_FKO_L3_TOS + IS2_FKL_L3_TOS)
-#define IS2_FKL_L3_IP6_DIP 128
-#define IS2_FKO_L3_IP6_SIP (IS2_FKO_L3_IP6_DIP + IS2_FKL_L3_IP6_DIP)
-#define IS2_FKL_L3_IP6_SIP 128
-#define IS2_FKO_DIP_EQ_SIP (IS2_FKO_L3_IP6_SIP + IS2_FKL_L3_IP6_SIP)
-#define IS2_FKL_DIP_EQ_SIP 1
-
-/* IS2 full key - IP6_TCP_UDP */
-#define IS2_FKO_IP6_TCP_UDP_TCP (IS2_FKO_DIP_EQ_SIP + IS2_FKL_DIP_EQ_SIP)
-#define IS2_FKL_IP6_TCP_UDP_TCP 1
-#define IS2_FKO_IP6_TCP_UDP_L4_DPORT \
- (IS2_FKO_IP6_TCP_UDP_TCP + IS2_FKL_IP6_TCP_UDP_TCP)
-#define IS2_FKL_IP6_TCP_UDP_L4_DPORT 16
-#define IS2_FKO_IP6_TCP_UDP_L4_SPORT \
- (IS2_FKO_IP6_TCP_UDP_L4_DPORT + IS2_FKL_IP6_TCP_UDP_L4_DPORT)
-#define IS2_FKL_IP6_TCP_UDP_L4_SPORT 16
-#define IS2_FKO_IP6_TCP_UDP_L4_RNG \
- (IS2_FKO_IP6_TCP_UDP_L4_SPORT + IS2_FKL_IP6_TCP_UDP_L4_SPORT)
-#define IS2_FKL_IP6_TCP_UDP_L4_RNG 8
-#define IS2_FKO_IP6_TCP_UDP_SPORT_EQ_DPORT \
- (IS2_FKO_IP6_TCP_UDP_L4_RNG + IS2_FKL_IP6_TCP_UDP_L4_RNG)
-#define IS2_FKL_IP6_TCP_UDP_SPORT_EQ_DPORT 1
-#define IS2_FKO_IP6_TCP_UDP_SEQUENCE_EQ0 \
- (IS2_FKO_IP6_TCP_UDP_SPORT_EQ_DPORT + \
- IS2_FKL_IP6_TCP_UDP_SPORT_EQ_DPORT)
-#define IS2_FKL_IP6_TCP_UDP_SEQUENCE_EQ0 1
-#define IS2_FKO_IP6_TCP_UDP_L4_FIN \
- (IS2_FKO_IP6_TCP_UDP_SEQUENCE_EQ0 + IS2_FKL_IP6_TCP_UDP_SEQUENCE_EQ0)
-#define IS2_FKL_IP6_TCP_UDP_L4_FIN 1
-#define IS2_FKO_IP6_TCP_UDP_L4_SYN \
- (IS2_FKO_IP6_TCP_UDP_L4_FIN + IS2_FKL_IP6_TCP_UDP_L4_FIN)
-#define IS2_FKL_IP6_TCP_UDP_L4_SYN 1
-#define IS2_FKO_IP6_TCP_UDP_L4_RST \
- (IS2_FKO_IP6_TCP_UDP_L4_SYN + IS2_FKL_IP6_TCP_UDP_L4_SYN)
-#define IS2_FKL_IP6_TCP_UDP_L4_RST 1
-#define IS2_FKO_IP6_TCP_UDP_L4_PSH \
- (IS2_FKO_IP6_TCP_UDP_L4_RST + IS2_FKL_IP6_TCP_UDP_L4_RST)
-#define IS2_FKL_IP6_TCP_UDP_L4_PSH 1
-#define IS2_FKO_IP6_TCP_UDP_L4_ACK \
- (IS2_FKO_IP6_TCP_UDP_L4_PSH + IS2_FKL_IP6_TCP_UDP_L4_PSH)
-#define IS2_FKL_IP6_TCP_UDP_L4_ACK 1
-#define IS2_FKO_IP6_TCP_UDP_L4_URG \
- (IS2_FKO_IP6_TCP_UDP_L4_ACK + IS2_FKL_IP6_TCP_UDP_L4_ACK)
-#define IS2_FKL_IP6_TCP_UDP_L4_URG 1
-#define IS2_FKO_IP6_TCP_UDP_L4_1588_DOM \
- (IS2_FKO_IP6_TCP_UDP_L4_URG + IS2_FKL_IP6_TCP_UDP_L4_URG)
-#define IS2_FKL_IP6_TCP_UDP_L4_1588_DOM 8
-#define IS2_FKO_IP6_TCP_UDP_L4_1588_VER \
- (IS2_FKO_IP6_TCP_UDP_L4_1588_DOM + IS2_FKL_IP6_TCP_UDP_L4_1588_DOM)
-#define IS2_FKL_IP6_TCP_UDP_L4_1588_VER 4
-
-/* IS2 full key - IP6_OTHER */
-#define IS2_FKO_IP6_OTHER_L3_PROTO IS2_FKO_IP6_TCP_UDP_TCP
-#define IS2_FKL_IP6_OTHER_L3_PROTO 8
-#define IS2_FKO_IP6_OTHER_L3_PAYLOAD \
- (IS2_FKO_IP6_OTHER_L3_PROTO + IS2_FKL_IP6_OTHER_L3_PROTO)
-#define IS2_FKL_IP6_OTHER_L3_PAYLOAD 56
-
-/* IS2 full key - CUSTOM */
-#define IS2_FKO_CUSTOM_CUSTOM_TYPE IS2_FKO_L3_TTL_GT0
-#define IS2_FKL_CUSTOM_CUSTOM_TYPE 1
-#define IS2_FKO_CUSTOM_CUSTOM \
- (IS2_FKO_CUSTOM_CUSTOM_TYPE + IS2_FKL_CUSTOM_CUSTOM_TYPE)
-#define IS2_FKL_CUSTOM_CUSTOM 320
-
-/* IS2 action - BASE_TYPE */
-#define IS2_AO_HIT_ME_ONCE 0
-#define IS2_AL_HIT_ME_ONCE 1
-#define IS2_AO_CPU_COPY_ENA (IS2_AO_HIT_ME_ONCE + IS2_AL_HIT_ME_ONCE)
-#define IS2_AL_CPU_COPY_ENA 1
-#define IS2_AO_CPU_QU_NUM (IS2_AO_CPU_COPY_ENA + IS2_AL_CPU_COPY_ENA)
-#define IS2_AL_CPU_QU_NUM 3
-#define IS2_AO_MASK_MODE (IS2_AO_CPU_QU_NUM + IS2_AL_CPU_QU_NUM)
-#define IS2_AL_MASK_MODE 2
-#define IS2_AO_MIRROR_ENA (IS2_AO_MASK_MODE + IS2_AL_MASK_MODE)
-#define IS2_AL_MIRROR_ENA 1
-#define IS2_AO_LRN_DIS (IS2_AO_MIRROR_ENA + IS2_AL_MIRROR_ENA)
-#define IS2_AL_LRN_DIS 1
-#define IS2_AO_POLICE_ENA (IS2_AO_LRN_DIS + IS2_AL_LRN_DIS)
-#define IS2_AL_POLICE_ENA 1
-#define IS2_AO_POLICE_IDX (IS2_AO_POLICE_ENA + IS2_AL_POLICE_ENA)
-#define IS2_AL_POLICE_IDX 9
-#define IS2_AO_POLICE_VCAP_ONLY (IS2_AO_POLICE_IDX + IS2_AL_POLICE_IDX)
-#define IS2_AL_POLICE_VCAP_ONLY 1
-#define IS2_AO_PORT_MASK (IS2_AO_POLICE_VCAP_ONLY + IS2_AL_POLICE_VCAP_ONLY)
-#define IS2_AL_PORT_MASK VCAP_PORT_CNT
-#define IS2_AO_REW_OP (IS2_AO_PORT_MASK + IS2_AL_PORT_MASK)
-#define IS2_AL_REW_OP 9
-#define IS2_AO_LM_CNT_DIS (IS2_AO_REW_OP + IS2_AL_REW_OP)
-#define IS2_AL_LM_CNT_DIS 1
-#define IS2_AO_ISDX_ENA \
- (IS2_AO_LM_CNT_DIS + IS2_AL_LM_CNT_DIS + 1) /* Reserved bit */
-#define IS2_AL_ISDX_ENA 1
-#define IS2_AO_ACL_ID (IS2_AO_ISDX_ENA + IS2_AL_ISDX_ENA)
-#define IS2_AL_ACL_ID 6
-
-/* IS2 action - SMAC_SIP */
-#define IS2_AO_SMAC_SIP_CPU_COPY_ENA 0
-#define IS2_AL_SMAC_SIP_CPU_COPY_ENA 1
-#define IS2_AO_SMAC_SIP_CPU_QU_NUM 1
-#define IS2_AL_SMAC_SIP_CPU_QU_NUM 3
-#define IS2_AO_SMAC_SIP_FWD_KILL_ENA 4
-#define IS2_AL_SMAC_SIP_FWD_KILL_ENA 1
-#define IS2_AO_SMAC_SIP_HOST_MATCH 5
-#define IS2_AL_SMAC_SIP_HOST_MATCH 1
-
-#endif /* _OCELOT_VCAP_H_ */
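The removed VCAP macros encode each key field's bit offset (IS2_FKO_*) as the previous field's offset plus its length (IS2_FKL_*), so the whole key layout is a compile-time chain and inserting a field automatically shifts everything after it. A minimal sketch of the pattern, with hypothetical FIELD_A/FIELD_B names rather than real Ocelot fields:

/* Offset-chaining sketch: FKO = bit offset, FKL = length in bits.
 * FIELD_A and FIELD_B are hypothetical, for illustration only.
 */
#define FKO_FIELD_A	0
#define FKL_FIELD_A	16
#define FKO_FIELD_B	(FKO_FIELD_A + FKL_FIELD_A)	/* bit 16 */
#define FKL_FIELD_B	8
#define FKO_NEXT	(FKO_FIELD_B + FKL_FIELD_B)	/* bit 24 */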
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 2ee0d0be113a..2616fd735aab 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1920,6 +1920,7 @@ myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
}
static const struct ethtool_ops myri10ge_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = myri10ge_get_drvinfo,
.get_coalesce = myri10ge_get_coalesce,
.set_coalesce = myri10ge_set_coalesce,
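Setting .supported_coalesce_params lets the ethtool core do the parameter policing: any ethtool_coalesce field not covered by the declared mask is rejected with -EOPNOTSUPP before ->set_coalesce() is called. That is why the hand-rolled field checks disappear from the nfp, nixge, and ionic set_coalesce handlers later in this series. A minimal sketch, with hypothetical example_* handler names:

static const struct ethtool_ops example_ethtool_ops = {
	/* the core rejects every other non-zero coalesce field for us */
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_coalesce	= example_get_coalesce,	/* hypothetical */
	.set_coalesce	= example_set_coalesce,	/* hypothetical */
};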
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 51fa82b429a3..bfa0c0d39600 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -147,39 +147,12 @@ static int sonic_probe1(struct net_device *dev)
dev->dev_addr[i*2+1] = val >> 8;
}
- err = -ENOMEM;
-
- /* Initialize the device structure. */
-
lp->dma_bitmode = SONIC_BITMODE32;
- /* Allocate the entire chunk of memory for the descriptors.
- Note that this cannot cross a 64K boundary. */
- lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC *
- SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr,
- GFP_KERNEL);
- if (lp->descriptors == NULL)
+ err = sonic_alloc_descriptors(dev);
+ if (err)
goto out;
- /* Now set up the pointers to point to the appropriate places */
- lp->cda = lp->descriptors;
- lp->tda = lp->cda + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
-
- lp->cda_laddr = lp->descriptors_laddr;
- lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
-
dev->netdev_ops = &sonic_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 0937fc2a928e..1b5559aacb38 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -114,17 +114,6 @@ static inline void bit_reverse_addr(unsigned char addr[6])
addr[i] = bitrev8(addr[i]);
}
-static irqreturn_t macsonic_interrupt(int irq, void *dev_id)
-{
- irqreturn_t result;
- unsigned long flags;
-
- local_irq_save(flags);
- result = sonic_interrupt(irq, dev_id);
- local_irq_restore(flags);
- return result;
-}
-
static int macsonic_open(struct net_device* dev)
{
int retval;
@@ -135,12 +124,12 @@ static int macsonic_open(struct net_device* dev)
dev->name, dev->irq);
goto err;
}
- /* Under the A/UX interrupt scheme, the onboard SONIC interrupt comes
- * in at priority level 3. However, we sometimes get the level 2 inter-
- * rupt as well, which must prevent re-entrance of the sonic handler.
+ /* Under the A/UX interrupt scheme, the onboard SONIC interrupt gets
+ * moved from level 2 to level 3. Unfortunately we still get some
+ * level 2 interrupts, so register the handler for both.
*/
if (dev->irq == IRQ_AUTO_3) {
- retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt, 0,
+ retval = request_irq(IRQ_NUBUS_9, sonic_interrupt, 0,
"sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
@@ -186,33 +175,10 @@ static const struct net_device_ops macsonic_netdev_ops = {
static int macsonic_init(struct net_device *dev)
{
struct sonic_local* lp = netdev_priv(dev);
+ int err = sonic_alloc_descriptors(dev);
- /* Allocate the entire chunk of memory for the descriptors.
- Note that this cannot cross a 64K boundary. */
- lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC *
- SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr,
- GFP_KERNEL);
- if (lp->descriptors == NULL)
- return -ENOMEM;
-
- /* Now set up the pointers to point to the appropriate places */
- lp->cda = lp->descriptors;
- lp->tda = lp->cda + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
-
- lp->cda_laddr = lp->descriptors_laddr;
- lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
+ if (err)
+ return err;
dev->netdev_ops = &macsonic_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 31be3ba66877..dd3605aa5f23 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -50,6 +50,42 @@ static void sonic_msg_init(struct net_device *dev)
netif_dbg(lp, drv, dev, "%s", version);
}
+static int sonic_alloc_descriptors(struct net_device *dev)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+
+ /* Allocate a chunk of memory for the descriptors. Note that this
+ * must not cross a 64K boundary. It is smaller than one page, which
+ * means that page alignment is a sufficient condition.
+ */
+ lp->descriptors =
+ dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC *
+ SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr, GFP_KERNEL);
+
+ if (!lp->descriptors)
+ return -ENOMEM;
+
+ lp->cda = lp->descriptors;
+ lp->tda = lp->cda + SIZEOF_SONIC_CDA *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+ lp->rda = lp->tda + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+ lp->rra = lp->rda + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+
+ lp->cda_laddr = lp->descriptors_laddr;
+ lp->tda_laddr = lp->cda_laddr + SIZEOF_SONIC_CDA *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+ lp->rda_laddr = lp->tda_laddr + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+ lp->rra_laddr = lp->rda_laddr + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
+ SONIC_BUS_SCALE(lp->dma_bitmode);
+
+ return 0;
+}
+
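The helper leans on the DMA API guarantee that dma_alloc_coherent() returns memory aligned to at least PAGE_SIZE; since the descriptor block is smaller than a page, it cannot straddle a 64 KiB boundary. A hypothetical assertion of that invariant, not part of the driver:

/* True iff [laddr, laddr + size) crosses a 64 KiB boundary; with a
 * page-aligned laddr and size < PAGE_SIZE this can never happen.
 */
static bool crosses_64k(dma_addr_t laddr, size_t size)
{
	return (laddr >> 16) != ((laddr + size - 1) >> 16);
}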
/*
* Open/initialize the SONIC controller.
*
@@ -264,7 +300,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
- entry = lp->next_tx;
+ entry = (lp->eol_tx + 1) & SONIC_TDS_MASK;
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
@@ -275,27 +311,26 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
sonic_tda_put(dev, entry, SONIC_TD_LINK,
sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
- wmb();
+ sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK, ~SONIC_EOL &
+ sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK));
+
+ netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);
+
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+
lp->tx_len[entry] = length;
lp->tx_laddr[entry] = laddr;
lp->tx_skb[entry] = skb;
- wmb();
- sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
- sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
lp->eol_tx = entry;
- lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
- if (lp->tx_skb[lp->next_tx] != NULL) {
+ entry = (entry + 1) & SONIC_TDS_MASK;
+ if (lp->tx_skb[entry]) {
/* The ring is full, the ISR has yet to process the next TD. */
netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
netif_stop_queue(dev);
/* after this packet, wait for ISR to free up some TDAs */
- } else netif_start_queue(dev);
-
- netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);
-
- SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ }
spin_unlock_irqrestore(&lp->lock, flags);
@@ -594,11 +629,6 @@ static void sonic_rx(struct net_device *dev)
if (rbe)
SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
- /*
- * If any worth-while packets have been received, netif_rx()
- * has done a mark_bh(NET_BH) for us and will work on them
- * when we get to the bottom-half routine.
- */
}
@@ -780,7 +810,7 @@ static int sonic_init(struct net_device *dev)
SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
- lp->cur_tx = lp->next_tx = 0;
+ lp->cur_tx = 0;
lp->eol_tx = SONIC_NUM_TDS - 1;
/*
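With next_tx gone, sonic_send_packet() derives the next free descriptor from eol_tx, links the new descriptor in with its EOL bit set, clears EOL on the previous tail, and only then kicks the transmitter, so the explicit wmb() pairing is no longer needed. The ring-full test reduces to checking whether the slot after the one just queued still owns an skb; a hypothetical helper spelling that out:

static bool sonic_tx_ring_full(struct sonic_local *lp, unsigned int entry)
{
	/* the slot following the freshly queued TD is still unreclaimed */
	return lp->tx_skb[(entry + 1) & SONIC_TDS_MASK] != NULL;
}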
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
index e0e4cba6f6f6..3cbb62c860c8 100644
--- a/drivers/net/ethernet/natsemi/sonic.h
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -321,7 +321,6 @@ struct sonic_local {
unsigned int cur_tx; /* first unacked transmit packet */
unsigned int eol_rx;
unsigned int eol_tx; /* last unacked transmit packet */
- unsigned int next_tx; /* next free TD */
int msg_enable;
struct device *device; /* generic device */
struct net_device_stats stats;
@@ -342,6 +341,7 @@ static void sonic_multicast_list(struct net_device *dev);
static int sonic_init(struct net_device *dev);
static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void sonic_msg_init(struct net_device *dev);
+static int sonic_alloc_descriptors(struct net_device *dev);
/* Internal inlines for reading/writing DMA buffers. Note that bus
size and endianness matter here, whereas they don't for registers,
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index e1b886e87a76..dda9ec7d9cee 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -167,47 +167,11 @@ static int __init sonic_probe1(struct net_device *dev)
dev->dev_addr[i*2+1] = val >> 8;
}
- /* Initialize the device structure. */
-
lp->dma_bitmode = SONIC_BITMODE32;
- /*
- * Allocate local private descriptor areas in uncached space.
- * The entire structure must be located within the same 64kb segment.
- * A simple way to ensure this is to allocate twice the
- * size of the structure -- given that the structure is
- * much less than 64 kB, at least one of the halves of
- * the allocated area will be contained entirely in 64 kB.
- * We also allocate extra space for a pointer to allow freeing
- * this structure later on (in xtsonic_cleanup_module()).
- */
- lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC *
- SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr,
- GFP_KERNEL);
- if (lp->descriptors == NULL) {
- err = -ENOMEM;
+ err = sonic_alloc_descriptors(dev);
+ if (err)
goto out;
- }
-
- lp->cda = lp->descriptors;
- lp->tda = lp->cda + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
-
- /* get the virtual dma address */
-
- lp->cda_laddr = lp->descriptors_laddr;
- lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
- lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
dev->netdev_ops = &xtsonic_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index a83a0ad5e27d..4268a7e0f344 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -104,14 +104,14 @@ struct cmsg_req_map_op {
__be32 tid;
__be32 count;
__be32 flags;
- u8 data[0];
+ u8 data[];
};
struct cmsg_reply_map_op {
struct cmsg_reply_map_simple reply_hdr;
__be32 count;
__be32 resv;
- u8 data[0];
+ u8 data[];
};
struct cmsg_bpf_event {
@@ -120,6 +120,6 @@ struct cmsg_bpf_event {
__be64 map_ptr;
__be32 data_size;
__be32 pkt_size;
- u8 data[0];
+ u8 data[];
};
#endif
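The data[0] members are GNU zero-length arrays; u8 data[] is the C99 flexible array member spelling the kernel has been converting to, which lets the compiler and fortify checks flag misuse while sizeof the struct still excludes the trailing array. Allocation pairs naturally with struct_size() from linux/overflow.h; a sketch with a hypothetical cmsg_example type:

struct cmsg_example {			/* hypothetical */
	__be32 count;
	u8 data[];			/* flexible array member */
};

struct cmsg_example *msg;

/* header plus n payload bytes, with overflow-checked arithmetic;
 * n is the payload byte count (hypothetical)
 */
msg = kzalloc(struct_size(msg, data, n), GFP_KERNEL);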
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index c06600fb47ff..1c76e1592ca2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -1207,6 +1207,10 @@ int nfp_flower_compile_action(struct nfp_app *app,
bool pkt_host = false;
u32 csum_updated = 0;
+ if (!flow_action_hw_stats_check(&flow->rule->action, extack,
+ FLOW_ACTION_HW_STATS_DELAYED_BIT))
+ return -EOPNOTSUPP;
+
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
nfp_flow->meta.act_len = 0;
tun_type = NFP_FL_TUNNEL_NONE;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 9b50d76bbc09..bf516285510f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -587,7 +587,7 @@ struct nfp_flower_cmsg_mac_repr {
u8 info;
u8 nbi_port;
u8 phys_port;
- } ports[0];
+ } ports[];
};
#define NFP_FLOWER_CMSG_MAC_REPR_NBI GENMASK(1, 0)
@@ -619,7 +619,7 @@ struct nfp_flower_cmsg_merge_hint {
struct {
__be32 host_ctx;
__be64 host_cookie;
- } __packed flow[0];
+ } __packed flow[];
};
enum nfp_flower_cmsg_port_type {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 7ca5c1becfcf..c694dbc239d0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1490,7 +1490,8 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
nfp_flower_update_merge_stats(app, nfp_flow);
flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
- priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);
+ priv->stats[ctx_id].pkts, priv->stats[ctx_id].used,
+ FLOW_ACTION_HW_STATS_DELAYED);
priv->stats[ctx_id].pkts = 0;
priv->stats[ctx_id].bytes = 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 124a43dc136a..d18a830e4264 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -320,7 +320,8 @@ nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
spin_unlock_bh(&fl_priv->qos_stats_lock);
flow_stats_update(&flow->stats, diff_bytes, diff_pkts,
- repr_priv->qos_table.last_update);
+ repr_priv->qos_table.last_update,
+ FLOW_ACTION_HW_STATS_DELAYED);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index c50fce42f473..07dbf4d72227 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -211,7 +211,7 @@ static const struct nfp_devlink_versions {
enum nfp_nsp_versions id;
const char *key;
} nfp_devlink_versions_nsp[] = {
- { NFP_VERSIONS_BUNDLE, "fw.bundle_id", },
+ { NFP_VERSIONS_BUNDLE, DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, },
{ NFP_VERSIONS_BSP, DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, },
{ NFP_VERSIONS_CPLD, "fw.cpld", },
{ NFP_VERSIONS_APP, DEVLINK_INFO_VERSION_GENERIC_FW_APP, },
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index 5d5812fd9317..fa6b13a05941 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -42,7 +42,7 @@ struct nfp_shared_buf;
*/
struct nfp_dumpspec {
u32 size;
- u8 data[0];
+ u8 data[];
};
/**
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
index 769ceef09756..a614df095b08 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -36,7 +36,7 @@ enum nfp_dumpspec_type {
struct nfp_dump_tl {
__be32 type;
__be32 length; /* chunk length to follow, aligned to 8 bytes */
- char data[0];
+ char data[];
};
/* NFP CPP parameters */
@@ -62,7 +62,7 @@ struct nfp_dumpspec_csr {
struct nfp_dumpspec_rtsym {
struct nfp_dump_tl tl;
- char rtsym[0];
+ char rtsym[];
};
/* header for register dumpable */
@@ -79,7 +79,7 @@ struct nfp_dump_rtsym {
struct nfp_dump_common_cpp cpp;
__be32 error; /* error code encountered while reading */
u8 padded_name_length; /* pad so data starts at 8 byte boundary */
- char rtsym[0];
+ char rtsym[];
/* after padded_name_length, there is dump_length data */
};
@@ -92,7 +92,7 @@ struct nfp_dump_error {
struct nfp_dump_tl tl;
__be32 error;
char padding[4];
- char spec[0];
+ char spec[];
};
/* to track state through debug size calculation TLV traversal */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index d648e32c0520..2779f1526d1e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1343,26 +1343,6 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
unsigned int factor;
- if (ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_coalesce_usecs_irq ||
- ec->tx_max_coalesced_frames_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_rx_coalesce ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval)
- return -EOPNOTSUPP;
-
/* Compute factor used to convert coalesce '_usecs' parameters to
* ME timestamp ticks. There are 16 ME clock cycles for each timestamp
* count.
@@ -1476,6 +1456,8 @@ static int nfp_net_set_channels(struct net_device *netdev,
}
static const struct ethtool_ops nfp_net_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = nfp_net_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = nfp_net_get_ringparam,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index e0f13dfe1f39..48a74accbbd3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -18,7 +18,7 @@ struct nfp_port;
*/
struct nfp_reprs {
unsigned int num_reprs;
- struct net_device __rcu *reprs[0];
+ struct net_device __rcu *reprs[];
};
/**
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 684e4e036c55..a486008eb80a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -1247,19 +1247,16 @@ static void nfp6000_free(struct nfp_cpp *cpp)
static int nfp6000_read_serial(struct device *dev, u8 *serial)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int pos;
- u32 reg;
+ u64 dsn;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- if (!pos) {
+ dsn = pci_get_dsn(pdev);
+ if (!dsn) {
dev_err(dev, "can't find PCIe Serial Number Capability\n");
return -EINVAL;
}
- pci_read_config_dword(pdev, pos + 4, &reg);
- put_unaligned_be16(reg >> 16, serial + 4);
- pci_read_config_dword(pdev, pos + 8, &reg);
- put_unaligned_be32(reg, serial);
+ put_unaligned_be32((u32)(dsn >> 32), serial);
+ put_unaligned_be16((u16)(dsn >> 16), serial + 4);
return 0;
}
@@ -1267,18 +1264,15 @@ static int nfp6000_read_serial(struct device *dev, u8 *serial)
static int nfp6000_get_interface(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int pos;
- u32 reg;
+ u64 dsn;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- if (!pos) {
+ dsn = pci_get_dsn(pdev);
+ if (!dsn) {
dev_err(dev, "can't find PCIe Serial Number Capability\n");
return -EINVAL;
}
- pci_read_config_dword(pdev, pos + 4, &reg);
-
- return reg & 0xffff;
+ return dsn & 0xffff;
}
static const struct nfp_cpp_operations nfp6000_pcie_ops = {
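pci_get_dsn() collapses the find-capability/read-two-dwords dance into one call: it returns the PCIe Device Serial Number as a u64 with the first config dword in bits 31:0 and the second in bits 63:32, or 0 when the capability is absent. The byte extraction above is therefore equivalent to the removed code; a sketch of the unpacking, assuming a 6-byte serial buffer as in this driver:

u64 dsn = pci_get_dsn(pdev);	/* 0 if no DSN capability */

put_unaligned_be32((u32)(dsn >> 32), serial);	  /* serial[0..3] */
put_unaligned_be16((u16)(dsn >> 16), serial + 4); /* serial[4..5] */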
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 1531c1870020..f5360bae6f75 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -183,7 +183,7 @@ struct nfp_eth_table {
bool is_split;
unsigned int fec_modes_supported;
- } ports[0];
+ } ports[];
};
struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 49c7987c2abd..2fdd0753b3af 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1019,27 +1019,6 @@ static int nixge_ethtools_set_coalesce(struct net_device *ndev,
return -EBUSY;
}
- if (ecoalesce->rx_coalesce_usecs ||
- ecoalesce->rx_coalesce_usecs_irq ||
- ecoalesce->rx_max_coalesced_frames_irq ||
- ecoalesce->tx_coalesce_usecs ||
- ecoalesce->tx_coalesce_usecs_irq ||
- ecoalesce->tx_max_coalesced_frames_irq ||
- ecoalesce->stats_block_coalesce_usecs ||
- ecoalesce->use_adaptive_rx_coalesce ||
- ecoalesce->use_adaptive_tx_coalesce ||
- ecoalesce->pkt_rate_low ||
- ecoalesce->rx_coalesce_usecs_low ||
- ecoalesce->rx_max_coalesced_frames_low ||
- ecoalesce->tx_coalesce_usecs_low ||
- ecoalesce->tx_max_coalesced_frames_low ||
- ecoalesce->pkt_rate_high ||
- ecoalesce->rx_coalesce_usecs_high ||
- ecoalesce->rx_max_coalesced_frames_high ||
- ecoalesce->tx_coalesce_usecs_high ||
- ecoalesce->tx_max_coalesced_frames_high ||
- ecoalesce->rate_sample_interval)
- return -EOPNOTSUPP;
if (ecoalesce->rx_max_coalesced_frames)
priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
if (ecoalesce->tx_max_coalesced_frames)
@@ -1083,6 +1062,7 @@ static int nixge_ethtools_set_phys_id(struct net_device *ndev,
}
static const struct ethtool_ops nixge_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = nixge_ethtools_get_drvinfo,
.get_coalesce = nixge_ethtools_get_coalesce,
.set_coalesce = nixge_ethtools_set_coalesce,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index bb106a32f416..23ccc0da2341 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -12,12 +12,12 @@ struct ionic_lif;
#define IONIC_DRV_NAME "ionic"
#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver"
-#define IONIC_DRV_VERSION "0.20.0-k"
#define PCI_VENDOR_ID_PENSANDO 0x1dd8
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
+#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT 0x1004
#define DEVCMD_TIMEOUT 10
@@ -42,6 +42,7 @@ struct ionic {
struct dentry *dentry;
struct ionic_dev_bar bars[IONIC_BARS_MAX];
unsigned int num_bars;
+ bool is_mgmt_nic;
struct ionic_identity ident;
struct list_head lifs;
struct ionic_lif *master_lif;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 448d7b23b2f7..60fc191a35e5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -15,6 +15,7 @@
static const struct pci_device_id ionic_id_table[] = {
{ PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF) },
{ PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF) },
+ { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT) },
{ 0, } /* end of table */
};
MODULE_DEVICE_TABLE(pci, ionic_id_table);
@@ -37,6 +38,9 @@ int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs)
void ionic_bus_free_irq_vectors(struct ionic *ionic)
{
+ if (!ionic->nintrs)
+ return;
+
pci_free_irq_vectors(ionic->pdev);
}
@@ -221,6 +225,9 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, ionic);
mutex_init(&ionic->dev_cmd_lock);
+ ionic->is_mgmt_nic =
+ ent->device == PCI_DEVICE_ID_PENSANDO_IONIC_ETH_MGMT;
+
/* Query system for DMA addressing limitation for the device. */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(IONIC_ADDR_LEN));
if (err) {
@@ -245,6 +252,8 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
+ if (!ionic->is_mgmt_nic)
+ pcie_print_link_status(pdev);
err = ionic_map_bars(ionic);
if (err)
@@ -346,6 +355,11 @@ err_out_reset:
ionic_reset(ionic);
err_out_teardown:
ionic_dev_teardown(ionic);
+ /* Don't fail the probe for these errors; keep
+ * the hw interface around for inspection
+ */
+ return 0;
+
err_out_unmap_bars:
ionic_unmap_bars(ionic);
pci_release_regions(pdev);
@@ -369,11 +383,14 @@ static void ionic_remove(struct pci_dev *pdev)
if (!ionic)
return;
- ionic_devlink_unregister(ionic);
- ionic_lifs_unregister(ionic);
- ionic_lifs_deinit(ionic);
- ionic_lifs_free(ionic);
- ionic_bus_free_irq_vectors(ionic);
+ if (ionic->master_lif) {
+ ionic_devlink_unregister(ionic);
+ ionic_lifs_unregister(ionic);
+ ionic_lifs_deinit(ionic);
+ ionic_lifs_free(ionic);
+ ionic_bus_free_irq_vectors(ionic);
+ }
+
ionic_port_reset(ionic);
ionic_reset(ionic);
ionic_dev_teardown(ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index bc03cecf80cc..5f8fc58d42b3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -228,7 +228,13 @@ DEFINE_SHOW_ATTRIBUTE(netdev);
void ionic_debugfs_add_lif(struct ionic_lif *lif)
{
- lif->dentry = debugfs_create_dir(lif->name, lif->ionic->dentry);
+ struct dentry *lif_dentry;
+
+ lif_dentry = debugfs_create_dir(lif->name, lif->ionic->dentry);
+ if (IS_ERR_OR_NULL(lif_dentry))
+ return;
+ lif->dentry = lif_dentry;
+
debugfs_create_file("netdev", 0400, lif->dentry,
lif->netdev, &netdev_fops);
}
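debugfs_create_dir() reports failure with an ERR_PTR rather than NULL, so caching its return value unchecked would hand an error pointer to the later debugfs_create_file() call as a parent. The guard pattern in a stripped-down, hypothetical form (priv and state_fops are placeholders):

struct dentry *d = debugfs_create_dir("example", NULL);

if (IS_ERR_OR_NULL(d))
	return;		/* debugfs is best-effort; skip the files */
debugfs_create_file("state", 0400, d, priv, &state_fops);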
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 46107de5e6c3..f4ae40ae1e53 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -14,11 +14,15 @@
static void ionic_watchdog_cb(struct timer_list *t)
{
struct ionic *ionic = from_timer(ionic, t, watchdog_timer);
+ int hb;
mod_timer(&ionic->watchdog_timer,
round_jiffies(jiffies + ionic->watchdog_period));
- ionic_heartbeat_check(ionic);
+ hb = ionic_heartbeat_check(ionic);
+
+ if (hb >= 0 && ionic->master_lif)
+ ionic_link_status_check_request(ionic->master_lif);
}
void ionic_init_devinfo(struct ionic *ionic)
@@ -82,6 +86,7 @@ int ionic_dev_setup(struct ionic *ionic)
return -EFAULT;
}
+ idev->last_fw_status = 0xff;
timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
mod_timer(&ionic->watchdog_timer,
@@ -115,8 +120,43 @@ int ionic_heartbeat_check(struct ionic *ionic)
* fw_status != 0xff (bad PCI read)
*/
fw_status = ioread8(&idev->dev_info_regs->fw_status);
- if (fw_status == 0xff ||
- !(fw_status & IONIC_FW_STS_F_RUNNING))
+ if (fw_status != 0xff)
+ fw_status &= IONIC_FW_STS_F_RUNNING; /* use only the run bit */
+
+ /* is this a transition? */
+ if (fw_status != idev->last_fw_status &&
+ idev->last_fw_status != 0xff) {
+ struct ionic_lif *lif = ionic->master_lif;
+ bool trigger = false;
+
+ if (!fw_status || fw_status == 0xff) {
+ dev_info(ionic->dev, "FW stopped %u\n", fw_status);
+ if (lif && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ trigger = true;
+ } else {
+ dev_info(ionic->dev, "FW running %u\n", fw_status);
+ if (lif && test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ trigger = true;
+ }
+
+ if (trigger) {
+ struct ionic_deferred_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ dev_err(ionic->dev, "%s OOM\n", __func__);
+ } else {
+ work->type = IONIC_DW_TYPE_LIF_RESET;
+ if (fw_status & IONIC_FW_STS_F_RUNNING &&
+ fw_status != 0xff)
+ work->fw_status = 1;
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ }
+ }
+ }
+ idev->last_fw_status = fw_status;
+
+ if (!fw_status || fw_status == 0xff)
return -ENXIO;
/* early FW has no heartbeat, else FW will return non-zero */
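The heartbeat check now reduces fw_status to the RUNNING bit and compares it with the cached last_fw_status, so reset work is queued only on actual up/down transitions; 0xff doubles as both the bad-PCI-read value and the "no previous sample" sentinel set in ionic_dev_setup(). The edge-detection idiom in isolation, with hypothetical names:

u8 cur = read_status() & RUNNING_BIT;	/* hypothetical accessors */

if (prev != 0xff && cur != prev)	/* ignore the first sample */
	queue_transition_work(cur);	/* act only on an edge */
prev = cur;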
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 7838e342c4fd..587398b01997 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -132,6 +132,7 @@ struct ionic_dev {
unsigned long last_hb_time;
u32 last_hb;
+ u8 last_fw_status;
u64 __iomem *db_pages;
dma_addr_t phy_db_pages;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index 6fb27dcc5787..273c889faaad 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -77,6 +77,10 @@ int ionic_devlink_register(struct ionic *ionic)
return err;
}
+ /* don't register the mgmt_nic as a port */
+ if (ionic->is_mgmt_nic)
+ return 0;
+
devlink_port_attrs_set(&ionic->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
0, false, 0, NULL, 0);
err = devlink_port_register(dl, &ionic->dl_port, 0);
@@ -93,6 +97,7 @@ void ionic_devlink_unregister(struct ionic *ionic)
{
struct devlink *dl = priv_to_devlink(ionic);
- devlink_port_unregister(&ionic->dl_port);
+ if (ionic->dl_port.registered)
+ devlink_port_unregister(&ionic->dl_port);
devlink_unregister(dl);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index f778fff034f5..6996229facfd 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -3,6 +3,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/sfp.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -86,7 +87,6 @@ static void ionic_get_drvinfo(struct net_device *netdev,
struct ionic *ionic = lif->ionic;
strlcpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, IONIC_DRV_VERSION, sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, ionic_bus_info(ionic),
@@ -412,28 +412,6 @@ static int ionic_set_coalesce(struct net_device *netdev,
unsigned int i;
u32 coal;
- if (coalesce->rx_max_coalesced_frames ||
- coalesce->rx_coalesce_usecs_irq ||
- coalesce->rx_max_coalesced_frames_irq ||
- coalesce->tx_max_coalesced_frames ||
- coalesce->tx_coalesce_usecs_irq ||
- coalesce->tx_max_coalesced_frames_irq ||
- coalesce->stats_block_coalesce_usecs ||
- coalesce->use_adaptive_rx_coalesce ||
- coalesce->use_adaptive_tx_coalesce ||
- coalesce->pkt_rate_low ||
- coalesce->rx_coalesce_usecs_low ||
- coalesce->rx_max_coalesced_frames_low ||
- coalesce->tx_coalesce_usecs_low ||
- coalesce->tx_max_coalesced_frames_low ||
- coalesce->pkt_rate_high ||
- coalesce->rx_coalesce_usecs_high ||
- coalesce->rx_max_coalesced_frames_high ||
- coalesce->tx_coalesce_usecs_high ||
- coalesce->tx_max_coalesced_frames_high ||
- coalesce->rate_sample_interval)
- return -EINVAL;
-
ident = &lif->ionic->ident;
if (ident->dev.intr_coal_div == 0) {
netdev_warn(netdev, "bad HW value in dev.intr_coal_div = %d\n",
@@ -462,7 +440,7 @@ static int ionic_set_coalesce(struct net_device *netdev,
if (coal != lif->rx_coalesce_hw) {
lif->rx_coalesce_hw = coal;
- if (test_bit(IONIC_LIF_UP, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
for (i = 0; i < lif->nxqs; i++) {
qcq = lif->rxqcqs[i].qcq;
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
@@ -509,11 +487,11 @@ static int ionic_set_ringparam(struct net_device *netdev,
ring->rx_pending == lif->nrxq_descs)
return 0;
- err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
if (err)
return err;
- running = test_bit(IONIC_LIF_UP, lif->state);
+ running = test_bit(IONIC_LIF_F_UP, lif->state);
if (running)
ionic_stop(netdev);
@@ -522,7 +500,7 @@ static int ionic_set_ringparam(struct net_device *netdev,
if (running)
ionic_open(netdev);
- clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);
+ clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
return 0;
}
@@ -553,11 +531,11 @@ static int ionic_set_channels(struct net_device *netdev,
if (ch->combined_count == lif->nxqs)
return 0;
- err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
if (err)
return err;
- running = test_bit(IONIC_LIF_UP, lif->state);
+ running = test_bit(IONIC_LIF_F_UP, lif->state);
if (running)
ionic_stop(netdev);
@@ -565,7 +543,7 @@ static int ionic_set_channels(struct net_device *netdev,
if (running)
ionic_open(netdev);
- clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);
+ clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
return 0;
}
@@ -575,7 +553,7 @@ static u32 ionic_get_priv_flags(struct net_device *netdev)
struct ionic_lif *lif = netdev_priv(netdev);
u32 priv_flags = 0;
- if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state))
+ if (test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
priv_flags |= PRIV_F_SW_DBG_STATS;
return priv_flags;
@@ -584,14 +562,10 @@ static u32 ionic_get_priv_flags(struct net_device *netdev)
static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct ionic_lif *lif = netdev_priv(netdev);
- u32 flags = lif->flags;
- clear_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state);
+ clear_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
if (priv_flags & PRIV_F_SW_DBG_STATS)
- set_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state);
-
- if (flags != lif->flags)
- lif->flags = flags;
+ set_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
return 0;
}
@@ -704,23 +678,27 @@ static int ionic_get_module_info(struct net_device *netdev,
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_dev *idev = &lif->ionic->idev;
struct ionic_xcvr_status *xcvr;
+ struct sfp_eeprom_base *sfp;
xcvr = &idev->port_info->status.xcvr;
+ sfp = (struct sfp_eeprom_base *) xcvr->sprom;
/* report the module data type and length */
- switch (xcvr->sprom[0]) {
- case 0x03: /* SFP */
+ switch (sfp->phys_id) {
+ case SFF8024_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
break;
- case 0x0D: /* QSFP */
- case 0x11: /* QSFP28 */
+ case SFF8024_ID_QSFP_8436_8636:
+ case SFF8024_ID_QSFP28_8636:
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
break;
default:
netdev_info(netdev, "unknown xcvr type 0x%02x\n",
xcvr->sprom[0]);
+ modinfo->type = 0;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
break;
}
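Byte 0 of the module EEPROM is the SFF-8024 identifier, and linux/sfp.h provides named constants for the values that were previously open-coded: SFF8024_ID_SFP is 0x03, SFF8024_ID_QSFP_8436_8636 is 0x0d, and SFF8024_ID_QSFP28_8636 is 0x11. The same mapping, sketched in a hypothetical table form:

static const struct {
	u8 phys_id;	/* SFF-8024 identifier, EEPROM byte 0 */
	u32 type;	/* ethtool module type */
	u32 eeprom_len;
} module_map[] = {
	{ SFF8024_ID_SFP,            ETH_MODULE_SFF_8079, ETH_MODULE_SFF_8079_LEN },
	{ SFF8024_ID_QSFP_8436_8636, ETH_MODULE_SFF_8436, ETH_MODULE_SFF_8436_LEN },
	{ SFF8024_ID_QSFP28_8636,    ETH_MODULE_SFF_8436, ETH_MODULE_SFF_8436_LEN },
};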
@@ -784,6 +762,7 @@ static int ionic_nway_reset(struct net_device *netdev)
}
static const struct ethtool_ops ionic_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = ionic_get_drvinfo,
.get_regs_len = ionic_get_regs_len,
.get_regs = ionic_get_regs,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 51adf5059834..ceeb7629e7a0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -4,8 +4,6 @@
#ifndef _IONIC_IF_H_
#define _IONIC_IF_H_
-#pragma pack(push, 1)
-
#define IONIC_DEV_INFO_SIGNATURE 0x44455649 /* 'DEVI' */
#define IONIC_DEV_INFO_VERSION 1
#define IONIC_IFNAMSIZ 16
@@ -366,7 +364,7 @@ union ionic_lif_config {
u8 rsvd2[2];
__le64 features;
__le32 queue_count[IONIC_QTYPE_MAX];
- };
+ } __packed;
__le32 words[64];
};
@@ -417,7 +415,7 @@ union ionic_lif_identity {
__le32 max_frame_size;
u8 rsvd2[106];
union ionic_lif_config config;
- } eth;
+ } __packed eth;
struct {
u8 version;
@@ -439,8 +437,8 @@ union ionic_lif_identity {
struct ionic_lif_logical_qtype rq_qtype;
struct ionic_lif_logical_qtype cq_qtype;
struct ionic_lif_logical_qtype eq_qtype;
- } rdma;
- };
+ } __packed rdma;
+ } __packed;
__le32 words[512];
};
@@ -526,7 +524,7 @@ struct ionic_q_init_cmd {
__le64 sg_ring_base;
__le32 eq_index;
u8 rsvd2[16];
-};
+} __packed;
/**
* struct ionic_q_init_comp - Queue init command completion
@@ -1095,7 +1093,7 @@ struct ionic_port_status {
u8 status;
u8 rsvd[51];
struct ionic_xcvr_status xcvr;
-};
+} __packed;
/**
* struct ionic_port_identify_cmd - Port identify command
@@ -1251,7 +1249,7 @@ struct ionic_port_getattr_comp {
u8 pause_type;
u8 loopback_mode;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1319,7 +1317,7 @@ struct ionic_dev_setattr_cmd {
char name[IONIC_IFNAMSIZ];
__le64 features;
u8 rsvd2[60];
- };
+ } __packed;
};
/**
@@ -1334,7 +1332,7 @@ struct ionic_dev_setattr_comp {
union {
__le64 features;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1361,7 +1359,7 @@ struct ionic_dev_getattr_comp {
union {
__le64 features;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1426,7 +1424,7 @@ struct ionic_lif_setattr_cmd {
} rss;
u8 stats_ctl;
u8 rsvd[60];
- };
+ } __packed;
};
/**
@@ -1444,7 +1442,7 @@ struct ionic_lif_setattr_comp {
union {
__le64 features;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1483,7 +1481,7 @@ struct ionic_lif_getattr_comp {
u8 mac[6];
__le64 features;
u8 rsvd2[11];
- };
+ } __packed;
u8 color;
};
@@ -1688,7 +1686,7 @@ struct ionic_vf_setattr_cmd {
u8 linkstate;
__le64 stats_pa;
u8 pad[60];
- };
+ } __packed;
};
struct ionic_vf_setattr_comp {
@@ -1726,7 +1724,7 @@ struct ionic_vf_getattr_comp {
u8 linkstate;
__le64 stats_pa;
u8 pad[11];
- };
+ } __packed;
u8 color;
};
@@ -2472,7 +2470,7 @@ union ionic_dev_cmd_regs {
union ionic_dev_cmd_comp comp;
u8 rsvd[48];
u32 data[478];
- };
+ } __packed;
u32 words[512];
};
@@ -2485,7 +2483,7 @@ union ionic_dev_regs {
struct {
union ionic_dev_info_regs info;
union ionic_dev_cmd_regs devcmd;
- };
+ } __packed;
__le32 words[1024];
};
@@ -2575,6 +2573,4 @@ struct ionic_identity {
union ionic_qos_identity qos;
};
-#pragma pack(pop)
-
#endif /* _IONIC_IF_H_ */
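Dropping the file-wide #pragma pack(push, 1) in favour of per-struct __packed keeps naturally aligned members at their fast offsets and makes the handful of layouts that genuinely form the device ABI explicit. The difference in one hypothetical struct:

struct example_hw_cmd {		/* hypothetical */
	u8     opcode;
	__le64 addr;	/* offset 8 if unpacked, offset 1 with __packed */
} __packed;		/* byte-exact layout, matching the FW's view */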
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 938e19ee0bcd..4b8a76098ca3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -21,6 +21,12 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
static void ionic_link_status_check(struct ionic_lif *lif);
+static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
+static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
+static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
+
+static int ionic_start_queues(struct ionic_lif *lif);
+static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_deferred_work(struct work_struct *work)
{
@@ -50,6 +56,12 @@ static void ionic_lif_deferred_work(struct work_struct *work)
case IONIC_DW_TYPE_LINK_STATUS:
ionic_link_status_check(lif);
break;
+ case IONIC_DW_TYPE_LIF_RESET:
+ if (w->fw_status)
+ ionic_lif_handle_fw_up(lif);
+ else
+ ionic_lif_handle_fw_down(lif);
+ break;
default:
break;
}
@@ -58,8 +70,8 @@ static void ionic_lif_deferred_work(struct work_struct *work)
}
}
-static void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
- struct ionic_deferred_work *work)
+void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
+ struct ionic_deferred_work *work)
{
spin_lock_bh(&def->lock);
list_add_tail(&work->list, &def->list);
@@ -73,40 +85,47 @@ static void ionic_link_status_check(struct ionic_lif *lif)
u16 link_status;
bool link_up;
+ if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
+ return;
+
+ if (lif->ionic->is_mgmt_nic)
+ return;
+
link_status = le16_to_cpu(lif->info->status.link_status);
link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
- /* filter out the no-change cases */
- if (link_up == netif_carrier_ok(netdev))
- goto link_out;
-
if (link_up) {
- netdev_info(netdev, "Link up - %d Gbps\n",
- le32_to_cpu(lif->info->status.link_speed) / 1000);
+ if (!netif_carrier_ok(netdev)) {
+ u32 link_speed;
- if (test_bit(IONIC_LIF_UP, lif->state)) {
- netif_tx_wake_all_queues(lif->netdev);
+ ionic_port_identify(lif->ionic);
+ link_speed = le32_to_cpu(lif->info->status.link_speed);
+ netdev_info(netdev, "Link up - %d Gbps\n",
+ link_speed / 1000);
netif_carrier_on(netdev);
}
+
+ if (netif_running(lif->netdev))
+ ionic_start_queues(lif);
} else {
- netdev_info(netdev, "Link down\n");
+ if (netif_carrier_ok(netdev)) {
+ netdev_info(netdev, "Link down\n");
+ netif_carrier_off(netdev);
+ }
- /* carrier off first to avoid watchdog timeout */
- netif_carrier_off(netdev);
- if (test_bit(IONIC_LIF_UP, lif->state))
- netif_tx_stop_all_queues(netdev);
+ if (netif_running(lif->netdev))
+ ionic_stop_queues(lif);
}
-link_out:
- clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state);
+ clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}
-static void ionic_link_status_check_request(struct ionic_lif *lif)
+void ionic_link_status_check_request(struct ionic_lif *lif)
{
struct ionic_deferred_work *work;
/* we only need one request outstanding at a time */
- if (test_and_set_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state))
+ if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
return;
if (in_interrupt()) {
@@ -244,38 +263,19 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq)
return ionic_adminq_post_wait(lif, &ctx);
}
-static void ionic_lif_quiesce(struct ionic_lif *lif)
-{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.lif_setattr = {
- .opcode = IONIC_CMD_LIF_SETATTR,
- .attr = IONIC_LIF_ATTR_STATE,
- .index = lif->index,
- .state = IONIC_LIF_DISABLE
- },
- };
-
- ionic_adminq_post_wait(lif, &ctx);
-}
-
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
- struct device *dev = lif->ionic->dev;
if (!qcq)
return;
- ionic_debugfs_del_qcq(qcq);
-
if (!(qcq->flags & IONIC_QCQ_F_INITED))
return;
if (qcq->flags & IONIC_QCQ_F_INTR) {
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
- devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
netif_napi_del(&qcq->napi);
}
@@ -289,12 +289,18 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
if (!qcq)
return;
+ ionic_debugfs_del_qcq(qcq);
+
dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
qcq->base = NULL;
qcq->base_pa = 0;
- if (qcq->flags & IONIC_QCQ_F_INTR)
+ if (qcq->flags & IONIC_QCQ_F_INTR) {
+ irq_set_affinity_hint(qcq->intr.vector, NULL);
+ devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
+ qcq->intr.vector = 0;
ionic_intr_free(lif, qcq->intr.index);
+ }
devm_kfree(dev, qcq->cq.info);
qcq->cq.info = NULL;
@@ -318,19 +324,21 @@ static void ionic_qcqs_free(struct ionic_lif *lif)
lif->adminqcq = NULL;
}
- for (i = 0; i < lif->nxqs; i++)
- if (lif->rxqcqs[i].stats)
- devm_kfree(dev, lif->rxqcqs[i].stats);
-
- devm_kfree(dev, lif->rxqcqs);
- lif->rxqcqs = NULL;
-
- for (i = 0; i < lif->nxqs; i++)
- if (lif->txqcqs[i].stats)
- devm_kfree(dev, lif->txqcqs[i].stats);
+ if (lif->rxqcqs) {
+ for (i = 0; i < lif->nxqs; i++)
+ if (lif->rxqcqs[i].stats)
+ devm_kfree(dev, lif->rxqcqs[i].stats);
+ devm_kfree(dev, lif->rxqcqs);
+ lif->rxqcqs = NULL;
+ }
- devm_kfree(dev, lif->txqcqs);
- lif->txqcqs = NULL;
+ if (lif->txqcqs) {
+ for (i = 0; i < lif->nxqs; i++)
+ if (lif->txqcqs[i].stats)
+ devm_kfree(dev, lif->txqcqs[i].stats);
+ devm_kfree(dev, lif->txqcqs);
+ lif->txqcqs = NULL;
+ }
}
static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
@@ -424,8 +432,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
IONIC_INTR_MASK_SET);
- new->intr.cpu = new->intr.index % num_online_cpus();
- if (cpu_online(new->intr.cpu))
+ err = ionic_request_irq(lif, new);
+ if (err) {
+ netdev_warn(lif->netdev, "irq request failed %d\n", err);
+ goto err_out_free_intr;
+ }
+
+ new->intr.cpu = cpumask_local_spread(new->intr.index,
+ dev_to_node(dev));
+ if (new->intr.cpu != -1)
cpumask_set_cpu(new->intr.cpu,
&new->intr.affinity_mask);
} else {
@@ -437,13 +452,13 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->cq.info) {
netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
err = -ENOMEM;
- goto err_out_free_intr;
+ goto err_out_free_irq;
}
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) {
netdev_err(lif->netdev, "Cannot initialize completion queue\n");
- goto err_out_free_intr;
+ goto err_out_free_irq;
}
new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
@@ -451,7 +466,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->base) {
netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
err = -ENOMEM;
- goto err_out_free_intr;
+ goto err_out_free_irq;
}
new->total_size = total_size;
@@ -477,8 +492,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
return 0;
+err_out_free_irq:
+ if (flags & IONIC_QCQ_F_INTR)
+ devm_free_irq(dev, new->intr.vector, &new->napi);
err_out_free_intr:
- ionic_intr_free(lif, new->intr.index);
+ if (flags & IONIC_QCQ_F_INTR)
+ ionic_intr_free(lif, new->intr.index);
err_out:
dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
return err;
@@ -500,6 +519,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
0, lif->kern_pid, &lif->adminqcq);
if (err)
return err;
+ ionic_debugfs_add_qcq(lif, lif->adminqcq);
if (lif->ionic->nnqs_per_lif) {
flags = IONIC_QCQ_F_NOTIFYQ;
@@ -510,6 +530,7 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
0, lif->kern_pid, &lif->notifyqcq);
if (err)
goto err_out_free_adminqcq;
+ ionic_debugfs_add_qcq(lif, lif->notifyqcq);
/* Let the notifyq ride on the adminq interrupt */
ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
@@ -594,6 +615,10 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+ q->tail = q->info;
+ q->head = q->tail;
+ cq->tail = cq->info;
+
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
@@ -607,8 +632,6 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->flags |= IONIC_QCQ_F_INITED;
- ionic_debugfs_add_qcq(lif, qcq);
-
return 0;
}
@@ -641,6 +664,10 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+ q->tail = q->info;
+ q->head = q->tail;
+ cq->tail = cq->info;
+
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
@@ -655,16 +682,8 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
NAPI_POLL_WEIGHT);
- err = ionic_request_irq(lif, qcq);
- if (err) {
- netif_napi_del(&qcq->napi);
- return err;
- }
-
qcq->flags |= IONIC_QCQ_F_INITED;
- ionic_debugfs_add_qcq(lif, qcq);
-
return 0;
}
@@ -672,6 +691,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
struct ionic_cq_info *cq_info)
{
union ionic_notifyq_comp *comp = cq_info->cq_desc;
+ struct ionic_deferred_work *work;
struct net_device *netdev;
struct ionic_queue *q;
struct ionic_lif *lif;
@@ -697,11 +717,13 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
ionic_link_status_check_request(lif);
break;
case IONIC_EVENT_RESET:
- netdev_info(netdev, "Notifyq IONIC_EVENT_RESET eid=%lld\n",
- eid);
- netdev_info(netdev, " reset_code=%d state=%d\n",
- comp->reset.reset_code,
- comp->reset.state);
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "%s OOM\n", __func__);
+ } else {
+ work->type = IONIC_DW_TYPE_LIF_RESET;
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ }
break;
default:
netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n",
@@ -831,7 +853,7 @@ static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
err = ionic_adminq_post_wait(lif, &ctx);
- if (err)
+ if (err && err != -EEXIST)
return err;
return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
@@ -861,7 +883,7 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
spin_unlock_bh(&lif->rx_filters.lock);
err = ionic_adminq_post_wait(lif, &ctx);
- if (err)
+ if (err && err != -EEXIST)
return err;
netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
@@ -1093,6 +1115,7 @@ static int ionic_set_nic_features(struct ionic_lif *lif,
u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
IONIC_ETH_HW_VLAN_RX_STRIP |
IONIC_ETH_HW_VLAN_RX_FILTER;
+ u64 old_hw_features;
int err;
ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
@@ -1100,9 +1123,13 @@ static int ionic_set_nic_features(struct ionic_lif *lif,
if (err)
return err;
+ old_hw_features = lif->hw_features;
lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
ctx.comp.lif_setattr.features);
+ if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
+ ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
+
if ((vlan_flags & features) &&
!(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
@@ -1149,6 +1176,10 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
netdev_features_t features;
int err;
+ /* no netdev features on the management device */
+ if (lif->ionic->is_mgmt_nic)
+ return 0;
+
/* set up what we expect to support by default */
features = NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
@@ -1205,7 +1236,8 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
netdev->hw_features |= netdev->hw_enc_features;
netdev->features |= netdev->hw_features;
- netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->priv_flags |= IFF_UNICAST_FLT |
+ IFF_LIVE_ADDR_CHANGE;
return 0;
}
@@ -1356,13 +1388,15 @@ int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
.cmd.lif_setattr = {
.opcode = IONIC_CMD_LIF_SETATTR,
.attr = IONIC_LIF_ATTR_RSS,
- .rss.types = cpu_to_le16(types),
.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
},
};
unsigned int i, tbl_sz;
- lif->rss_types = types;
+ if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
+ lif->rss_types = types;
+ ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
+ }
if (key)
memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
@@ -1413,10 +1447,22 @@ static void ionic_lif_rss_deinit(struct ionic_lif *lif)
static void ionic_txrx_disable(struct ionic_lif *lif)
{
unsigned int i;
+ int err;
- for (i = 0; i < lif->nxqs; i++) {
- ionic_qcq_disable(lif->txqcqs[i].qcq);
- ionic_qcq_disable(lif->rxqcqs[i].qcq);
+ if (lif->txqcqs) {
+ for (i = 0; i < lif->nxqs; i++) {
+ err = ionic_qcq_disable(lif->txqcqs[i].qcq);
+ if (err == -ETIMEDOUT)
+ break;
+ }
+ }
+
+ if (lif->rxqcqs) {
+ for (i = 0; i < lif->nxqs; i++) {
+ err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
+ if (err == -ETIMEDOUT)
+ break;
+ }
}
}
@@ -1424,26 +1470,40 @@ static void ionic_txrx_deinit(struct ionic_lif *lif)
{
unsigned int i;
- for (i = 0; i < lif->nxqs; i++) {
- ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
- ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
+ if (lif->txqcqs) {
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
+ ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
+ ionic_tx_empty(&lif->txqcqs[i].qcq->q);
+ }
+ }
- ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
- ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
- ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
+ if (lif->rxqcqs) {
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
+ ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
+ ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
+ }
}
+ lif->rx_mode = 0;
}
static void ionic_txrx_free(struct ionic_lif *lif)
{
unsigned int i;
- for (i = 0; i < lif->nxqs; i++) {
- ionic_qcq_free(lif, lif->txqcqs[i].qcq);
- lif->txqcqs[i].qcq = NULL;
+ if (lif->txqcqs) {
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_qcq_free(lif, lif->txqcqs[i].qcq);
+ lif->txqcqs[i].qcq = NULL;
+ }
+ }
- ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
- lif->rxqcqs[i].qcq = NULL;
+ if (lif->rxqcqs) {
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
+ lif->rxqcqs[i].qcq = NULL;
+ }
}
}
@@ -1465,6 +1525,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
goto err_out;
lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
+ ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
}
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
@@ -1485,6 +1546,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
lif->rx_coalesce_hw);
ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
lif->txqcqs[i].qcq);
+ ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
}
return 0;
@@ -1533,14 +1595,15 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
int i, err;
for (i = 0; i < lif->nxqs; i++) {
- err = ionic_qcq_enable(lif->txqcqs[i].qcq);
+ ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
+ err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
if (err)
goto err_out;
- ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
- err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
+ err = ionic_qcq_enable(lif->txqcqs[i].qcq);
if (err) {
- ionic_qcq_disable(lif->txqcqs[i].qcq);
+ if (err != -ETIMEDOUT)
+ ionic_qcq_disable(lif->rxqcqs[i].qcq);
goto err_out;
}
}
@@ -1549,74 +1612,84 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err_out:
while (i--) {
- ionic_qcq_disable(lif->rxqcqs[i].qcq);
- ionic_qcq_disable(lif->txqcqs[i].qcq);
+ err = ionic_qcq_disable(lif->txqcqs[i].qcq);
+ if (err == -ETIMEDOUT)
+ break;
+ err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
+ if (err == -ETIMEDOUT)
+ break;
}
return err;
}
+static int ionic_start_queues(struct ionic_lif *lif)
+{
+ int err;
+
+ if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
+ return 0;
+
+ err = ionic_txrx_enable(lif);
+ if (err) {
+ clear_bit(IONIC_LIF_F_UP, lif->state);
+ return err;
+ }
+ netif_tx_wake_all_queues(lif->netdev);
+
+ return 0;
+}
+
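ionic_start_queues() and ionic_stop_queues() gate on IONIC_LIF_F_UP with test_and_set_bit()/test_and_clear_bit(), so both paths are idempotent and a racing link event cannot double-start or double-stop the queues. The idiom in isolation, with a hypothetical flag:

if (test_and_set_bit(EXAMPLE_F_UP, state))	/* atomic check-and-claim */
	return 0;		/* already up: nothing to do */
/* bring-up work below runs exactly once per down->up transition */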
int ionic_open(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
int err;
- netif_carrier_off(netdev);
-
err = ionic_txrx_alloc(lif);
if (err)
return err;
err = ionic_txrx_init(lif);
if (err)
- goto err_txrx_free;
-
- err = ionic_txrx_enable(lif);
- if (err)
- goto err_txrx_deinit;
-
- netif_set_real_num_tx_queues(netdev, lif->nxqs);
- netif_set_real_num_rx_queues(netdev, lif->nxqs);
-
- set_bit(IONIC_LIF_UP, lif->state);
+ goto err_out;
- ionic_link_status_check_request(lif);
- if (netif_carrier_ok(netdev))
- netif_tx_wake_all_queues(netdev);
+ /* don't start the queues until we have link */
+ if (netif_carrier_ok(netdev)) {
+ err = ionic_start_queues(lif);
+ if (err)
+ goto err_txrx_deinit;
+ }
return 0;
err_txrx_deinit:
ionic_txrx_deinit(lif);
-err_txrx_free:
+err_out:
ionic_txrx_free(lif);
return err;
}
+static void ionic_stop_queues(struct ionic_lif *lif)
+{
+ if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
+ return;
+
+ ionic_txrx_disable(lif);
+ netif_tx_disable(lif->netdev);
+}
+
int ionic_stop(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
- int err = 0;
- if (!test_bit(IONIC_LIF_UP, lif->state)) {
- dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n",
- __func__, lif->name);
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
return 0;
- }
- dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name);
- clear_bit(IONIC_LIF_UP, lif->state);
-
- /* carrier off before disabling queues to avoid watchdog timeout */
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
- netif_tx_disable(netdev);
- ionic_txrx_disable(lif);
- ionic_lif_quiesce(lif);
+ ionic_stop_queues(lif);
ionic_txrx_deinit(lif);
ionic_txrx_free(lif);
- return err;
+ return 0;
}
static int ionic_get_vf_config(struct net_device *netdev,
@@ -1871,7 +1944,7 @@ int ionic_reset_queues(struct ionic_lif *lif)
/* Put off the next watchdog timeout */
netif_trans_update(lif->netdev);
- err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
+ err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
if (err)
return err;
@@ -1881,7 +1954,7 @@ int ionic_reset_queues(struct ionic_lif *lif)
if (!err && running)
ionic_open(lif->netdev);
- clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);
+ clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
return err;
}
@@ -1910,6 +1983,8 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
ionic_ethtool_set_ops(netdev);
netdev->watchdog_timeo = 2 * HZ;
+ netif_carrier_off(netdev);
+
netdev->min_mtu = IONIC_MIN_MTU;
netdev->max_mtu = IONIC_MAX_MTU;
@@ -1944,6 +2019,8 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
goto err_out_free_netdev;
}
+ ionic_debugfs_add_lif(lif);
+
/* allocate queues */
err = ionic_qcqs_alloc(lif);
if (err)
@@ -2003,6 +2080,80 @@ static void ionic_lif_reset(struct ionic_lif *lif)
mutex_unlock(&lif->ionic->dev_cmd_lock);
}
+static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+
+ if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return;
+
+ dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
+
+ netif_device_detach(lif->netdev);
+
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
+ dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
+ ionic_stop_queues(lif);
+ }
+
+ if (netif_running(lif->netdev)) {
+ ionic_txrx_deinit(lif);
+ ionic_txrx_free(lif);
+ }
+ ionic_lifs_deinit(ionic);
+ ionic_qcqs_free(lif);
+
+ dev_info(ionic->dev, "FW Down: LIFs stopped\n");
+}
+
+static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+ int err;
+
+ if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ return;
+
+ dev_info(ionic->dev, "FW Up: restarting LIFs\n");
+
+ err = ionic_qcqs_alloc(lif);
+ if (err)
+ goto err_out;
+
+ err = ionic_lifs_init(ionic);
+ if (err)
+ goto err_qcqs_free;
+
+ if (lif->registered)
+ ionic_lif_set_netdev_info(lif);
+
+ if (netif_running(lif->netdev)) {
+ err = ionic_txrx_alloc(lif);
+ if (err)
+ goto err_lifs_deinit;
+
+ err = ionic_txrx_init(lif);
+ if (err)
+ goto err_txrx_free;
+ }
+
+ clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
+ ionic_link_status_check_request(lif);
+ netif_device_attach(lif->netdev);
+ dev_info(ionic->dev, "FW Up: LIFs restarted\n");
+
+ return;
+
+err_txrx_free:
+ ionic_txrx_free(lif);
+err_lifs_deinit:
+ ionic_lifs_deinit(ionic);
+err_qcqs_free:
+ ionic_qcqs_free(lif);
+err_out:
+ dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
+}
+
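
The restart path above uses the classic label-ladder unwind: each allocation failure jumps to the label that frees everything acquired so far, in reverse order, so every failure depth has exactly one exit. The shape in isolation (a sketch with made-up resources):

#include <stdlib.h>

struct ctx { void *a, *b, *c; };

static int bring_up(struct ctx *ctx)
{
	ctx->a = malloc(64);
	if (!ctx->a)
		goto err_out;
	ctx->b = malloc(64);
	if (!ctx->b)
		goto err_free_a;
	ctx->c = malloc(64);
	if (!ctx->c)
		goto err_free_b;
	return 0;

err_free_b:
	free(ctx->b);
err_free_a:
	free(ctx->a);
err_out:
	return -1;	/* fully unwound, whatever the failure depth */
}
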
static void ionic_lif_free(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
@@ -2015,7 +2166,8 @@ static void ionic_lif_free(struct ionic_lif *lif)
/* free queues */
ionic_qcqs_free(lif);
- ionic_lif_reset(lif);
+ if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ ionic_lif_reset(lif);
/* free lif info */
dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
@@ -2048,13 +2200,17 @@ void ionic_lifs_free(struct ionic *ionic)
static void ionic_lif_deinit(struct ionic_lif *lif)
{
- if (!test_bit(IONIC_LIF_INITED, lif->state))
+ if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
return;
- clear_bit(IONIC_LIF_INITED, lif->state);
+ if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ cancel_work_sync(&lif->deferred.work);
+ cancel_work_sync(&lif->tx_timeout_work);
+ }
ionic_rx_filters_deinit(lif);
- ionic_lif_rss_deinit(lif);
+ if (lif->netdev->features & NETIF_F_RXHASH)
+ ionic_lif_rss_deinit(lif);
napi_disable(&lif->adminqcq->napi);
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
@@ -2107,13 +2263,6 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
NAPI_POLL_WEIGHT);
- err = ionic_request_irq(lif, qcq);
- if (err) {
- netdev_warn(lif->netdev, "adminq irq request failed %d\n", err);
- netif_napi_del(&qcq->napi);
- return err;
- }
-
napi_enable(&qcq->napi);
if (qcq->flags & IONIC_QCQ_F_INTR)
@@ -2122,8 +2271,6 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
qcq->flags |= IONIC_QCQ_F_INITED;
- ionic_debugfs_add_qcq(lif, qcq);
-
return 0;
}
@@ -2159,6 +2306,7 @@ static int ionic_lif_notifyq_init(struct ionic_lif *lif)
if (err)
return err;
+ lif->last_eid = 0;
q->hw_type = ctx.comp.q_init.hw_type;
q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
q->dbval = IONIC_DBELL_QID(q->hw_index);
@@ -2171,8 +2319,6 @@ static int ionic_lif_notifyq_init(struct ionic_lif *lif)
qcq->flags |= IONIC_QCQ_F_INITED;
- ionic_debugfs_add_qcq(lif, qcq);
-
return 0;
}
@@ -2201,8 +2347,8 @@ static int ionic_station_set(struct ionic_lif *lif)
addr.sa_family = AF_INET;
err = eth_prepare_mac_addr_change(netdev, &addr);
if (err) {
- netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM\n",
- addr.sa_data);
+ netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
+ addr.sa_data, err);
return 0;
}
@@ -2226,8 +2372,6 @@ static int ionic_lif_init(struct ionic_lif *lif)
int dbpage_num;
int err;
- ionic_debugfs_add_lif(lif);
-
mutex_lock(&lif->ionic->dev_cmd_lock);
ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
@@ -2287,7 +2431,7 @@ static int ionic_lif_init(struct ionic_lif *lif)
lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
- set_bit(IONIC_LIF_INITED, lif->state);
+ set_bit(IONIC_LIF_F_INITED, lif->state);
INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
@@ -2375,6 +2519,12 @@ int ionic_lifs_register(struct ionic *ionic)
{
int err;
+ /* the netdev is not registered on the management device; it is
+ * only used as a vehicle for NAPI operations on the adminq
+ */
+ if (ionic->is_mgmt_nic)
+ return 0;
+
INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
ionic->nb.notifier_call = ionic_lif_notify;
@@ -2408,9 +2558,8 @@ void ionic_lifs_unregister(struct ionic *ionic)
* current model, so don't bother searching the
* ionic->lif for candidates to unregister
*/
- cancel_work_sync(&ionic->master_lif->deferred.work);
- cancel_work_sync(&ionic->master_lif->tx_timeout_work);
- if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
+ if (ionic->master_lif &&
+ ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(ionic->master_lif->netdev);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 9c5a7dd45f9d..5d4ffda5c05f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -98,6 +98,7 @@ struct ionic_deferred_work {
union {
unsigned int rx_mode;
u8 addr[ETH_ALEN];
+ u8 fw_status;
};
};
@@ -121,14 +122,15 @@ struct ionic_lif_sw_stats {
};
enum ionic_lif_state_flags {
- IONIC_LIF_INITED,
- IONIC_LIF_SW_DEBUG_STATS,
- IONIC_LIF_UP,
- IONIC_LIF_LINK_CHECK_REQUESTED,
- IONIC_LIF_QUEUE_RESET,
+ IONIC_LIF_F_INITED,
+ IONIC_LIF_F_SW_DEBUG_STATS,
+ IONIC_LIF_F_UP,
+ IONIC_LIF_F_LINK_CHECK_REQUESTED,
+ IONIC_LIF_F_QUEUE_RESET,
+ IONIC_LIF_F_FW_RESET,
/* leave this as last */
- IONIC_LIF_STATE_SIZE
+ IONIC_LIF_F_STATE_SIZE
};
#define IONIC_LIF_NAME_MAX_SZ 32
@@ -136,7 +138,7 @@ struct ionic_lif {
char name[IONIC_LIF_NAME_MAX_SZ];
struct list_head list;
struct net_device *netdev;
- DECLARE_BITMAP(state, IONIC_LIF_STATE_SIZE);
+ DECLARE_BITMAP(state, IONIC_LIF_F_STATE_SIZE);
struct ionic *ionic;
bool registered;
unsigned int index;
@@ -179,7 +181,6 @@ struct ionic_lif {
u32 rx_coalesce_usecs; /* what the user asked for */
u32 rx_coalesce_hw; /* what the hw is using */
- u32 flags;
struct work_struct tx_timeout_work;
};
@@ -225,6 +226,9 @@ static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units)
return (units * div) / mult;
}
+void ionic_link_status_check_request(struct ionic_lif *lif);
+void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
+ struct ionic_deferred_work *work);
int ionic_lifs_alloc(struct ionic *ionic);
void ionic_lifs_free(struct ionic *ionic);
void ionic_lifs_deinit(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index a8e3fb73b465..588c62e9add7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/utsname.h>
+#include <linux/vermagic.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -15,7 +16,6 @@
MODULE_DESCRIPTION(IONIC_DRV_DESCRIPTION);
MODULE_AUTHOR("Pensando Systems, Inc");
MODULE_LICENSE("GPL");
-MODULE_VERSION(IONIC_DRV_VERSION);
static const char *ionic_error_to_str(enum ionic_status_code code)
{
@@ -58,6 +58,8 @@ static const char *ionic_error_to_str(enum ionic_status_code code)
return "IONIC_RC_BAD_ADDR";
case IONIC_RC_DEV_CMD:
return "IONIC_RC_DEV_CMD";
+ case IONIC_RC_ENOSUPP:
+ return "IONIC_RC_ENOSUPP";
case IONIC_RC_ERROR:
return "IONIC_RC_ERROR";
case IONIC_RC_ERDMA:
@@ -76,6 +78,7 @@ static int ionic_error_to_errno(enum ionic_status_code code)
case IONIC_RC_EQTYPE:
case IONIC_RC_EQID:
case IONIC_RC_EINVAL:
+ case IONIC_RC_ENOSUPP:
return -EINVAL;
case IONIC_RC_EPERM:
return -EPERM;
@@ -240,11 +243,16 @@ static void ionic_adminq_cb(struct ionic_queue *q,
static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
- struct ionic_queue *adminq = &lif->adminqcq->q;
+ struct ionic_queue *adminq;
int err = 0;
WARN_ON(in_interrupt());
+ if (!lif->adminqcq)
+ return -EIO;
+
+ adminq = &lif->adminqcq->q;
+
spin_lock(&lif->adminq_lock);
if (!ionic_q_has_space(adminq, 1)) {
err = -ENOSPC;
@@ -278,9 +286,11 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
err = ionic_adminq_post(lif, ctx);
if (err) {
- name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
- netdev_err(netdev, "Posting of %s (%d) failed: %d\n",
- name, ctx->cmd.cmd.opcode, err);
+ if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
+ netdev_err(netdev, "Posting of %s (%d) failed: %d\n",
+ name, ctx->cmd.cmd.opcode, err);
+ }
return err;
}
@@ -357,7 +367,10 @@ try_again:
done, duration / HZ, duration);
if (!done && hb) {
- ionic_dev_cmd_clean(ionic);
+ /* It is possible (but unlikely) that FW was busy and missed a
+ * heartbeat check but is still alive and will process this
+ * request, so don't clean the dev_cmd in this case.
+ */
dev_warn(ionic->dev, "DEVCMD %s (%d) failed - FW halted\n",
ionic_opcode_to_str(opcode), opcode);
return -ENXIO;
@@ -414,7 +427,7 @@ int ionic_identify(struct ionic *ionic)
memset(ident, 0, sizeof(*ident));
ident->drv.os_type = cpu_to_le32(IONIC_OS_TYPE_LINUX);
- strncpy(ident->drv.driver_ver_str, IONIC_DRV_VERSION,
+ strncpy(ident->drv.driver_ver_str, UTS_RELEASE,
sizeof(ident->drv.driver_ver_str) - 1);
mutex_lock(&ionic->dev_cmd_lock);
@@ -558,8 +571,6 @@ int ionic_port_reset(struct ionic *ionic)
static int __init ionic_init_module(void)
{
- pr_info("%s %s, ver %s\n",
- IONIC_DRV_NAME, IONIC_DRV_DESCRIPTION, IONIC_DRV_VERSION);
ionic_debugfs_create();
return ionic_bus_register_driver();
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index a1e9796a660a..8f2a8fb029f1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -118,8 +118,8 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
/* rx stats */
total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
/* tx debug stats */
total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS +
IONIC_NUM_TX_Q_STATS +
@@ -151,8 +151,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
*buf += ETH_GSTRING_LEN;
}
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN,
"txq_%d_%s",
@@ -190,8 +190,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
*buf += ETH_GSTRING_LEN;
}
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
snprintf(*buf, ETH_GSTRING_LEN,
"rxq_%d_cq_%s",
@@ -247,8 +247,8 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
(*buf)++;
}
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
txqcq = lif_to_txqcq(lif, q_num);
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->q,
@@ -281,8 +281,8 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
(*buf)++;
}
- if (test_bit(IONIC_LIF_UP, lif->state) &&
- test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+ if (test_bit(IONIC_LIF_F_UP, lif->state) &&
+ test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
rxqcq = lif_to_rxqcq(lif, q_num);
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
**buf = IONIC_READ_STAT64(&rxqcq->cq,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index e452f4242ba0..d233b6e77b1e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -158,7 +158,7 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_i
}
/* no packet processing while resetting */
- if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) {
+ if (unlikely(test_bit(IONIC_LIF_F_QUEUE_RESET, q->lif->state))) {
stats->dropped++;
return;
}
@@ -593,6 +593,22 @@ void ionic_tx_flush(struct ionic_cq *cq)
work_done, 0);
}
+void ionic_tx_empty(struct ionic_queue *q)
+{
+ struct ionic_desc_info *desc_info;
+ int done = 0;
+
+ /* walk the not-yet-completed Tx entries, if any */
+ while (q->head != q->tail) {
+ desc_info = q->tail;
+ q->tail = desc_info->next;
+ ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
+ desc_info->cb = NULL;
+ desc_info->cb_arg = NULL;
+ done++;
+ }
+}
+
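
ionic_tx_empty() reclaims whatever the hardware never completed by walking from the queue tail (oldest outstanding slot) toward the head, advancing the tail before cleaning each entry. The same walk modeled on its own, with simplified stand-in types:

#include <stddef.h>

struct desc_info {
	struct desc_info *next;
	void (*cb)(void *arg);
	void *cb_arg;
};

struct queue {
	struct desc_info *head;	/* next slot the producer will use */
	struct desc_info *tail;	/* oldest slot not yet reclaimed */
};

static int drain_queue(struct queue *q)
{
	int done = 0;

	while (q->head != q->tail) {
		struct desc_info *di = q->tail;

		q->tail = di->next;		/* advance before touching di */
		if (di->cb)
			di->cb(di->cb_arg);	/* complete/unmap the entry */
		di->cb = NULL;
		di->cb_arg = NULL;
		done++;
	}
	return done;
}
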
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
int err;
@@ -632,10 +648,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
ip_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
}
return 0;
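
This and the matching conversions in emac and r8169 below replace open-coded ~csum_ipv6_magic() sequences with the tcp_v6_gso_csum_prep() helper, which seeds the TCP checksum with the IPv6 pseudo-header (upper-layer length 0, as TSO requires) so the NIC can finish the sum per segment. Roughly what that seed computes, in portable C (a sketch, byte-order handling elided; not the kernel implementation):

#include <stdint.h>
#include <stddef.h>

/* saddr/daddr: 16-byte IPv6 addresses in network byte order; returns the
 * folded, un-complemented pseudo-header sum stored as the checksum seed
 */
static uint16_t gso_csum_seed_v6(const uint8_t *saddr, const uint8_t *daddr,
				 uint8_t proto)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < 16; i += 2) {
		sum += (uint32_t)(saddr[i] << 8 | saddr[i + 1]);
		sum += (uint32_t)(daddr[i] << 8 | daddr[i + 1]);
	}
	sum += proto;	/* payload length is 0 at GSO-prep time */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}
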
@@ -1026,7 +1039,7 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
int ndescs;
int err;
- if (unlikely(!test_bit(IONIC_LIF_UP, lif->state))) {
+ if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index 53775c62c85a..71973e3c35a6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -9,6 +9,7 @@ void ionic_tx_flush(struct ionic_cq *cq);
void ionic_rx_fill(struct ionic_queue *q);
void ionic_rx_empty(struct ionic_queue *q);
+void ionic_tx_empty(struct ionic_queue *q);
int ionic_rx_napi(struct napi_struct *napi, int budget);
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 3dce769d83a1..86153660d245 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -1316,7 +1316,7 @@ struct netxen_minidump_template_hdr {
u32 driver_info_word4;
u32 saved_state_array[NX_DUMP_STATE_ARRAY_LEN];
u32 capture_size_array[NX_DUMP_CAP_SIZE_ARRAY_LEN];
- u32 rsvd[0];
+ u32 rsvd[];
};
/* Common Entry Header: Common to All Entry Types */
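
The rsvd[0]-to-rsvd[] changes here (and in qlcnic below) convert zero-length trailing arrays, a GNU extension, into C99 flexible array members, which compilers and bounds-checking tooling understand properly. The allocation idiom that goes with them, as a generic sketch:

#include <stdlib.h>

struct tlv {
	unsigned short type;
	unsigned short len;
	unsigned char data[];	/* flexible array; excluded from sizeof() */
};

static struct tlv *tlv_alloc(unsigned short len)
{
	struct tlv *t = malloc(sizeof(*t) + len);	/* header + payload */

	if (t)
		t->len = len;
	return t;
}
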
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 6a2d91d58968..66f45fce90fa 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -748,24 +748,7 @@ static int netxen_set_intr_coalesce(struct net_device *netdev,
if (ethcoal->rx_coalesce_usecs > 0xffff ||
ethcoal->rx_max_coalesced_frames > 0xffff ||
ethcoal->tx_coalesce_usecs > 0xffff ||
- ethcoal->tx_max_coalesced_frames > 0xffff ||
- ethcoal->rx_coalesce_usecs_irq ||
- ethcoal->rx_max_coalesced_frames_irq ||
- ethcoal->tx_coalesce_usecs_irq ||
- ethcoal->tx_max_coalesced_frames_irq ||
- ethcoal->stats_block_coalesce_usecs ||
- ethcoal->use_adaptive_rx_coalesce ||
- ethcoal->use_adaptive_tx_coalesce ||
- ethcoal->pkt_rate_low ||
- ethcoal->rx_coalesce_usecs_low ||
- ethcoal->rx_max_coalesced_frames_low ||
- ethcoal->tx_coalesce_usecs_low ||
- ethcoal->tx_max_coalesced_frames_low ||
- ethcoal->pkt_rate_high ||
- ethcoal->rx_coalesce_usecs_high ||
- ethcoal->rx_max_coalesced_frames_high ||
- ethcoal->tx_coalesce_usecs_high ||
- ethcoal->tx_max_coalesced_frames_high)
+ ethcoal->tx_max_coalesced_frames > 0xffff)
return -EINVAL;
if (!ethcoal->rx_coalesce_usecs ||
@@ -923,6 +906,8 @@ netxen_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
}
const struct ethtool_ops netxen_nic_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = netxen_nic_get_drvinfo,
.get_regs_len = netxen_nic_get_regs_len,
.get_regs = netxen_nic_get_regs,
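
Dropping the long parameter blacklist works because the ethtool core now validates coalescing requests centrally: a driver declares which fields its set_coalesce honors via supported_coalesce_params, and the core returns -EOPNOTSUPP for anything else before the callback runs. The general pattern, sketched with placeholder names:

static int foo_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	/* only rx/tx usecs and max-frames can be non-default here;
	 * the core has already rejected every other field
	 */
	return 0;	/* program ec->rx_coalesce_usecs etc. into hardware */
}

static const struct ethtool_ops foo_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.set_coalesce = foo_set_coalesce,
};
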
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 03bdd2e26329..38a65b984e47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -4691,26 +4691,20 @@ static void qed_chain_free_single(struct qed_dev *cdev,
static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
- void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
+ struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl;
u32 page_cnt = p_chain->page_cnt, i, pbl_size;
- u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;
- if (!pp_virt_addr_tbl)
+ if (!pp_addr_tbl)
return;
- if (!p_pbl_virt)
- goto out;
-
for (i = 0; i < page_cnt; i++) {
- if (!pp_virt_addr_tbl[i])
+ if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map)
break;
dma_free_coherent(&cdev->pdev->dev,
QED_CHAIN_PAGE_SIZE,
- pp_virt_addr_tbl[i],
- *(dma_addr_t *)p_pbl_virt);
-
- p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+ pp_addr_tbl[i].virt_addr,
+ pp_addr_tbl[i].dma_map);
}
pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
@@ -4720,9 +4714,9 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
pbl_size,
p_chain->pbl_sp.p_virt_table,
p_chain->pbl_sp.p_phys_table);
-out:
- vfree(p_chain->pbl.pp_virt_addr_tbl);
- p_chain->pbl.pp_virt_addr_tbl = NULL;
+
+ vfree(p_chain->pbl.pp_addr_tbl);
+ p_chain->pbl.pp_addr_tbl = NULL;
}
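
The qed change stores each page's CPU and DMA addresses side by side in one table entry, so teardown no longer has to re-derive DMA addresses by walking the PBL memory itself. The resulting free loop, reduced to its essentials (kernel-context sketch; names loosely mirror the patch):

struct addr_tbl_entry {
	void *virt_addr;
	dma_addr_t dma_map;
};

static void chain_pages_free(struct device *dev, struct addr_tbl_entry *tbl,
			     u32 page_cnt, size_t page_size)
{
	u32 i;

	for (i = 0; i < page_cnt; i++) {
		if (!tbl[i].virt_addr)	/* allocation stopped at this slot */
			break;
		dma_free_coherent(dev, page_size, tbl[i].virt_addr,
				  tbl[i].dma_map);
	}
}
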
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
@@ -4823,19 +4817,19 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
{
u32 page_cnt = p_chain->page_cnt, size, i;
dma_addr_t p_phys = 0, p_pbl_phys = 0;
- void **pp_virt_addr_tbl = NULL;
+ struct addr_tbl_entry *pp_addr_tbl;
u8 *p_pbl_virt = NULL;
void *p_virt = NULL;
- size = page_cnt * sizeof(*pp_virt_addr_tbl);
- pp_virt_addr_tbl = vzalloc(size);
- if (!pp_virt_addr_tbl)
+ size = page_cnt * sizeof(*pp_addr_tbl);
+ pp_addr_tbl = vzalloc(size);
+ if (!pp_addr_tbl)
return -ENOMEM;
/* The allocation of the PBL table is done with its full size, since it
* is expected to be contiguous.
* qed_chain_init_pbl_mem() is called even in case of an allocation
- * failure, since pp_virt_addr_tbl was previously allocated, and it
+ * failure, since pp_addr_tbl was previously allocated, and it
* should be saved to allow its freeing during the error flow.
*/
size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
@@ -4849,8 +4843,7 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
p_chain->b_external_pbl = true;
}
- qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
- pp_virt_addr_tbl);
+ qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl);
if (!p_pbl_virt)
return -ENOMEM;
@@ -4869,7 +4862,8 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
/* Fill the PBL table with the physical address of the page */
*(dma_addr_t *)p_pbl_virt = p_phys;
/* Keep the virtual address of the page */
- p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
+ p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt;
+ p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys;
p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 2c189c637cca..96356e897c80 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1087,9 +1087,6 @@ static void qed_update_pf_params(struct qed_dev *cdev,
#define QED_PERIODIC_DB_REC_INTERVAL_MS 100
#define QED_PERIODIC_DB_REC_INTERVAL \
msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
-#define QED_PERIODIC_DB_REC_WAIT_COUNT 10
-#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \
- (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT)
static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
enum qed_slowpath_wq_flag wq_flag,
@@ -1123,7 +1120,7 @@ void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
- int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT;
+ int i;
if (IS_VF(cdev))
return;
@@ -1135,13 +1132,7 @@ static void qed_slowpath_wq_stop(struct qed_dev *cdev)
/* Stop queuing new delayed works */
cdev->hwfns[i].slowpath_wq_active = false;
- /* Wait until the last periodic doorbell recovery is executed */
- while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
- &cdev->hwfns[i].slowpath_task_flags) &&
- sleep_count--)
- msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL);
-
- flush_workqueue(cdev->hwfns[i].slowpath_wq);
+ cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
destroy_workqueue(cdev->hwfns[i].slowpath_wq);
}
}
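
Instead of sleep-polling for the periodic doorbell-recovery work to finish, the slowpath teardown now clears the active flag (so the work stops re-arming itself), cancels any pending delayed instance, and lets destroy_workqueue() flush whatever is still running. The generic teardown order for a self-rearming delayed work (kernel-context sketch; 'foo' names are placeholders):

struct foo {
	bool active;			/* checked by the work before re-arming */
	struct delayed_work task;
	struct workqueue_struct *wq;
};

static void foo_stop(struct foo *f)
{
	f->active = false;		/* stop new self-queuing */
	cancel_delayed_work(&f->task);	/* drop a timer-armed pending run */
	destroy_workqueue(f->wq);	/* drains anything already running */
}
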
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 8a426afb6a55..812c7766e096 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1566,7 +1566,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
static int qede_selftest_receive_traffic(struct qede_dev *edev)
{
- u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
+ u16 sw_rx_index, len;
struct eth_fast_path_rx_reg_cqe *fp_cqe;
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
@@ -1596,17 +1596,6 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
continue;
}
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
-
- /* Memory barrier to prevent the CPU from doing speculative
- * reads of CQE/BD before reading hw_comp_cons. If the CQE is
- * read before it is written by FW, then FW writes CQE and SB,
- * and then the CPU reads the hw_comp_cons, it will use an old
- * CQE.
- */
- rmb();
-
/* Get the CQE from the completion ring */
cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
@@ -2087,6 +2076,7 @@ err:
}
static const struct ethtool_ops qede_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_link_ksettings = qede_get_link_ksettings,
.set_link_ksettings = qede_set_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
@@ -2133,6 +2123,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_link_ksettings = qede_get_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
.get_msglevel = qede_get_msglevel,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index d1ce4531d01a..fe72bb6c9455 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1746,7 +1746,8 @@ unlock:
}
static int qede_parse_actions(struct qede_dev *edev,
- struct flow_action *flow_action)
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
{
const struct flow_action_entry *act;
int i;
@@ -1756,6 +1757,9 @@ static int qede_parse_actions(struct qede_dev *edev,
return -EINVAL;
}
+ if (!flow_action_basic_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_DROP:
@@ -1970,7 +1974,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse tc actions and get the vf_id */
- if (qede_parse_actions(edev, &f->rule->action))
+ if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
goto unlock;
if (qede_flow_find_fltr(edev, &t)) {
@@ -2038,7 +2042,7 @@ static int qede_flow_spec_validate(struct qede_dev *edev,
return -EINVAL;
}
- if (qede_parse_actions(edev, flow_action))
+ if (qede_parse_actions(edev, flow_action, NULL))
return -EINVAL;
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 374a4d4371f9..134611aa2c9a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -418,7 +418,7 @@ struct qlcnic_83xx_dump_template_hdr {
u32 saved_state[16];
u32 cap_sizes[8];
u32 ocm_wnd_reg[16];
- u32 rsvd[0];
+ u32 rsvd[];
};
struct qlcnic_82xx_dump_template_hdr {
@@ -436,7 +436,7 @@ struct qlcnic_82xx_dump_template_hdr {
u32 cap_sizes[8];
u32 rsvd[7];
u32 capabilities;
- u32 rsvd1[0];
+ u32 rsvd1[];
};
#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
@@ -740,7 +740,7 @@ struct qlcnic_hostrq_rx_ctx {
The following is packed:
- N hostrq_rds_rings
- N hostrq_sds_rings */
- char data[0];
+ char data[];
} __packed;
struct qlcnic_cardrsp_rds_ring{
@@ -769,7 +769,7 @@ struct qlcnic_cardrsp_rx_ctx {
The following is packed:
- N cardrsp_rds_rings
- N cardrs_sds_rings */
- char data[0];
+ char data[];
} __packed;
#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 75d83c3cbf27..5c2a3acf1e89 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1542,24 +1542,7 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
if (ethcoal->rx_coalesce_usecs > 0xffff ||
ethcoal->rx_max_coalesced_frames > 0xffff ||
ethcoal->tx_coalesce_usecs > 0xffff ||
- ethcoal->tx_max_coalesced_frames > 0xffff ||
- ethcoal->rx_coalesce_usecs_irq ||
- ethcoal->rx_max_coalesced_frames_irq ||
- ethcoal->tx_coalesce_usecs_irq ||
- ethcoal->tx_max_coalesced_frames_irq ||
- ethcoal->stats_block_coalesce_usecs ||
- ethcoal->use_adaptive_rx_coalesce ||
- ethcoal->use_adaptive_tx_coalesce ||
- ethcoal->pkt_rate_low ||
- ethcoal->rx_coalesce_usecs_low ||
- ethcoal->rx_max_coalesced_frames_low ||
- ethcoal->tx_coalesce_usecs_low ||
- ethcoal->tx_max_coalesced_frames_low ||
- ethcoal->pkt_rate_high ||
- ethcoal->rx_coalesce_usecs_high ||
- ethcoal->rx_max_coalesced_frames_high ||
- ethcoal->tx_coalesce_usecs_high ||
- ethcoal->tx_max_coalesced_frames_high)
+ ethcoal->tx_max_coalesced_frames > 0xffff)
return -EINVAL;
err = qlcnic_config_intr_coalesce(adapter, ethcoal);
@@ -1834,6 +1817,8 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
}
const struct ethtool_ops qlcnic_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = qlcnic_get_drvinfo,
.get_regs_len = qlcnic_get_regs_len,
.get_regs = qlcnic_get_regs,
@@ -1865,6 +1850,8 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
};
const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = qlcnic_get_drvinfo,
.get_regs_len = qlcnic_get_regs_len,
.get_regs = qlcnic_get_regs,
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index bebe38d74d66..251d4ac4af02 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1288,11 +1288,8 @@ static int emac_tso_csum(struct emac_adapter *adpt,
memset(tpd, 0, sizeof(*tpd));
memset(&extra_tpd, 0, sizeof(extra_tpd));
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
+
TPD_PKT_LEN_SET(&extra_tpd, skb->len);
TPD_LSO_SET(&extra_tpd, 1);
TPD_LSOV_SET(&extra_tpd, 1);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index fbf4cbcf1a65..1305522f72d6 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -57,7 +57,7 @@ static int rmnet_register_real_device(struct net_device *real_dev)
if (rmnet_is_real_dev_registered(real_dev))
return 0;
- port = kzalloc(sizeof(*port), GFP_ATOMIC);
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
@@ -122,13 +122,12 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
}
real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
- if (!real_dev || !dev)
+ if (!real_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "link does not exist");
return -ENODEV;
+ }
- if (!data[IFLA_RMNET_MUX_ID])
- return -EINVAL;
-
- ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (!ep)
return -ENOMEM;
@@ -139,7 +138,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
goto err0;
port = rmnet_get_port_rtnl(real_dev);
- err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
+ err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep, extack);
if (err)
goto err1;
@@ -263,12 +262,16 @@ static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
{
u16 mux_id;
- if (!data || !data[IFLA_RMNET_MUX_ID])
+ if (!data || !data[IFLA_RMNET_MUX_ID]) {
+ NL_SET_ERR_MSG_MOD(extack, "MUX ID not specified");
return -EINVAL;
+ }
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
- if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
+ if (mux_id > (RMNET_MAX_LOGICAL_EP - 1)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid MUX ID");
return -ERANGE;
+ }
return 0;
}
@@ -406,14 +409,22 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
/* If there is more than one rmnet dev attached, it's probably being
* used for muxing. Skip the bridging in that case
*/
- if (port->nr_rmnet_devs > 1)
+ if (port->nr_rmnet_devs > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "more than one rmnet dev attached");
return -EINVAL;
+ }
- if (port->rmnet_mode != RMNET_EPMODE_VND)
+ if (port->rmnet_mode != RMNET_EPMODE_VND) {
+ NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
return -EINVAL;
+ }
+
+ if (rmnet_is_real_dev_registered(slave_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "slave cannot be another rmnet dev");
- if (rmnet_is_real_dev_registered(slave_dev))
return -EBUSY;
+ }
err = rmnet_register_real_device(slave_dev);
if (err)
@@ -475,4 +486,5 @@ static void __exit rmnet_exit(void)
module_init(rmnet_init)
module_exit(rmnet_exit)
+MODULE_ALIAS_RTNL_LINK("rmnet");
MODULE_LICENSE("GPL v2");
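
MODULE_ALIAS_RTNL_LINK("rmnet") lets the module be demand-loaded when user space creates a link of that type: the rtnetlink core issues request_module("rtnl-link-rmnet"), which modprobe resolves through this alias. Illustrative usage (device names invented):

	ip link add link wwan0 rmnet0 type rmnet mux_id 1
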
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 26ad40f19c64..d58b51d277f1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -212,6 +212,8 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->needs_free_netdev = true;
rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
+ rmnet_dev->features |= NETIF_F_LLTX;
+
/* This perm addr will be used as interface identifier by IPv6 */
rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
eth_random_addr(rmnet_dev->perm_addr);
@@ -222,16 +224,17 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
struct net_device *real_dev,
- struct rmnet_endpoint *ep)
+ struct rmnet_endpoint *ep,
+ struct netlink_ext_ack *extack)
+
{
struct rmnet_priv *priv = netdev_priv(rmnet_dev);
int rc;
- if (ep->egress_dev)
- return -EINVAL;
-
- if (rmnet_get_endpoint(port, id))
+ if (rmnet_get_endpoint(port, id)) {
+ NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
return -EBUSY;
+ }
rmnet_dev->hw_features = NETIF_F_RXCSUM;
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
index 14d77c709d4a..4967f3461ed1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
@@ -11,7 +11,8 @@ int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
struct net_device *real_dev,
- struct rmnet_endpoint *ep);
+ struct rmnet_endpoint *ep,
+ struct netlink_ext_ack *extack);
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep);
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 791d99b9e1cf..55cb5730beb6 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -212,7 +212,6 @@ enum rtl_registers {
/* Unlimited maximum PCI burst. */
#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
- RxMissed = 0x4c,
Cfg9346 = 0x50,
Config0 = 0x51,
Config1 = 0x52,
@@ -576,6 +575,7 @@ struct rtl8169_tc_offsets {
__le64 tx_errors;
__le32 tx_multi_collision;
__le16 tx_aborted;
+ __le16 rx_missed;
};
enum rtl_flag {
@@ -689,6 +689,12 @@ static void rtl_unlock_config_regs(struct rtl8169_private *tp)
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
}
+static void rtl_pci_commit(struct rtl8169_private *tp)
+{
+ /* Read an arbitrary register to commit a preceding PCI write */
+ RTL_R8(tp, ChipCmd);
+}
+
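
rtl_pci_commit() gives a name to the read-back idiom scattered through the driver: MMIO writes over PCI are posted and may linger in buffers, and a read from any register on the same device forces them to complete. The idiom in general form (kernel-context sketch; the register name is a placeholder):

#define FOO_REG_CTRL 0x50	/* any readable register on the device */

static inline void foo_write_flush(void __iomem *ioaddr, u8 val)
{
	writeb(val, ioaddr + FOO_REG_CTRL);	/* posted; may be buffered */
	(void)readb(ioaddr + FOO_REG_CTRL);	/* read forces completion */
}
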
static bool rtl_is_8125(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_60;
@@ -1302,10 +1308,6 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
tp->irq_enabled = 0;
}
-#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
-#define RTL_EVENT_NAPI_TX (TxOK | TxErr)
-#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
-
static void rtl_irq_enable(struct rtl8169_private *tp)
{
tp->irq_enabled = 1;
@@ -1319,18 +1321,13 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
rtl_irq_disable(tp);
rtl_ack_events(tp, 0xffffffff);
- /* PCI commit */
- RTL_R8(tp, ChipCmd);
+ rtl_pci_commit(tp);
}
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
- struct net_device *dev = tp->dev;
struct phy_device *phydev = tp->phydev;
- if (!netif_running(dev))
- return;
-
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
if (phydev->speed == SPEED_1000) {
@@ -1536,7 +1533,7 @@ static int rtl8169_set_features(struct net_device *dev,
}
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- RTL_R16(tp, CPlusCmd);
+ rtl_pci_commit(tp);
rtl_unlock_work(tp);
@@ -1622,7 +1619,7 @@ static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
u32 cmd;
RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32);
- RTL_R32(tp, CounterAddrHigh);
+ rtl_pci_commit(tp);
cmd = (u64)paddr & DMA_BIT_MASK(32);
RTL_W32(tp, CounterAddrLow, cmd);
RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
@@ -1689,6 +1686,7 @@ static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp)
tp->tc_offset.tx_errors = counters->tx_errors;
tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
tp->tc_offset.tx_aborted = counters->tx_aborted;
+ tp->tc_offset.rx_missed = counters->rx_missed;
tp->tc_offset.inited = true;
return ret;
@@ -1946,7 +1944,7 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- RTL_R16(tp, CPlusCmd);
+ rtl_pci_commit(tp);
rtl_unlock_work(tp);
@@ -2008,6 +2006,8 @@ out:
}
static const struct ethtool_ops rtl8169_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
.get_link = ethtool_op_get_link,
@@ -2044,7 +2044,7 @@ static void rtl_enable_eee(struct rtl8169_private *tp)
phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
}
-static void rtl8169_get_mac_version(struct rtl8169_private *tp)
+static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{
/*
* The driver currently handles the 8168Bf and the 8168Be identically
@@ -2060,7 +2060,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
static const struct rtl_mac_info {
u16 mask;
u16 val;
- u16 mac_version;
+ enum mac_version ver;
} mac_info[] = {
/* 8125 family. */
{ 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
@@ -2147,22 +2147,22 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{ 0x000, 0x000, RTL_GIGA_MAC_NONE }
};
const struct rtl_mac_info *p = mac_info;
- u16 reg = RTL_R32(tp, TxConfig) >> 20;
+ enum mac_version ver;
- while ((reg & p->mask) != p->val)
+ while ((xid & p->mask) != p->val)
p++;
- tp->mac_version = p->mac_version;
-
- if (tp->mac_version == RTL_GIGA_MAC_NONE) {
- dev_err(tp_to_dev(tp), "unknown chip XID %03x\n", reg & 0xfcf);
- } else if (!tp->supports_gmii) {
- if (tp->mac_version == RTL_GIGA_MAC_VER_42)
- tp->mac_version = RTL_GIGA_MAC_VER_43;
- else if (tp->mac_version == RTL_GIGA_MAC_VER_45)
- tp->mac_version = RTL_GIGA_MAC_VER_47;
- else if (tp->mac_version == RTL_GIGA_MAC_VER_46)
- tp->mac_version = RTL_GIGA_MAC_VER_48;
+ ver = p->ver;
+
+ if (ver != RTL_GIGA_MAC_NONE && !gmii) {
+ if (ver == RTL_GIGA_MAC_VER_42)
+ ver = RTL_GIGA_MAC_VER_43;
+ else if (ver == RTL_GIGA_MAC_VER_45)
+ ver = RTL_GIGA_MAC_VER_47;
+ else if (ver == RTL_GIGA_MAC_VER_46)
+ ver = RTL_GIGA_MAC_VER_48;
}
+
+ return ver;
}
static void rtl_release_firmware(struct rtl8169_private *tp)
@@ -2228,8 +2228,8 @@ u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
- if (!test_and_set_bit(flag, tp->wk.flags))
- schedule_work(&tp->wk.work);
+ set_bit(flag, tp->wk.flags);
+ schedule_work(&tp->wk.work);
}
static void rtl8169_init_phy(struct rtl8169_private *tp)
@@ -2264,10 +2264,10 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
rtl_unlock_config_regs(tp);
RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
- RTL_R32(tp, MAC4);
+ rtl_pci_commit(tp);
RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
- RTL_R32(tp, MAC0);
+ rtl_pci_commit(tp);
if (tp->mac_version == RTL_GIGA_MAC_VER_34)
rtl_rar_exgmac_set(tp, addr);
@@ -2471,66 +2471,52 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
-static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
+static void rtl_jumbo_config(struct rtl8169_private *tp)
{
- rtl_unlock_config_regs(tp);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- pcie_set_readrq(tp->pci_dev, 512);
- r8168b_1_hw_jumbo_enable(tp);
- break;
- case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
- pcie_set_readrq(tp->pci_dev, 512);
- r8168c_hw_jumbo_enable(tp);
- break;
- case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
- r8168dp_hw_jumbo_enable(tp);
- break;
- case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
- pcie_set_readrq(tp->pci_dev, 512);
- r8168e_hw_jumbo_enable(tp);
- break;
- default:
- break;
- }
- rtl_lock_config_regs(tp);
-}
+ bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
-static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
-{
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
- r8168b_1_hw_jumbo_disable(tp);
+ if (jumbo) {
+ pcie_set_readrq(tp->pci_dev, 512);
+ r8168b_1_hw_jumbo_enable(tp);
+ } else {
+ r8168b_1_hw_jumbo_disable(tp);
+ }
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
- r8168c_hw_jumbo_disable(tp);
+ if (jumbo) {
+ pcie_set_readrq(tp->pci_dev, 512);
+ r8168c_hw_jumbo_enable(tp);
+ } else {
+ r8168c_hw_jumbo_disable(tp);
+ }
break;
case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
- r8168dp_hw_jumbo_disable(tp);
+ if (jumbo)
+ r8168dp_hw_jumbo_enable(tp);
+ else
+ r8168dp_hw_jumbo_disable(tp);
break;
case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
- r8168e_hw_jumbo_disable(tp);
+ if (jumbo) {
+ pcie_set_readrq(tp->pci_dev, 512);
+ r8168e_hw_jumbo_enable(tp);
+ } else {
+ r8168e_hw_jumbo_disable(tp);
+ }
break;
default:
break;
}
rtl_lock_config_regs(tp);
- if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+ if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
pcie_set_readrq(tp->pci_dev, 4096);
}
-static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
-{
- if (mtu > ETH_DATA_LEN)
- rtl_hw_jumbo_enable(tp);
- else
- rtl_hw_jumbo_disable(tp);
-}
-
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
return RTL_R8(tp, ChipCmd) & CmdReset;
@@ -3838,9 +3824,6 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
static void rtl_hw_start_8169(struct rtl8169_private *tp)
{
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
-
RTL_W8(tp, EarlyTxThres, NoEarlyTx);
tp->cp_cmd |= PCIMulRW;
@@ -3853,8 +3836,6 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
rtl8169_set_magic_reg(tp, tp->mac_version);
- RTL_W32(tp, RxMissed, 0);
-
/* disable interrupt coalescing */
RTL_W16(tp, IntrMitigate, 0x0000);
}
@@ -3877,10 +3858,11 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_tx_desc_registers(tp);
rtl_lock_config_regs(tp);
- rtl_jumbo_config(tp, tp->dev->mtu);
+ rtl_jumbo_config(tp);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
- RTL_R16(tp, CPlusCmd);
+ rtl_pci_commit(tp);
+
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
rtl_set_tx_config_registers(tp);
@@ -3892,10 +3874,9 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl_jumbo_config(tp, new_mtu);
-
dev->mtu = new_mtu;
netdev_update_features(dev);
+ rtl_jumbo_config(tp);
return 0;
}
@@ -3910,6 +3891,7 @@ static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
{
u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
+ desc->opts2 = 0;
/* Force memory writes to complete before releasing descriptor */
dma_wmb();
@@ -3991,17 +3973,15 @@ static int rtl8169_init_ring(struct rtl8169_private *tp)
return rtl8169_rx_fill(tp);
}
-static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
- struct TxDesc *desc)
+static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry)
{
- unsigned int len = tx_skb->len;
+ struct ring_info *tx_skb = tp->tx_skb + entry;
+ struct TxDesc *desc = tp->TxDescArray + entry;
- dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
-
- desc->opts1 = 0x00;
- desc->opts2 = 0x00;
- desc->addr = 0x00;
- tx_skb->len = 0;
+ dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len,
+ DMA_TO_DEVICE);
+ memset(desc, 0, sizeof(*desc));
+ memset(tx_skb, 0, sizeof(*tx_skb));
}
static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
@@ -4017,12 +3997,9 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
if (len) {
struct sk_buff *skb = tx_skb->skb;
- rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
- tp->TxDescArray + entry);
- if (skb) {
+ rtl8169_unmap_tx_skb(tp, entry);
+ if (skb)
dev_consume_skb_any(skb);
- tx_skb->skb = NULL;
- }
}
}
}
@@ -4063,57 +4040,56 @@ static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
-static __le32 rtl8169_get_txd_opts1(u32 opts0, u32 len, unsigned int entry)
+static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
+ void *addr, unsigned int entry, bool desc_own)
{
- u32 status = opts0 | len;
+ struct TxDesc *txd = tp->TxDescArray + entry;
+ struct device *d = tp_to_dev(tp);
+ dma_addr_t mapping;
+ u32 opts1;
+ int ret;
+
+ mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(d, mapping);
+ if (unlikely(ret)) {
+ if (net_ratelimit())
+ netif_err(tp, drv, tp->dev, "Failed to map TX data!\n");
+ return ret;
+ }
+ txd->addr = cpu_to_le64(mapping);
+ txd->opts2 = cpu_to_le32(opts[1]);
+
+ opts1 = opts[0] | len;
if (entry == NUM_TX_DESC - 1)
- status |= RingEnd;
+ opts1 |= RingEnd;
+ if (desc_own)
+ opts1 |= DescOwn;
+ txd->opts1 = cpu_to_le32(opts1);
+
+ tp->tx_skb[entry].len = len;
- return cpu_to_le32(status);
+ return 0;
}
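
rtl8169_tx_map() centralizes the mapping discipline the old per-call-site code duplicated: every dma_map_single() must be checked with dma_mapping_error() before the address is armed in a descriptor. The core of that contract (kernel-context sketch with invented names):

static int xmit_map_one(struct device *d, void *buf, size_t len,
			dma_addr_t *out)
{
	dma_addr_t m = dma_map_single(d, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(d, m))
		return -ENOMEM;	/* never hand an unchecked address to HW */
	*out = m;
	return 0;
}
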
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
- u32 *opts)
+ const u32 *opts, unsigned int entry)
{
struct skb_shared_info *info = skb_shinfo(skb);
- unsigned int cur_frag, entry;
- struct TxDesc *uninitialized_var(txd);
- struct device *d = tp_to_dev(tp);
+ unsigned int cur_frag;
- entry = tp->cur_tx;
for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
const skb_frag_t *frag = info->frags + cur_frag;
- dma_addr_t mapping;
- u32 len;
- void *addr;
+ void *addr = skb_frag_address(frag);
+ u32 len = skb_frag_size(frag);
entry = (entry + 1) % NUM_TX_DESC;
- txd = tp->TxDescArray + entry;
- len = skb_frag_size(frag);
- addr = skb_frag_address(frag);
- mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(d, mapping))) {
- if (net_ratelimit())
- netif_err(tp, drv, tp->dev,
- "Failed to map TX fragments DMA!\n");
+ if (unlikely(rtl8169_tx_map(tp, opts, len, addr, entry, true)))
goto err_out;
- }
-
- txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
- txd->opts2 = cpu_to_le32(opts[1]);
- txd->addr = cpu_to_le64(mapping);
-
- tp->tx_skb[entry].len = len;
- }
-
- if (cur_frag) {
- tp->tx_skb[entry].skb = skb;
- txd->opts1 |= cpu_to_le32(LastFrag);
}
- return cur_frag;
+ return 0;
err_out:
rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
@@ -4125,36 +4101,13 @@ static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
}
-/* msdn_giant_send_check()
- * According to the document of microsoft, the TCP Pseudo Header excludes the
- * packet length for IPv6 TCP large packets.
- */
-static int msdn_giant_send_check(struct sk_buff *skb)
-{
- const struct ipv6hdr *ipv6h;
- struct tcphdr *th;
- int ret;
-
- ret = skb_cow_head(skb, 0);
- if (ret)
- return ret;
-
- ipv6h = ipv6_hdr(skb);
- th = tcp_hdr(skb);
-
- th->check = 0;
- th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
-
- return ret;
-}
-
static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
{
u32 mss = skb_shinfo(skb)->gso_size;
if (mss) {
opts[0] |= TD_LSO;
- opts[0] |= min(mss, TD_MSS_MAX) << TD0_MSS_SHIFT;
+ opts[0] |= mss << TD0_MSS_SHIFT;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
const struct iphdr *ip = ip_hdr(skb);
@@ -4180,9 +4133,10 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
break;
case htons(ETH_P_IPV6):
- if (msdn_giant_send_check(skb))
+ if (skb_cow_head(skb, 0))
return false;
+ tcp_v6_gso_csum_prep(skb);
opts[0] |= TD1_GTSENV6;
break;
@@ -4192,7 +4146,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
}
opts[0] |= transport_offset << GTTCPHO_SHIFT;
- opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT;
+ opts[1] |= mss << TD1_MSS_SHIFT;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ip_protocol;
@@ -4260,56 +4214,44 @@ static void rtl8169_doorbell(struct rtl8169_private *tp)
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ unsigned int frags = skb_shinfo(skb)->nr_frags;
struct rtl8169_private *tp = netdev_priv(dev);
unsigned int entry = tp->cur_tx % NUM_TX_DESC;
- struct TxDesc *txd = tp->TxDescArray + entry;
- struct device *d = tp_to_dev(tp);
- dma_addr_t mapping;
- u32 opts[2], len;
- bool stop_queue;
- bool door_bell;
- int frags;
+ struct TxDesc *txd_first, *txd_last;
+ bool stop_queue, door_bell;
+ u32 opts[2];
- if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
+ txd_first = tp->TxDescArray + entry;
+
+ if (unlikely(!rtl_tx_slots_avail(tp, frags))) {
netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop_0;
}
- if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ if (unlikely(le32_to_cpu(txd_first->opts1) & DescOwn))
goto err_stop_0;
opts[1] = rtl8169_tx_vlan_tag(skb);
- opts[0] = DescOwn;
+ opts[0] = 0;
- if (rtl_chip_supports_csum_v2(tp)) {
- if (!rtl8169_tso_csum_v2(tp, skb, opts))
- goto err_dma_0;
- } else {
+ if (!rtl_chip_supports_csum_v2(tp))
rtl8169_tso_csum_v1(skb, opts);
- }
-
- len = skb_headlen(skb);
- mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(d, mapping))) {
- if (net_ratelimit())
- netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
+ else if (!rtl8169_tso_csum_v2(tp, skb, opts))
goto err_dma_0;
- }
- tp->tx_skb[entry].len = len;
- txd->addr = cpu_to_le64(mapping);
+ if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data,
+ entry, false)))
+ goto err_dma_0;
- frags = rtl8169_xmit_frags(tp, skb, opts);
- if (frags < 0)
- goto err_dma_1;
- else if (frags)
- opts[0] |= FirstFrag;
- else {
- opts[0] |= FirstFrag | LastFrag;
- tp->tx_skb[entry].skb = skb;
+ if (frags) {
+ if (rtl8169_xmit_frags(tp, skb, opts, entry))
+ goto err_dma_1;
+ entry = (entry + frags) % NUM_TX_DESC;
}
- txd->opts2 = cpu_to_le32(opts[1]);
+ txd_last = tp->TxDescArray + entry;
+ txd_last->opts1 |= cpu_to_le32(LastFrag);
+ tp->tx_skb[entry].skb = skb;
skb_tx_timestamp(skb);
@@ -4318,7 +4260,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());
- txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
+ txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag);
/* Force all memory writes to complete before notifying device */
wmb();
@@ -4354,7 +4296,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
err_dma_1:
- rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
+ rtl8169_unmap_tx_skb(tp, entry);
err_dma_0:
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
@@ -4403,13 +4345,15 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
- u16 pci_status, pci_cmd;
+ int pci_status_errs;
+ u16 pci_cmd;
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
- pci_read_config_word(pdev, PCI_STATUS, &pci_status);
- netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
- pci_cmd, pci_status);
+ pci_status_errs = pci_status_get_and_clear_errors(pdev);
+
+ netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
+ pci_cmd, pci_status_errs);
/*
* The recovery sequence below admits a very elaborated explanation:
@@ -4426,11 +4370,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
- pci_write_config_word(pdev, PCI_STATUS,
- pci_status & (PCI_STATUS_DETECTED_PARITY |
- PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
-
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
@@ -4441,33 +4380,24 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
dirty_tx = tp->dirty_tx;
smp_rmb();
- tx_left = tp->cur_tx - dirty_tx;
- while (tx_left > 0) {
+ for (tx_left = tp->cur_tx - dirty_tx; tx_left > 0; tx_left--) {
unsigned int entry = dirty_tx % NUM_TX_DESC;
- struct ring_info *tx_skb = tp->tx_skb + entry;
+ struct sk_buff *skb = tp->tx_skb[entry].skb;
u32 status;
status = le32_to_cpu(tp->TxDescArray[entry].opts1);
if (status & DescOwn)
break;
- /* This barrier is needed to keep us from reading
- * any other fields out of the Tx descriptor until
- * we know the status of DescOwn
- */
- dma_rmb();
+ rtl8169_unmap_tx_skb(tp, entry);
- rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
- tp->TxDescArray + entry);
- if (tx_skb->skb) {
+ if (skb) {
pkts_compl++;
- bytes_compl += tx_skb->skb->len;
- napi_consume_skb(tx_skb->skb, budget);
- tx_skb->skb = NULL;
+ bytes_compl += skb->len;
+ napi_consume_skb(skb, budget);
}
dirty_tx++;
- tx_left--;
}
if (tp->dirty_tx != dirty_tx) {
@@ -4606,7 +4536,6 @@ process_pkt:
u64_stats_update_end(&tp->rx_stats.syncp);
}
release_descriptor:
- desc->opts2 = 0;
rtl8169_mark_to_asic(desc);
}
@@ -4636,8 +4565,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if (unlikely(status & RxFIFOOver &&
tp->mac_version == RTL_GIGA_MAC_VER_11)) {
netif_stop_queue(tp->dev);
- /* XXX - Hack alert. See rtl_task(). */
- set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
rtl_irq_disable(tp);
@@ -4650,31 +4578,17 @@ out:
static void rtl_task(struct work_struct *work)
{
- static const struct {
- int bitnr;
- void (*action)(struct rtl8169_private *);
- } rtl_work[] = {
- { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
- };
struct rtl8169_private *tp =
container_of(work, struct rtl8169_private, wk.work);
- struct net_device *dev = tp->dev;
- int i;
rtl_lock_work(tp);
- if (!netif_running(dev) ||
+ if (!netif_running(tp->dev) ||
!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
goto out_unlock;
- for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
- bool pending;
-
- pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
- if (pending)
- rtl_work[i].action(tp);
- }
-
+ if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags))
+ rtl_reset_work(tp);
out_unlock:
rtl_unlock_work(tp);
}
@@ -4697,17 +4611,6 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void rtl8169_rx_missed(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- if (tp->mac_version > RTL_GIGA_MAC_VER_06)
- return;
-
- dev->stats.rx_missed_errors += RTL_R32(tp, RxMissed) & 0xffffff;
- RTL_W32(tp, RxMissed, 0);
-}
-
static void r8169_phylink_handler(struct net_device *ndev)
{
struct rtl8169_private *tp = netdev_priv(ndev);
@@ -4757,12 +4660,6 @@ static void rtl8169_down(struct net_device *dev)
netif_stop_queue(dev);
rtl8169_hw_reset(tp);
- /*
- * At this point device interrupts can not be enabled in any function,
- * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
- * and napi is disabled (rtl8169_poll).
- */
- rtl8169_rx_missed(dev);
/* Give a racing hard_start_xmit a few cycles to complete. */
synchronize_rcu();
@@ -4907,8 +4804,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
pm_runtime_get_noresume(&pdev->dev);
- if (netif_running(dev) && pm_runtime_active(&pdev->dev))
- rtl8169_rx_missed(dev);
+ netdev_stats_to_stats64(stats, &dev->stats);
do {
start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
@@ -4922,15 +4818,6 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_bytes = tp->tx_stats.bytes;
} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
- stats->rx_dropped = dev->stats.rx_dropped;
- stats->tx_dropped = dev->stats.tx_dropped;
- stats->rx_length_errors = dev->stats.rx_length_errors;
- stats->rx_errors = dev->stats.rx_errors;
- stats->rx_crc_errors = dev->stats.rx_crc_errors;
- stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
- stats->rx_missed_errors = dev->stats.rx_missed_errors;
- stats->multicast = dev->stats.multicast;
-
/*
* Fetch additional counter values missing in stats collected by driver
* from tally counters.
@@ -4948,6 +4835,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
le32_to_cpu(tp->tc_offset.tx_multi_collision);
stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
le16_to_cpu(tp->tc_offset.tx_aborted);
+ stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) -
+ le16_to_cpu(tp->tc_offset.rx_missed);
pm_runtime_put_noidle(&pdev->dev);
}
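
rx_missed now comes from the hardware tally counters, reported relative to the snapshot taken in rtl8169_init_counter_offsets(), the same offset trick already used for tx_errors and friends, so counters appear to start from zero at driver load. In miniature (names invented):

struct counter_view {
	unsigned long long base;	/* value captured at init */
};

static unsigned long long counter_read(const struct counter_view *v,
				       unsigned long long hw_now)
{
	return hw_now - v->base;	/* unsigned math handles wraparound */
}
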
@@ -5033,7 +4922,6 @@ static int rtl8169_runtime_suspend(struct device *device)
rtl8169_net_suspend(dev);
/* Update counters before going runtime suspend */
- rtl8169_rx_missed(dev);
rtl8169_update_counters(tp);
return 0;
@@ -5098,8 +4986,7 @@ static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
pci_clear_master(tp->pci_dev);
RTL_W8(tp, ChipCmd, CmdRxEnb);
- /* PCI commit */
- RTL_R8(tp, ChipCmd);
+ rtl_pci_commit(tp);
break;
default:
break;
@@ -5173,7 +5060,7 @@ static const struct net_device_ops rtl_netdev_ops = {
static void rtl_set_irq_mask(struct rtl8169_private *tp)
{
- tp->irq_mask = RTL_EVENT_NAPI | LinkChg;
+ tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg;
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver;
@@ -5449,9 +5336,10 @@ done:
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct rtl8169_private *tp;
+ int jumbo_max, region, rc;
+ enum mac_version chipset;
struct net_device *dev;
- int chipset, region;
- int jumbo_max, rc;
+ u16 xid;
dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
if (!dev)
@@ -5509,10 +5397,16 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mmio_addr = pcim_iomap_table(pdev)[region];
+ xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf;
+
/* Identify chip attached to board */
- rtl8169_get_mac_version(tp);
- if (tp->mac_version == RTL_GIGA_MAC_NONE)
+ chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
+ if (chipset == RTL_GIGA_MAC_NONE) {
+ dev_err(&pdev->dev, "unknown chip XID %03x\n", xid);
return -ENODEV;
+ }
+
+ tp->mac_version = chipset;
tp->cp_cmd = RTL_R16(tp, CPlusCmd);
@@ -5530,8 +5424,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- chipset = tp->mac_version;
-
rc = rtl_alloc_irq(tp);
if (rc < 0) {
dev_err(&pdev->dev, "Can't allocate interrupt\n");
@@ -5549,9 +5441,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
- dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
@@ -5573,7 +5462,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rtl_chip_supports_csum_v2(tp)) {
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
- dev->features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
} else {
@@ -5588,9 +5476,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mac_version == RTL_GIGA_MAC_VER_22) {
dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
- dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
}
+ dev->features |= dev->hw_features;
+
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
@@ -5622,8 +5511,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_mdio_unregister;
netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->dev_addr,
- (RTL_R32(tp, TxConfig) >> 20) & 0xfcf,
+ rtl_chip_infos[chipset].name, dev->dev_addr, xid,
pci_irq_vector(pdev, 0));
if (jumbo_max)
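Note on the stats rework above: rx_missed_errors is now taken from the hardware tally counter block instead of the legacy RxMissed register (which exists only up to RTL_GIGA_MAC_VER_06), so rtl8169_rx_missed() and the manual copying of dev->stats fields can go away. The driver keeps a snapshot of the tally counters in tp->tc_offset and reports deltas; a minimal sketch of that pattern (illustrative helper, not the driver's exact code):

	static u16 tally_delta(__le16 now, __le16 at_snapshot)
	{
		/* tally counters are little-endian in DMA memory; the offset
		 * is the snapshot taken when the counters were last updated
		 */
		return le16_to_cpu(now) - le16_to_cpu(at_snapshot);
	}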
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index e367e77c773b..b73f7d023e99 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -796,6 +796,11 @@ static void rtl8168g_disable_aldps(struct phy_device *phydev)
phy_modify_paged(phydev, 0x0a43, 0x10, BIT(2), 0);
}
+static void rtl8168g_enable_gphy_10m(struct phy_device *phydev)
+{
+ phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+}
+
static void rtl8168g_phy_adjust_10m_aldps(struct phy_device *phydev)
{
phy_modify_paged(phydev, 0x0bcc, 0x14, BIT(8), 0);
@@ -904,8 +909,7 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp,
r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
- /* enable GPHY 10M */
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+ rtl8168g_enable_gphy_10m(phydev);
/* SAR ADC performance */
phy_modify_paged(phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
@@ -940,8 +944,7 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
- /* enable GPHY 10M */
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+ rtl8168g_enable_gphy_10m(phydev);
ioffset = rtl8168h_2_get_adc_bias_ioffset(tp);
if (ioffset != 0xffff)
@@ -1063,8 +1066,7 @@ static void rtl8117_hw_phy_config(struct rtl8169_private *tp,
r8168g_phy_param(phydev, 0x8011, 0x0000, 0x0800);
- /* enable GPHY 10M */
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(11));
+ rtl8168g_enable_gphy_10m(phydev);
r8168g_phy_param(phydev, 0x8016, 0x0000, 0x0400);
@@ -1171,7 +1173,7 @@ static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp,
phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000);
- phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+ rtl8168g_enable_gphy_10m(phydev);
rtl8125_config_eee_phy(phydev);
}
@@ -1236,7 +1238,7 @@ static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp,
phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020);
phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000);
phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
- phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+ rtl8168g_enable_gphy_10m(phydev);
rtl8125_config_eee_phy(phydev);
}
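The rtl8168g_enable_gphy_10m() helper introduced above replaces five open-coded copies of the same paged write. phy_modify_paged(phydev, page, reg, mask, set) is phylib's read-modify-write accessor for paged PHY registers; roughly (a simplified sketch — the real helper also propagates errors and takes the MDIO bus lock):

	static int phy_modify_paged_sketch(struct phy_device *phydev, int page,
					   u32 reg, u16 mask, u16 set)
	{
		int oldpage = phy_select_page(phydev, page);
		int val = __phy_read(phydev, reg);

		__phy_write(phydev, reg, (val & ~mask) | set);
		return phy_restore_page(phydev, oldpage, 0);
	}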
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 58ca126518a2..8ed73f44405d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -142,69 +142,6 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWALCR1] = 0x00b4,
};
-static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
- SH_ETH_OFFSET_DEFAULTS,
-
- [EDSR] = 0x0000,
- [EDMR] = 0x0400,
- [EDTRR] = 0x0408,
- [EDRRR] = 0x0410,
- [EESR] = 0x0428,
- [EESIPR] = 0x0430,
- [TDLAR] = 0x0010,
- [TDFAR] = 0x0014,
- [TDFXR] = 0x0018,
- [TDFFR] = 0x001c,
- [RDLAR] = 0x0030,
- [RDFAR] = 0x0034,
- [RDFXR] = 0x0038,
- [RDFFR] = 0x003c,
- [TRSCER] = 0x0438,
- [RMFCR] = 0x0440,
- [TFTR] = 0x0448,
- [FDR] = 0x0450,
- [RMCR] = 0x0458,
- [RPADIR] = 0x0460,
- [FCFTR] = 0x0468,
- [CSMR] = 0x04E4,
-
- [ECMR] = 0x0500,
- [RFLR] = 0x0508,
- [ECSR] = 0x0510,
- [ECSIPR] = 0x0518,
- [PIR] = 0x0520,
- [APR] = 0x0554,
- [MPR] = 0x0558,
- [PFTCR] = 0x055c,
- [PFRCR] = 0x0560,
- [TPAUSER] = 0x0564,
- [MAHR] = 0x05c0,
- [MALR] = 0x05c8,
- [CEFCR] = 0x0740,
- [FRECR] = 0x0748,
- [TSFRCR] = 0x0750,
- [TLFRCR] = 0x0758,
- [RFCR] = 0x0760,
- [MAFCR] = 0x0778,
-
- [ARSTR] = 0x0000,
- [TSU_CTRST] = 0x0004,
- [TSU_FWSLC] = 0x0038,
- [TSU_VTAG0] = 0x0058,
- [TSU_ADSBSY] = 0x0060,
- [TSU_TEN] = 0x0064,
- [TSU_POST1] = 0x0070,
- [TSU_POST2] = 0x0074,
- [TSU_POST3] = 0x0078,
- [TSU_POST4] = 0x007c,
- [TSU_ADRH0] = 0x0100,
-
- [TXNLCR0] = 0x0080,
- [TXALCR0] = 0x0084,
- [RXNLCR0] = 0x0088,
- [RXALCR0] = 0x008C,
-};
-
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
SH_ETH_OFFSET_DEFAULTS,
@@ -569,6 +506,9 @@ static void sh_eth_set_rate_gether(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ if (WARN_ON(!mdp->cd->gecmr))
+ return;
+
switch (mdp->speed) {
case 10: /* 10BASE */
sh_eth_write(ndev, GECMR_10, GECMR);
@@ -590,7 +530,7 @@ static struct sh_eth_cpu_data r7s72100_data = {
.chip_reset = sh_eth_chip_reset,
.set_duplex = sh_eth_set_duplex,
- .register_type = SH_ETH_REG_FAST_RZ,
+ .register_type = SH_ETH_REG_GIGABIT,
.edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD,
@@ -663,6 +603,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
@@ -788,6 +729,7 @@ static struct sh_eth_cpu_data r8a77980_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.nbst = 1,
@@ -957,6 +899,9 @@ static void sh_eth_set_rate_giga(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ if (WARN_ON(!mdp->cd->gecmr))
+ return;
+
switch (mdp->speed) {
case 10: /* 10BASE */
sh_eth_write(ndev, 0x00000000, GECMR);
@@ -1002,6 +947,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.rpadir = 1,
@@ -1042,6 +988,7 @@ static struct sh_eth_cpu_data sh7734_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.no_trimd = 1,
@@ -1083,6 +1030,7 @@ static struct sh_eth_cpu_data sh7763_data = {
.apr = 1,
.mpr = 1,
.tpauser = 1,
+ .gecmr = 1,
.bculr = 1,
.hw_swap = 1,
.no_trimd = 1,
@@ -2140,11 +2088,13 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
add_reg(EESR);
add_reg(EESIPR);
add_reg(TDLAR);
- add_reg(TDFAR);
+ if (!cd->no_xdfar)
+ add_reg(TDFAR);
add_reg(TDFXR);
add_reg(TDFFR);
add_reg(RDLAR);
- add_reg(RDFAR);
+ if (!cd->no_xdfar)
+ add_reg(RDFAR);
add_reg(RDFXR);
add_reg(RDFFR);
add_reg(TRSCER);
@@ -2179,21 +2129,26 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
if (cd->tpauser)
add_reg(TPAUSER);
add_reg(TPAUSECR);
- add_reg(GECMR);
+ if (cd->gecmr)
+ add_reg(GECMR);
if (cd->bculr)
add_reg(BCULR);
add_reg(MAHR);
add_reg(MALR);
- add_reg(TROCR);
- add_reg(CDCR);
- add_reg(LCCR);
- add_reg(CNDCR);
+ if (!cd->no_tx_cntrs) {
+ add_reg(TROCR);
+ add_reg(CDCR);
+ add_reg(LCCR);
+ add_reg(CNDCR);
+ }
add_reg(CEFCR);
add_reg(FRECR);
add_reg(TSFRCR);
add_reg(TLFRCR);
- add_reg(CERCR);
- add_reg(CEECR);
+ if (cd->cexcr) {
+ add_reg(CERCR);
+ add_reg(CEECR);
+ }
add_reg(MAFCR);
if (cd->rtrate)
add_reg(RTRATE);
@@ -3121,9 +3076,6 @@ static const u16 *sh_eth_get_register_offset(int register_type)
case SH_ETH_REG_GIGABIT:
reg_offset = sh_eth_offset_gigabit;
break;
- case SH_ETH_REG_FAST_RZ:
- reg_offset = sh_eth_offset_fast_rz;
- break;
case SH_ETH_REG_FAST_RCAR:
reg_offset = sh_eth_offset_fast_rcar;
break;
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 850726301e1c..c1b3751b12c4 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -145,7 +145,6 @@ enum {
enum {
SH_ETH_REG_GIGABIT,
- SH_ETH_REG_FAST_RZ,
SH_ETH_REG_FAST_RCAR,
SH_ETH_REG_FAST_SH4,
SH_ETH_REG_FAST_SH3_SH2
@@ -490,6 +489,7 @@ struct sh_eth_cpu_data {
unsigned apr:1; /* EtherC has APR */
unsigned mpr:1; /* EtherC has MPR */
unsigned tpauser:1; /* EtherC has TPAUSER */
+ unsigned gecmr:1; /* EtherC has GECMR */
unsigned bculr:1; /* EtherC has BCULR */
unsigned tsu:1; /* EtherC has TSU */
unsigned hw_swap:1; /* E-DMAC has DE bit in EDMR */
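The sh_eth changes follow the driver's capability-flag pattern: each SoC's sh_eth_cpu_data sets one-bit feature flags, and shared code checks them before touching optional registers. That is what lets the R7S72100 drop its private register map and reuse the gigabit layout while not having GECMR. Shape of the guard (condensed from the hunks above):

	if (WARN_ON(!mdp->cd->gecmr))	/* SoC without GECMR: nothing to set */
		return;
	sh_eth_write(ndev, GECMR_100, GECMR);	/* 100BASE, for example */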
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 466483c4ac67..21465cb3d60a 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -476,6 +476,7 @@ static int sxgbe_get_regs_len(struct net_device *dev)
}
static const struct ethtool_ops sxgbe_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = sxgbe_getdrvinfo,
.get_msglevel = sxgbe_getmsglevel,
.set_msglevel = sxgbe_setmsglevel,
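This series adds .supported_coalesce_params to several ethtool_ops (sxgbe here, sfc and netsec further down): the ethtool core now rejects any coalesce field a driver does not declare, which is also why open-coded checks such as the use_adaptive_tx_coalesce test in the sfc drivers can simply be deleted. Minimal sketch of the declaration:

	static const struct ethtool_ops sketch_ops = {
		/* the core returns -EINVAL for any undeclared coalesce field */
		.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	};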
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 14393767ef9f..4580b30caae1 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -685,10 +685,70 @@ reset_nic:
return rc ? rc : rc2;
}
-int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
- bool spoofchk)
+static int efx_ef10_sriov_set_privilege_mask(struct efx_nic *efx, int vf_i,
+ u32 mask, u32 value)
{
- return spoofchk ? -EOPNOTSUPP : 0;
+ MCDI_DECLARE_BUF(pm_outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
+ MCDI_DECLARE_BUF(pm_inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u32 old_mask, new_mask;
+ size_t outlen;
+ int rc;
+
+ EFX_WARN_ON_PARANOID((value & ~mask) != 0);
+
+ /* Get privilege mask */
+ MCDI_POPULATE_DWORD_2(pm_inbuf, PRIVILEGE_MASK_IN_FUNCTION,
+ PRIVILEGE_MASK_IN_FUNCTION_PF, nic_data->pf_index,
+ PRIVILEGE_MASK_IN_FUNCTION_VF, vf_i);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK,
+ pm_inbuf, sizeof(pm_inbuf),
+ pm_outbuf, sizeof(pm_outbuf), &outlen);
+
+ if (rc != 0)
+ return rc;
+ if (outlen != MC_CMD_PRIVILEGE_MASK_OUT_LEN)
+ return -EIO;
+
+ old_mask = MCDI_DWORD(pm_outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);
+
+ new_mask = old_mask & ~mask;
+ new_mask |= value;
+
+ if (new_mask == old_mask)
+ return 0;
+
+ new_mask |= MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE;
+
+ /* Set privilege mask */
+ MCDI_SET_DWORD(pm_inbuf, PRIVILEGE_MASK_IN_NEW_MASK, new_mask);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK,
+ pm_inbuf, sizeof(pm_inbuf),
+ pm_outbuf, sizeof(pm_outbuf), &outlen);
+
+ if (rc != 0)
+ return rc;
+ if (outlen != MC_CMD_PRIVILEGE_MASK_OUT_LEN)
+ return -EIO;
+
+ return 0;
+}
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i, bool spoofchk)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ /* Can't enable spoofchk if firmware doesn't support it. */
+ if (!(nic_data->datapath_caps &
+ BIT(MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN)) &&
+ spoofchk)
+ return -EOPNOTSUPP;
+
+ return efx_ef10_sriov_set_privilege_mask(efx, vf_i,
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX,
+ spoofchk ? 0 : MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX);
}
int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
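The spoof-check support above is a read-modify-write of the per-VF privilege mask: only the bits in @mask may change, and they take their values from @value (with DO_CHANGE set so the MC applies the write). The arithmetic, as a sketch:

	static u32 apply_privilege_mask(u32 old_mask, u32 mask, u32 value)
	{
		return (old_mask & ~mask) | (value & mask);
	}

So enabling spoofchk clears MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX for the VF, and disabling it sets the bit back.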
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 4481f21a1f43..256807c28ff7 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -113,7 +113,6 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
*
*************************************************************************/
-static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 95395d67ea2d..66dcab140449 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -151,24 +151,6 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
-static inline void efx_filter_rfs_expire(struct work_struct *data)
-{
- struct delayed_work *dwork = to_delayed_work(data);
- struct efx_channel *channel;
- unsigned int time, quota;
-
- channel = container_of(dwork, struct efx_channel, filter_work);
- time = jiffies - channel->rfs_last_expiry;
- quota = channel->rfs_filter_count * time / (30 * HZ);
- if (quota > 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
- channel->rfs_last_expiry += time;
- /* Ensure we do more work eventually even if NAPI poll is not happening */
- schedule_delayed_work(dwork, 30 * HZ);
-}
-#define efx_filter_rfs_enabled() 1
-#else
-static inline void efx_filter_rfs_expire(struct work_struct *data) {}
-#define efx_filter_rfs_enabled() 0
#endif
/* RSS contexts */
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 73d4e39b5b16..c492523b986c 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -485,6 +485,23 @@ void efx_remove_eventq(struct efx_channel *channel)
*
*************************************************************************/
+#ifdef CONFIG_RFS_ACCEL
+static void efx_filter_rfs_expire(struct work_struct *data)
+{
+ struct delayed_work *dwork = to_delayed_work(data);
+ struct efx_channel *channel;
+ unsigned int time, quota;
+
+ channel = container_of(dwork, struct efx_channel, filter_work);
+ time = jiffies - channel->rfs_last_expiry;
+ quota = channel->rfs_filter_count * time / (30 * HZ);
+ if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
+ channel->rfs_last_expiry += time;
+ /* Ensure we do more work eventually even if NAPI poll is not happening */
+ schedule_delayed_work(dwork, 30 * HZ);
+}
+#endif
+
/* Allocate and initialise a channel structure. */
struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
@@ -1167,6 +1184,9 @@ static int efx_poll(struct napi_struct *napi, int budget)
struct efx_channel *channel =
container_of(napi, struct efx_channel, napi_str);
struct efx_nic *efx = channel->efx;
+#ifdef CONFIG_RFS_ACCEL
+ unsigned int time;
+#endif
int spent;
netif_vdbg(efx, intr, efx->net_dev,
@@ -1186,7 +1206,10 @@ static int efx_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_RFS_ACCEL
/* Perhaps expire some ARFS filters */
- mod_delayed_work(system_wq, &channel->filter_work, 0);
+ time = jiffies - channel->rfs_last_expiry;
+ /* Would our quota be >= 20? */
+ if (channel->rfs_filter_count * time >= 600 * HZ)
+ mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif
/* There is no race here; although napi_disable() will
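The "Would our quota be >= 20?" test keeps the expiry worker off the NAPI hot path: the worker computes quota = rfs_filter_count * time / (30 * HZ), so quota >= 20 rearranges to rfs_filter_count * time >= 600 * HZ, one multiply and compare instead of a division per poll. Sketch of the equivalence:

	/* true exactly when efx_filter_rfs_expire() would see quota >= 20 */
	static bool arfs_expiry_due(unsigned int filter_count,
				    unsigned long jiffies_elapsed)
	{
		return filter_count * jiffies_elapsed >= 600 * HZ;
	}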
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index b0d76bc19673..1799ff9a45d9 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -200,11 +200,11 @@ void efx_link_status_changed(struct efx_nic *efx)
unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
{
/* The maximum MTU that we can fit in a single page, allowing for
- * framing, overhead and XDP headroom.
+ * framing, overhead and XDP headroom + tailroom.
*/
int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
efx->rx_prefix_size + efx->type->rx_buffer_padding +
- efx->rx_ip_align + XDP_PACKET_HEADROOM;
+ efx->rx_ip_align + EFX_XDP_HEADROOM + EFX_XDP_TAILROOM;
return PAGE_SIZE - overhead;
}
@@ -302,8 +302,9 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_dma_len = (efx->rx_prefix_size +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
- rx_buf_len = (sizeof(struct efx_rx_page_state) + XDP_PACKET_HEADROOM +
- efx->rx_ip_align + efx->rx_dma_len);
+ rx_buf_len = (sizeof(struct efx_rx_page_state) + EFX_XDP_HEADROOM +
+ efx->rx_ip_align + efx->rx_dma_len + EFX_XDP_TAILROOM);
+
if (rx_buf_len <= PAGE_SIZE) {
efx->rx_scatter = efx->type->always_rx_scatter;
efx->rx_buffer_order = 0;
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 993b5769525b..04e88d05e8ff 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -232,9 +232,6 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
bool adaptive, rx_may_override_tx;
int rc;
- if (coalesce->use_adaptive_tx_coalesce)
- return -EINVAL;
-
efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
if (coalesce->rx_coalesce_usecs != rx_usecs)
@@ -582,6 +579,7 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
case ETHTOOL_GRXFH: {
struct efx_rss_context *ctx = &efx->rss_context;
+ __u64 data;
mutex_lock(&efx->rss_lock);
if (info->flow_type & FLOW_RSS && info->rss_context) {
@@ -591,35 +589,38 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
goto out_unlock;
}
}
- info->data = 0;
+
+ data = 0;
if (!efx_rss_active(ctx)) /* No RSS */
- goto out_unlock;
+ goto out_setdata_unlock;
+
switch (info->flow_type & ~FLOW_RSS) {
case UDP_V4_FLOW:
- if (ctx->rx_hash_udp_4tuple)
- /* fall through */
- case TCP_V4_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
case UDP_V6_FLOW:
if (ctx->rx_hash_udp_4tuple)
- /* fall through */
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ else
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V4_FLOW:
case TCP_V6_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- /* fall through */
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ break;
+ case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
case AH_ESP_V6_FLOW:
+ case IPV4_FLOW:
case IPV6_FLOW:
- info->data |= RXH_IP_SRC | RXH_IP_DST;
+ data = RXH_IP_SRC | RXH_IP_DST;
break;
default:
break;
}
+out_setdata_unlock:
+ info->data = data;
out_unlock:
mutex_unlock(&efx->rss_lock);
return rc;
@@ -1134,6 +1135,9 @@ static int efx_ethtool_set_fecparam(struct net_device *net_dev,
}
const struct ethtool_ops efx_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = efx_ethtool_get_drvinfo,
.get_regs_len = efx_ethtool_get_regs_len,
.get_regs = efx_ethtool_get_regs,
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 08bd6a321918..db90d94e24c9 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -603,9 +603,6 @@ static int ef4_ethtool_set_coalesce(struct net_device *net_dev,
bool adaptive, rx_may_override_tx;
int rc;
- if (coalesce->use_adaptive_tx_coalesce)
- return -EINVAL;
-
ef4_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
if (coalesce->rx_coalesce_usecs != rx_usecs)
@@ -1311,6 +1308,9 @@ static int ef4_ethtool_get_module_info(struct net_device *net_dev,
}
const struct ethtool_ops ef4_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USECS_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = ef4_ethtool_get_drvinfo,
.get_regs_len = ef4_ethtool_get_regs_len,
.get_regs = ef4_ethtool_get_regs,
diff --git a/drivers/net/ethernet/sfc/falcon/falcon_boards.c b/drivers/net/ethernet/sfc/falcon/falcon_boards.c
index 605f486fa675..729a05c1b0cf 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon_boards.c
@@ -88,11 +88,11 @@ static int ef4_init_lm87(struct ef4_nic *efx, const struct i2c_board_info *info,
const u8 *reg_values)
{
struct falcon_board *board = falcon_board(efx);
- struct i2c_client *client = i2c_new_device(&board->i2c_adap, info);
+ struct i2c_client *client = i2c_new_client_device(&board->i2c_adap, info);
int rc;
- if (!client)
- return -EIO;
+ if (IS_ERR(client))
+ return PTR_ERR(client);
/* Read-to-clear alarm/interrupt status */
i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
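i2c_new_device() returned NULL on failure; its replacement i2c_new_client_device() returns an ERR_PTR(), hence the switch from a NULL check to IS_ERR()/PTR_ERR(). The error-pointer convention packs a negative errno into the top of the pointer range, roughly (simplified from include/linux/err.h; the _SKETCH names are ours):

	#define MAX_ERRNO_SKETCH 4095
	static inline bool IS_ERR_sketch(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO_SKETCH;
	}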
diff --git a/drivers/net/ethernet/sfc/falcon/net_driver.h b/drivers/net/ethernet/sfc/falcon/net_driver.h
index a49ea2e719b6..a529ff395ead 100644
--- a/drivers/net/ethernet/sfc/falcon/net_driver.h
+++ b/drivers/net/ethernet/sfc/falcon/net_driver.h
@@ -288,7 +288,7 @@ struct ef4_rx_buffer {
struct ef4_rx_page_state {
dma_addr_t dma_addr;
- unsigned int __pad[0] ____cacheline_aligned;
+ unsigned int __pad[] ____cacheline_aligned;
};
/**
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 8164f0edcbf0..b084e623b5f4 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -91,6 +91,12 @@
#define EFX_RX_BUF_ALIGNMENT 4
#endif
+/* Non-standard XDP_PACKET_HEADROOM and tailroom to satisfy XDP_REDIRECT and
+ * still fit two standard MTU size packets into a single 4K page.
+ */
+#define EFX_XDP_HEADROOM 128
+#define EFX_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;
struct hwtstamp_config;
@@ -333,7 +339,7 @@ struct efx_rx_buffer {
struct efx_rx_page_state {
dma_addr_t dma_addr;
- unsigned int __pad[0] ____cacheline_aligned;
+ unsigned int __pad[] ____cacheline_aligned;
};
/**
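A back-of-envelope check on EFX_XDP_HEADROOM (illustrative numbers; exact sizes vary by architecture): with roughly 1536 bytes of frame per standard-MTU buffer and a tailroom of SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) of about 320 bytes on 64-bit builds, two buffers at 128 bytes of headroom need about 2 * (128 + 1536 + 320) = 3968 bytes, which still fits in a 4096-byte page alongside struct efx_rx_page_state; at the default XDP_PACKET_HEADROOM of 256 the same two buffers would need 4224 bytes and no longer fit.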
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index a2042f16babc..260352d97d9d 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -302,7 +302,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
efx->rx_prefix_size);
xdp.data = *ehp;
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+ xdp.data_hard_start = xdp.data - EFX_XDP_HEADROOM;
/* No support yet for XDP metadata */
xdp_set_data_meta_invalid(&xdp);
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index ee8beb87bdc1..e10c23833515 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -412,10 +412,10 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
- XDP_PACKET_HEADROOM;
+ EFX_XDP_HEADROOM;
rx_buf->page = page;
rx_buf->page_offset = page_offset + efx->rx_ip_align +
- XDP_PACKET_HEADROOM;
+ EFX_XDP_HEADROOM;
rx_buf->len = efx->rx_dma_len;
rx_buf->flags = 0;
++rx_queue->added_count;
@@ -433,7 +433,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
void efx_rx_config_page_split(struct efx_nic *efx)
{
efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
- XDP_PACKET_HEADROOM,
+ EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
EFX_RX_BUF_ALIGNMENT);
efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 8aafc54a4684..19b58563cb78 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -287,9 +287,8 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
return PTR_ERR(segments);
dev_consume_skb_any(skb);
- skb = segments;
- skb_list_walk_safe(skb, skb, next) {
+ skb_list_walk_safe(segments, skb, next) {
skb_mark_not_on_list(skb);
efx_enqueue_skb(tx_queue, skb);
}
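The efx_tx_tso_fallback() tweak stops reusing one variable as both the list head and the walk cursor. skb_list_walk_safe() caches the next pointer before the loop body runs, so the current skb may be unlinked or freed inside the body; its rough shape (a sketch of the macro in include/linux/skbuff.h):

	#define skb_list_walk_safe_sketch(first, skb, next_skb)			\
		for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL;	\
		     (skb);							\
		     (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)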
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index db6b2988e632..7305e8e86c51 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -582,40 +582,23 @@ static void ioc3_timer(struct timer_list *t)
/* Try to find a PHY. There is no apparent relation between the MII addresses
* in the SGI documentation and what we find in reality, so we simply probe
- * for the PHY. It seems IOC3 PHYs usually live on address 31. One of my
- * onboard IOC3s has the special oddity that probing doesn't seem to find it
- * yet the interface seems to work fine, so if probing fails we for now will
- * simply default to PHY 31 instead of bailing out.
+ * for the PHY.
*/
static int ioc3_mii_init(struct ioc3_private *ip)
{
- int ioc3_phy_workaround = 1;
- int i, found = 0, res = 0;
u16 word;
+ int i;
for (i = 0; i < 32; i++) {
word = ioc3_mdio_read(ip->mii.dev, i, MII_PHYSID1);
if (word != 0xffff && word != 0x0000) {
- found = 1;
- break; /* Found a PHY */
+ ip->mii.phy_id = i;
+ return 0;
}
}
-
- if (!found) {
- if (ioc3_phy_workaround) {
- i = 31;
- } else {
- ip->mii.phy_id = -1;
- res = -ENODEV;
- goto out;
- }
- }
-
- ip->mii.phy_id = i;
-
-out:
- return res;
+ ip->mii.phy_id = -1;
+ return -ENODEV;
}
static void ioc3_mii_start(struct ioc3_private *ip)
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index e8224b543dfc..a5a0fb60193a 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -589,6 +589,8 @@ static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
}
static const struct ethtool_ops netsec_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = netsec_et_get_drvinfo,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
@@ -896,9 +898,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
case XDP_TX:
ret = netsec_xdp_xmit_back(priv, xdp);
if (ret != NETSEC_XDP_TX)
- __page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data), len,
+ true);
break;
case XDP_REDIRECT:
err = xdp_do_redirect(priv->ndev, xdp, prog);
@@ -906,9 +908,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
ret = NETSEC_XDP_REDIR;
} else {
ret = NETSEC_XDP_CONSUMED;
- __page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data), len,
+ true);
}
break;
default:
@@ -919,9 +921,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
/* fall through -- handle aborts by dropping packet */
case XDP_DROP:
ret = NETSEC_XDP_CONSUMED;
- __page_pool_put_page(dring->page_pool,
- virt_to_head_page(xdp->data),
- len, true);
+ page_pool_put_page(dring->page_pool,
+ virt_to_head_page(xdp->data), len, true);
break;
}
@@ -1020,8 +1021,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
* cache state. Since we paid the allocation cost if
* building an skb fails try to put the page into cache
*/
- __page_pool_put_page(dring->page_pool, page,
- pkt_len, true);
+ page_pool_put_page(dring->page_pool, page, pkt_len,
+ true);
netif_err(priv, drv, priv->ndev,
"rx failed to build skb\n");
break;
@@ -1148,11 +1149,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
~tcp_v4_check(0, ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0);
} else {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
}
tx_ctrl.tcp_seg_offload_flag = true;
@@ -1199,7 +1196,7 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
if (id == NETSEC_RING_RX) {
struct page *page = virt_to_page(desc->addr);
- page_pool_put_page(dring->page_pool, page, false);
+ page_pool_put_full_page(dring->page_pool, page, false);
} else if (id == NETSEC_RING_TX) {
dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
DMA_TO_DEVICE);
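The netsec hunks track a page_pool API rename: the entry point formerly called __page_pool_put_page() is now page_pool_put_page() and takes the DMA sync length, while the old length-less call becomes page_pool_put_full_page(), which syncs the whole page. The mapping, as used above (signatures taken from this diff):

	page_pool_put_page(pool, page, dma_sync_len, allow_direct);
	page_pool_put_full_page(pool, page, allow_direct); /* sync entire page */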
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 338e25a6374e..b46f8d2ae6d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -3,6 +3,7 @@ config STMMAC_ETH
tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
select MII
+ select MDIO_XPCS
select PAGE_POOL
select PHYLINK
select CRC32
@@ -197,6 +198,15 @@ config DWMAC_SUN8I
EMAC ethernet controller.
endif
+config DWMAC_INTEL
+ tristate "Intel GMAC support"
+ default X86
+ depends on X86 && STMMAC_ETH && PCI
+ depends on COMMON_CLK
+ ---help---
+	  This selects the Intel platform-specific bus support for the
+	  stmmac driver. This driver is used on Intel Quark/EHL/TGL platforms.
+
config STMMAC_PCI
tristate "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index c59926d96bcc..5a6f265bc540 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -30,5 +30,6 @@ obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
stmmac-platform-objs:= stmmac_platform.o
dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
+obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 487099092693..6208a68a331d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -15,6 +15,7 @@
#include <linux/netdevice.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
+#include <linux/mdio-xpcs.h>
#include <linux/module.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define STMMAC_VLAN_TAG_USED
@@ -33,6 +34,11 @@
#define DWMAC_CORE_5_00 0x50
#define DWMAC_CORE_5_10 0x51
#define DWXGMAC_CORE_2_10 0x21
+#define DWXLGMAC_CORE_2_00 0x20
+
+/* Device ID */
+#define DWXGMAC_ID 0x76
+#define DWXLGMAC_ID 0x27
#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
@@ -425,6 +431,12 @@ struct mac_link {
u32 speed5000;
u32 speed10000;
} xgmii;
+ struct {
+ u32 speed25000;
+ u32 speed40000;
+ u32 speed50000;
+ u32 speed100000;
+ } xlgmii;
};
struct mii_regs {
@@ -446,6 +458,8 @@ struct mac_device_info {
const struct stmmac_hwtimestamp *ptp;
const struct stmmac_tc_ops *tc;
const struct stmmac_mmc_ops *mmc;
+ const struct mdio_xpcs_ops *xpcs;
+ struct mdio_xpcs_args xpcs_args;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
void __iomem *pcsr; /* vpointer to device CSRs */
@@ -456,6 +470,9 @@ struct mac_device_info {
unsigned int pcs;
unsigned int pmt;
unsigned int ps;
+ unsigned int xlgmac;
+ unsigned int num_vlan;
+ u32 vlan_filter[32];
};
struct stmmac_rx_routing {
@@ -467,6 +484,7 @@ int dwmac100_setup(struct stmmac_priv *priv);
int dwmac1000_setup(struct stmmac_priv *priv);
int dwmac4_setup(struct stmmac_priv *priv);
int dwxgmac2_setup(struct stmmac_priv *priv);
+int dwxlgmac2_setup(struct stmmac_priv *priv);
void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
new file mode 100644
index 000000000000..5419d4e478c0
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -0,0 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Intel Corporation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/pci.h>
+#include <linux/dmi.h>
+#include "stmmac.h"
+
+/* This struct is used to associate the PCI function of the MAC controller
+ * on a board, discovered via DMI, with the address of the PHY connected to
+ * that MAC. A negative address means that the MAC controller is not
+ * connected to a PHY.
+ */
+struct stmmac_pci_func_data {
+ unsigned int func;
+ int phy_addr;
+};
+
+struct stmmac_pci_dmi_data {
+ const struct stmmac_pci_func_data *func;
+ size_t nfuncs;
+};
+
+struct stmmac_pci_info {
+ int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
+};
+
+static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
+ const struct dmi_system_id *dmi_list)
+{
+ const struct stmmac_pci_func_data *func_data;
+ const struct stmmac_pci_dmi_data *dmi_data;
+ const struct dmi_system_id *dmi_id;
+ int func = PCI_FUNC(pdev->devfn);
+ size_t n;
+
+ dmi_id = dmi_first_match(dmi_list);
+ if (!dmi_id)
+ return -ENODEV;
+
+ dmi_data = dmi_id->driver_data;
+ func_data = dmi_data->func;
+
+ for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
+ if (func_data->func == func)
+ return func_data->phy_addr;
+
+ return -ENODEV;
+}
+
+static void common_default_data(struct plat_stmmacenet_data *plat)
+{
+ plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->has_gmac = 1;
+ plat->force_sf_dma_mode = 1;
+
+ plat->mdio_bus_data->needs_reset = true;
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
+ /* Set default number of RX and TX queues to use */
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
+
+ /* Disable Priority config by default */
+ plat->tx_queues_cfg[0].use_prio = false;
+ plat->rx_queues_cfg[0].use_prio = false;
+
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[0].pkt_route = 0x0;
+}
+
+static int intel_mgbe_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int i;
+
+ plat->clk_csr = 5;
+ plat->has_gmac = 0;
+ plat->has_gmac4 = 1;
+ plat->force_sf_dma_mode = 0;
+ plat->tso_en = 1;
+
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+
+ for (i = 0; i < plat->rx_queues_to_use; i++) {
+ plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
+ plat->rx_queues_cfg[i].chan = i;
+
+ /* Disable Priority config by default */
+ plat->rx_queues_cfg[i].use_prio = false;
+
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[i].pkt_route = 0x0;
+ }
+
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
+
+ /* Disable Priority config by default */
+ plat->tx_queues_cfg[i].use_prio = false;
+ }
+
+ /* FIFO size is 4096 bytes for 1 tx/rx queue */
+ plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
+ plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
+
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
+ plat->tx_queues_cfg[0].weight = 0x09;
+ plat->tx_queues_cfg[1].weight = 0x0A;
+ plat->tx_queues_cfg[2].weight = 0x0B;
+ plat->tx_queues_cfg[3].weight = 0x0C;
+ plat->tx_queues_cfg[4].weight = 0x0D;
+ plat->tx_queues_cfg[5].weight = 0x0E;
+ plat->tx_queues_cfg[6].weight = 0x0F;
+ plat->tx_queues_cfg[7].weight = 0x10;
+
+ plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->pblx8 = true;
+ plat->dma_cfg->fixed_burst = 0;
+ plat->dma_cfg->mixed_burst = 0;
+ plat->dma_cfg->aal = 0;
+
+ plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
+ GFP_KERNEL);
+ if (!plat->axi)
+ return -ENOMEM;
+
+ plat->axi->axi_lpi_en = 0;
+ plat->axi->axi_xit_frm = 0;
+ plat->axi->axi_wr_osr_lmt = 1;
+ plat->axi->axi_rd_osr_lmt = 1;
+ plat->axi->axi_blen[0] = 4;
+ plat->axi->axi_blen[1] = 8;
+ plat->axi->axi_blen[2] = 16;
+
+ plat->ptp_max_adj = plat->clk_ptp_rate;
+
+ /* Set system clock */
+ plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
+ "stmmac-clk", NULL, 0,
+ plat->clk_ptp_rate);
+
+ if (IS_ERR(plat->stmmac_clk)) {
+		dev_warn(&pdev->dev, "Failed to register stmmac-clk\n");
+ plat->stmmac_clk = NULL;
+ }
+ clk_prepare_enable(plat->stmmac_clk);
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
+ return 0;
+}
+
+static int ehl_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ plat->rx_queues_to_use = 8;
+ plat->tx_queues_to_use = 8;
+ plat->clk_ptp_rate = 200000000;
+ ret = intel_mgbe_common_data(pdev, plat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ehl_sgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+
+ return ehl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_sgmii1g_pci_info = {
+ .setup = ehl_sgmii_data,
+};
+
+static int ehl_rgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
+
+ return ehl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_rgmii1g_pci_info = {
+ .setup = ehl_rgmii_data,
+};
+
+static int ehl_pse0_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 2;
+ plat->phy_addr = 1;
+ return ehl_common_data(pdev, plat);
+}
+
+static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
+ return ehl_pse0_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_pse0_rgmii1g_pci_info = {
+ .setup = ehl_pse0_rgmii1g_data,
+};
+
+static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ return ehl_pse0_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_pse0_sgmii1g_pci_info = {
+ .setup = ehl_pse0_sgmii1g_data,
+};
+
+static int ehl_pse1_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 3;
+ plat->phy_addr = 1;
+ return ehl_common_data(pdev, plat);
+}
+
+static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
+ return ehl_pse1_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_pse1_rgmii1g_pci_info = {
+ .setup = ehl_pse1_rgmii1g_data,
+};
+
+static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ return ehl_pse1_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_pse1_sgmii1g_pci_info = {
+ .setup = ehl_pse1_sgmii1g_data,
+};
+
+static int tgl_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ plat->rx_queues_to_use = 6;
+ plat->tx_queues_to_use = 4;
+ plat->clk_ptp_rate = 200000000;
+ ret = intel_mgbe_common_data(pdev, plat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tgl_sgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ return tgl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info tgl_sgmii1g_pci_info = {
+ .setup = tgl_sgmii_data,
+};
+
+static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
+ {
+ .func = 6,
+ .phy_addr = 1,
+ },
+};
+
+static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
+ .func = galileo_stmmac_func_data,
+ .nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
+};
+
+static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
+ {
+ .func = 6,
+ .phy_addr = 1,
+ },
+ {
+ .func = 7,
+ .phy_addr = 1,
+ },
+};
+
+static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
+ .func = iot2040_stmmac_func_data,
+ .nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
+};
+
+static const struct dmi_system_id quark_pci_dmi[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+	/* There are two types of SIMATIC IOT2000: IOT2020 and IOT2040.
+	 * The asset tag "6ES7647-0AA00-0YA2" is only for the IOT2020, which
+	 * has only one PCI network device, while the other asset tags are
+	 * for the IOT2040, which has two.
+	 */
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+ DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
+ "6ES7647-0AA00-0YA2"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+ },
+ .driver_data = (void *)&iot2040_stmmac_dmi_data,
+ },
+ {}
+};
+
+static int quark_default_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ /* Set common default data first */
+ common_default_data(plat);
+
+	/* Refuse to load the driver and register the net device if the MAC
+	 * controller does not connect to any PHY interface.
+	 */
+ ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
+ if (ret < 0) {
+ /* Return error to the caller on DMI enabled boards. */
+ if (dmi_get_system_info(DMI_BOARD_NAME))
+ return ret;
+
+		/* Galileo boards with old firmware don't support DMI. We always
+		 * use 1 as the PHY address here, so that at least the first
+		 * MAC controller found gets probed.
+		 */
+ ret = 1;
+ }
+
+ plat->bus_id = pci_dev_id(pdev);
+ plat->phy_addr = ret;
+ plat->phy_interface = PHY_INTERFACE_MODE_RMII;
+
+ plat->dma_cfg->pbl = 16;
+ plat->dma_cfg->pblx8 = true;
+ plat->dma_cfg->fixed_burst = 1;
+ /* AXI (TODO) */
+
+ return 0;
+}
+
+static const struct stmmac_pci_info quark_pci_info = {
+ .setup = quark_default_data,
+};
+
+/**
+ * intel_eth_pci_probe
+ *
+ * @pdev: pci device pointer
+ * @id: pointer to table of device id/id's.
+ *
+ * Description: This probing function gets called for all PCI devices which
+ * match the ID table and are not "owned" by another driver yet. It is passed
+ * a "struct pci_dev *" for each device whose entry in the ID table matches
+ * the device. The probe function returns zero when the driver chooses to
+ * take "ownership" of the device, or a negative error code otherwise.
+ */
+static int intel_eth_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_resources res;
+ int i;
+ int ret;
+
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+ plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
+ GFP_KERNEL);
+ if (!plat->dma_cfg)
+ return -ENOMEM;
+
+ /* Enable pci device */
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
+ __func__);
+ return ret;
+ }
+
+ /* Get the base address of device */
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
+ if (ret)
+ return ret;
+ break;
+ }
+
+ pci_set_master(pdev);
+
+ ret = info->setup(pdev, plat);
+ if (ret)
+ return ret;
+
+ pci_enable_msi(pdev);
+
+ memset(&res, 0, sizeof(res));
+ res.addr = pcim_iomap_table(pdev)[i];
+ res.wol_irq = pdev->irq;
+ res.irq = pdev->irq;
+
+ return stmmac_dvr_probe(&pdev->dev, plat, &res);
+}
+
+/**
+ * intel_eth_pci_remove
+ *
+ * @pdev: pci device pointer
+ * Description: this function calls the main driver removal routine to free
+ * the net resources and then releases the PCI resources.
+ */
+static void intel_eth_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int i;
+
+ stmmac_dvr_remove(&pdev->dev);
+
+ if (priv->plat->stmmac_clk)
+ clk_unregister_fixed_rate(priv->plat->stmmac_clk);
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ pcim_iounmap_regions(pdev, BIT(i));
+ break;
+ }
+
+ pci_disable_device(pdev);
+}
+
+static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = stmmac_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+ return 0;
+}
+
+static int __maybe_unused intel_eth_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return stmmac_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
+ intel_eth_pci_resume);
+
+#define PCI_DEVICE_ID_INTEL_QUARK_ID 0x0937
+#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G_ID 0x4b30
+#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G_ID 0x4b31
+#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5_ID 0x4b32
+/* The Intel(R) Programmable Services Engine (Intel(R) PSE) consists of two
+ * MACs, named PSE0 and PSE1.
+ */
+#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G_ID 0x4ba0
+#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G_ID 0x4ba1
+#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5_ID 0x4ba2
+#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID 0x4bb0
+#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID 0x4bb1
+#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID 0x4bb2
+#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
+
+static const struct pci_device_id intel_eth_pci_id_table[] = {
+ { PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G_ID, &ehl_rgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G_ID, &ehl_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5_ID, &ehl_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G_ID,
+ &ehl_pse0_rgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G_ID,
+ &ehl_pse0_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5_ID,
+ &ehl_pse0_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G_ID,
+ &ehl_pse1_rgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID,
+ &ehl_pse1_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID,
+ &ehl_pse1_sgmii1g_pci_info) },
+ { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_pci_info) },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
+
+static struct pci_driver intel_eth_pci_driver = {
+ .name = "intel-eth-pci",
+ .id_table = intel_eth_pci_id_table,
+ .probe = intel_eth_pci_probe,
+ .remove = intel_eth_pci_remove,
+ .driver = {
+ .pm = &intel_eth_pm_ops,
+ },
+};
+
+module_pci_driver(intel_eth_pci_driver);
+
+MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
+MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
+MODULE_LICENSE("GPL v2");
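On the ID table above: PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_pci_info) is the stock macro for vendor/device entries that carry driver_data, roughly (simplified from include/linux/pci.h; the _SKETCH name is ours):

	#define PCI_DEVICE_DATA_SKETCH(vend, dev, data)			\
		.vendor = PCI_VENDOR_ID_##vend,				\
		.device = PCI_DEVICE_ID_##vend##_##dev,			\
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,	\
		.driver_data = (kernel_ulong_t)(data)

intel_eth_pci_probe() then recovers its stmmac_pci_info through id->driver_data.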
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 9b7be996d07b..b2dc99289687 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -304,7 +304,7 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
/* Get ETH_CLK clocks */
dwmac->clk_eth_ck = devm_clk_get(dev, "eth-ck");
if (IS_ERR(dwmac->clk_eth_ck)) {
- dev_warn(dev, "No phy clock provided...\n");
+ dev_info(dev, "No phy clock provided...\n");
dwmac->clk_eth_ck = NULL;
}
@@ -324,7 +324,7 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
/* Get IRQ information early to have an ability to ask for deferred
* probe if needed before we went too far with resource allocation.
*/
- dwmac->irq_pwr_wakeup = platform_get_irq_byname(pdev,
+ dwmac->irq_pwr_wakeup = platform_get_irq_byname_optional(pdev,
"stm32_pwr_wakeup");
if (dwmac->irq_pwr_wakeup == -EPROBE_DEFER)
return -EPROBE_DEFER;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index af50af27550b..28cac28253b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -18,6 +18,7 @@
#define GMAC_PACKET_FILTER 0x00000008
#define GMAC_HASH_TAB(x) (0x10 + (x) * 4)
#define GMAC_VLAN_TAG 0x00000050
+#define GMAC_VLAN_TAG_DATA 0x00000054
#define GMAC_VLAN_HASH_TABLE 0x00000058
#define GMAC_RX_FLOW_CTRL 0x00000090
#define GMAC_VLAN_INCL 0x00000060
@@ -90,6 +91,29 @@
#define GMAC_VLAN_VLC GENMASK(17, 16)
#define GMAC_VLAN_VLC_SHIFT 16
+/* MAC VLAN Tag */
+#define GMAC_VLAN_TAG_VID GENMASK(15, 0)
+#define GMAC_VLAN_TAG_ETV BIT(16)
+
+/* MAC VLAN Tag Control */
+#define GMAC_VLAN_TAG_CTRL_OB BIT(0)
+#define GMAC_VLAN_TAG_CTRL_CT BIT(1)
+#define GMAC_VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2)
+#define GMAC_VLAN_TAG_CTRL_OFS_SHIFT 2
+#define GMAC_VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21)
+#define GMAC_VLAN_TAG_CTRL_EVLS_SHIFT 21
+#define GMAC_VLAN_TAG_CTRL_EVLRXS BIT(24)
+
+#define GMAC_VLAN_TAG_STRIP_NONE (0x0 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
+#define GMAC_VLAN_TAG_STRIP_PASS (0x1 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
+#define GMAC_VLAN_TAG_STRIP_FAIL (0x2 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
+#define GMAC_VLAN_TAG_STRIP_ALL (0x3 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
+
+/* MAC VLAN Tag Data/Filter */
+#define GMAC_VLAN_TAG_DATA_VID GENMASK(15, 0)
+#define GMAC_VLAN_TAG_DATA_VEN BIT(16)
+#define GMAC_VLAN_TAG_DATA_ETV BIT(17)
+
/* MAC RX Queue Enable */
#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2))
#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2)
@@ -248,6 +272,7 @@ enum power_event {
#define GMAC_HW_FEAT_FRPBS GENMASK(12, 11)
#define GMAC_HW_FEAT_FRPSEL BIT(10)
#define GMAC_HW_FEAT_DVLAN BIT(5)
+#define GMAC_HW_FEAT_NRVF GENMASK(2, 0)
/* MAC HW ADDR regs */
#define GMAC_HI_DCS GENMASK(18, 16)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index dc09d2131e40..39692d15d80c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -394,6 +394,156 @@ static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}
+static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ u32 val;
+
+ val = readl(ioaddr + GMAC_VLAN_TAG);
+ val &= ~GMAC_VLAN_TAG_VID;
+ val |= GMAC_VLAN_TAG_ETV | vid;
+
+ writel(val, ioaddr + GMAC_VLAN_TAG);
+}
+
+static int dwmac4_write_vlan_filter(struct net_device *dev,
+ struct mac_device_info *hw,
+ u8 index, u32 data)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ int i, timeout = 10;
+ u32 val;
+
+ if (index >= hw->num_vlan)
+ return -EINVAL;
+
+ writel(data, ioaddr + GMAC_VLAN_TAG_DATA);
+
+ val = readl(ioaddr + GMAC_VLAN_TAG);
+ val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
+ GMAC_VLAN_TAG_CTRL_CT |
+ GMAC_VLAN_TAG_CTRL_OB);
+ val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;
+
+ writel(val, ioaddr + GMAC_VLAN_TAG);
+
+ for (i = 0; i < timeout; i++) {
+ val = readl(ioaddr + GMAC_VLAN_TAG);
+ if (!(val & GMAC_VLAN_TAG_CTRL_OB))
+ return 0;
+ udelay(1);
+ }
+
+ netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
+
+ return -EBUSY;
+}
+
+static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int index = -1;
+ u32 val = 0;
+ int i, ret;
+
+ if (vid > 4095)
+ return -EINVAL;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ /* For single VLAN filter, VID 0 means VLAN promiscuous */
+ if (vid == 0) {
+ netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
+ return -EPERM;
+ }
+
+ if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
+ netdev_err(dev, "Only single VLAN ID supported\n");
+ return -EPERM;
+ }
+
+ hw->vlan_filter[0] = vid;
+ dwmac4_write_single_vlan(dev, vid);
+
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;
+
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] == val)
+ return 0;
+ else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
+ index = i;
+ }
+
+ if (index == -1) {
+		netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %u)\n",
+ hw->num_vlan);
+ return -EPERM;
+ }
+
+ ret = dwmac4_write_vlan_filter(dev, hw, index, val);
+
+ if (!ret)
+ hw->vlan_filter[index] = val;
+
+ return ret;
+}
+
+static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int i, ret = 0;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
+ hw->vlan_filter[0] = 0;
+ dwmac4_write_single_vlan(dev, 0);
+ }
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
+ ret = dwmac4_write_vlan_filter(dev, hw, i, 0);
+
+ if (!ret)
+ hw->vlan_filter[i] = 0;
+ else
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw)
+{
+ u32 val;
+ int i;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
+ return;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
+ val = hw->vlan_filter[i];
+ dwmac4_write_vlan_filter(dev, hw, i, val);
+ }
+ }
+}
+
static void dwmac4_set_filter(struct mac_device_info *hw,
struct net_device *dev)
{
@@ -469,6 +619,10 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
}
}
+ /* VLAN filtering */
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ value |= GMAC_PACKET_FILTER_VTFE;
+
writel(value, ioaddr + GMAC_PACKET_FILTER);
}
@@ -947,6 +1101,9 @@ const struct stmmac_ops dwmac4_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
+ .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
+ .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
+ .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};
const struct stmmac_ops dwmac410_ops = {
@@ -987,6 +1144,9 @@ const struct stmmac_ops dwmac410_ops = {
.config_l4_filter = dwmac4_config_l4_filter,
.est_configure = dwmac5_est_configure,
.fpe_configure = dwmac5_fpe_configure,
+ .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
+ .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
+ .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};
const struct stmmac_ops dwmac510_ops = {
@@ -1032,8 +1192,42 @@ const struct stmmac_ops dwmac510_ops = {
.config_l4_filter = dwmac4_config_l4_filter,
.est_configure = dwmac5_est_configure,
.fpe_configure = dwmac5_fpe_configure,
+ .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
+ .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
+ .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};
+static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
+{
+ u32 val, num_vlan;
+
+ val = readl(ioaddr + GMAC_HW_FEATURE3);
+ switch (val & GMAC_HW_FEAT_NRVF) {
+ case 0:
+ num_vlan = 1;
+ break;
+ case 1:
+ num_vlan = 4;
+ break;
+ case 2:
+ num_vlan = 8;
+ break;
+ case 3:
+ num_vlan = 16;
+ break;
+ case 4:
+ num_vlan = 24;
+ break;
+ case 5:
+ num_vlan = 32;
+ break;
+ default:
+ num_vlan = 1;
+ }
+
+ return num_vlan;
+}
+
int dwmac4_setup(struct stmmac_priv *priv)
{
struct mac_device_info *mac = priv->hw;
@@ -1062,6 +1256,7 @@ int dwmac4_setup(struct stmmac_priv *priv)
mac->mii.reg_mask = GENMASK(20, 16);
mac->mii.clk_csr_shift = 8;
mac->mii.clk_csr_mask = GENMASK(11, 8);
+ mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);
return 0;
}
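
The add path above keeps a shadow copy of each hardware filter slot in hw->vlan_filter[] and reuses the enable (VEN) bit to spot free entries. A minimal userspace model of that slot search, assuming illustrative bit positions for VEN/ETV (the real ones live in dwmac4.h); only the dedup-and-reuse logic is the point:

```c
/* Userspace model of the extended Rx VLAN filter slot search in
 * dwmac4_add_hw_vlan_rx_fltr(). VEN/ETV bit positions are assumptions
 * made for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define VLAN_TAG_DATA_VEN	(1u << 16)	/* assumed: entry enabled */
#define VLAN_TAG_DATA_ETV	(1u << 17)	/* assumed: compare VID only */

static int add_vlan(uint32_t *filters, int num, uint16_t vid)
{
	uint32_t val = VLAN_TAG_DATA_ETV | VLAN_TAG_DATA_VEN | vid;
	int index = -1;

	for (int i = 0; i < num; i++) {
		if (filters[i] == val)
			return i;			/* already programmed */
		if (!(filters[i] & VLAN_TAG_DATA_VEN))
			index = i;			/* candidate free slot */
	}
	if (index < 0)
		return -1;			/* table full, like -EPERM above */
	filters[index] = val;
	return index;
}

int main(void)
{
	uint32_t filters[4] = { 0 };

	printf("vid 100 -> slot %d\n", add_vlan(filters, 4, 100));
	printf("vid 100 -> slot %d (deduplicated)\n", add_vlan(filters, 4, 100));
	printf("vid 200 -> slot %d\n", add_vlan(filters, 4, 200));
	return 0;
}
```

The shadow copy is what makes the del and restore paths cheap: delete clears both the array entry and the register, and restore simply replays every VEN-set entry after resume.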
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 9becca280074..6e30d7eb4983 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -6,6 +6,7 @@
*/
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/delay.h>
#include "common.h"
#include "dwmac4_dma.h"
@@ -14,22 +15,14 @@
int dwmac4_dma_reset(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
- int limit;
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
- break;
- mdelay(10);
- }
-
- if (limit < 0)
- return -EBUSY;
- return 0;
+ return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ !(value & DMA_BUS_MODE_SFT_RESET),
+ 10000, 100000);
}
void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
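
The hunk above replaces an open-coded mdelay() retry loop with readl_poll_timeout(); the same conversion is applied in dwmac_lib.c and stmmac_hwtstamp.c below. A userspace sketch of the semantics the helper provides (poll, sleep between reads, bounded budget, -ETIMEDOUT on expiry):

```c
/* Userspace sketch of readl_poll_timeout() semantics, assuming the
 * 10ms/100ms sleep and timeout values used by the patch. The kernel
 * helper returns -ETIMEDOUT on expiry; mimicked here.
 */
#include <stdio.h>
#include <unistd.h>

#define ETIMEDOUT 110

static int poll_timeout(int (*read_reg)(void), int clear_mask,
			unsigned int sleep_us, unsigned int timeout_us)
{
	for (unsigned int waited = 0; ; waited += sleep_us) {
		if (!(read_reg() & clear_mask))
			return 0;		/* bit self-cleared: done */
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		usleep(sleep_us);
	}
}

static int reads;
static int fake_dma_bus_mode(void)
{
	return ++reads < 3 ? 0x1 : 0x0;	/* SFT_RESET clears on 3rd read */
}

int main(void)
{
	/* 10000us poll interval, 100000us budget, as in the hunk above */
	printf("reset done: %d\n",
	       poll_timeout(fake_dma_bus_mode, 0x1, 10000, 100000));
	return 0;
}
```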
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 688d36095333..cb87d31a99df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -16,19 +16,14 @@
int dwmac_dma_reset(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
- int err;
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
!(value & DMA_BUS_MODE_SFT_RESET),
10000, 100000);
- if (err)
- return -EBUSY;
-
- return 0;
}
/* CSR1 enables the transmit DMA to check for new descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 67b754a56288..0e4575f7bedb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -9,6 +9,7 @@
#include <linux/iopoll.h>
#include "stmmac.h"
#include "stmmac_ptp.h"
+#include "dwxlgmac2.h"
#include "dwxgmac2.h"
static void dwxgmac2_core_init(struct mac_device_info *hw,
@@ -1485,6 +1486,67 @@ const struct stmmac_ops dwxgmac210_ops = {
.fpe_configure = dwxgmac3_fpe_configure,
};
+static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0) & ~XGMAC_RXQEN(queue);
+ if (mode == MTL_QUEUE_AVB)
+ value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
+ else if (mode == MTL_QUEUE_DCB)
+ value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
+ writel(value, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
+}
+
+const struct stmmac_ops dwxlgmac2_ops = {
+ .core_init = dwxgmac2_core_init,
+ .set_mac = dwxgmac2_set_mac,
+ .rx_ipc = dwxgmac2_rx_ipc,
+ .rx_queue_enable = dwxlgmac2_rx_queue_enable,
+ .rx_queue_prio = dwxgmac2_rx_queue_prio,
+ .tx_queue_prio = dwxgmac2_tx_queue_prio,
+ .rx_queue_routing = NULL,
+ .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
+ .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
+ .config_cbs = dwxgmac2_config_cbs,
+ .dump_regs = dwxgmac2_dump_regs,
+ .host_irq_status = dwxgmac2_host_irq_status,
+ .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
+ .flow_ctrl = dwxgmac2_flow_ctrl,
+ .pmt = dwxgmac2_pmt,
+ .set_umac_addr = dwxgmac2_set_umac_addr,
+ .get_umac_addr = dwxgmac2_get_umac_addr,
+ .set_eee_mode = dwxgmac2_set_eee_mode,
+ .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_eee_timer = dwxgmac2_set_eee_timer,
+ .set_eee_pls = dwxgmac2_set_eee_pls,
+ .pcs_ctrl_ane = NULL,
+ .pcs_rane = NULL,
+ .pcs_get_adv_lp = NULL,
+ .debug = NULL,
+ .set_filter = dwxgmac2_set_filter,
+ .safety_feat_config = dwxgmac3_safety_feat_config,
+ .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
+ .safety_feat_dump = dwxgmac3_safety_feat_dump,
+ .set_mac_loopback = dwxgmac2_set_mac_loopback,
+ .rss_configure = dwxgmac2_rss_configure,
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .rxp_config = dwxgmac3_rxp_config,
+ .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
+ .flex_pps_config = dwxgmac2_flex_pps_config,
+ .sarc_configure = dwxgmac2_sarc_configure,
+ .enable_vlan = dwxgmac2_enable_vlan,
+ .config_l3_filter = dwxgmac2_config_l3_filter,
+ .config_l4_filter = dwxgmac2_config_l4_filter,
+ .set_arp_offload = dwxgmac2_set_arp_offload,
+ .est_configure = dwxgmac3_est_configure,
+ .fpe_configure = dwxgmac3_fpe_configure,
+};
+
int dwxgmac2_setup(struct stmmac_priv *priv)
{
struct mac_device_info *mac = priv->hw;
@@ -1521,3 +1583,40 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
return 0;
}
+
+int dwxlgmac2_setup(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ dev_info(priv->device, "\tXLGMAC\n");
+
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+ mac->link.duplex = 0;
+ mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
+ mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
+ mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
+ mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
+ mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
+ mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
+ mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
+ mac->link.speed_mask = XLGMAC_CONFIG_SS;
+
+ mac->mii.addr = XGMAC_MDIO_ADDR;
+ mac->mii.data = XGMAC_MDIO_DATA;
+ mac->mii.addr_shift = 16;
+ mac->mii.addr_mask = GENMASK(20, 16);
+ mac->mii.reg_shift = 0;
+ mac->mii.reg_mask = GENMASK(15, 0);
+ mac->mii.clk_csr_shift = 19;
+ mac->mii.clk_csr_mask = GENMASK(21, 19);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxlgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxlgmac2.h
new file mode 100644
index 000000000000..726090d49221
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxlgmac2.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare XLGMAC definitions.
+ */
+
+#ifndef __STMMAC_DWXLGMAC2_H__
+#define __STMMAC_DWXLGMAC2_H__
+
+/* MAC Registers */
+#define XLGMAC_CONFIG_SS GENMASK(30, 28)
+#define XLGMAC_CONFIG_SS_SHIFT 28
+#define XLGMAC_CONFIG_SS_40G (0x0 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_25G (0x1 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_50G (0x2 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_100G (0x3 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_10G (0x4 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_2500 (0x6 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_1000 (0x7 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_RXQ_ENABLE_CTRL0 0x00000140
+
+#endif /* __STMMAC_DWXLGMAC2_H__ */
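
The header packs every link speed into the 3-bit MAC speed-select field; dwxlgmac2_setup() then stores these encodings in mac->link so the generic link-up path can do a mask-and-insert on the control register. A small standalone sketch of that read-modify-write, reusing the encodings from the header above:

```c
/* Sketch: program the XLGMAC 3-bit speed-select field with the encodings
 * from dwxlgmac2.h. Only the mask-and-insert pattern is the point; the
 * initial ctrl value is arbitrary.
 */
#include <stdio.h>
#include <stdint.h>

#define CONFIG_SS_SHIFT	28
#define CONFIG_SS	(0x7u << CONFIG_SS_SHIFT)	/* GENMASK(30, 28) */
#define SS_40G		(0x0u << CONFIG_SS_SHIFT)
#define SS_25G		(0x1u << CONFIG_SS_SHIFT)
#define SS_100G		(0x3u << CONFIG_SS_SHIFT)

static uint32_t set_speed(uint32_t ctrl, uint32_t ss)
{
	ctrl &= ~CONFIG_SS;	/* clear the speed-select field */
	return ctrl | ss;	/* insert the new encoding */
}

int main(void)
{
	uint32_t ctrl = 0xdeadbeefu;

	ctrl = set_speed(ctrl, SS_25G);
	printf("SS field = %u (expect 1)\n",
	       (unsigned int)((ctrl & CONFIG_SS) >> CONFIG_SS_SHIFT));
	return 0;
}
```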
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 3af2e5015245..bb7114f970f8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -23,6 +23,18 @@ static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
return reg & GENMASK(7, 0);
}
+static u32 stmmac_get_dev_id(struct stmmac_priv *priv, u32 id_reg)
+{
+ u32 reg = readl(priv->ioaddr + id_reg);
+
+ if (!reg) {
+ dev_info(priv->device, "Version ID not available\n");
+ return 0x0;
+ }
+
+ return (reg & GENMASK(15, 8)) >> 8;
+}
+
static void stmmac_dwmac_mode_quirk(struct stmmac_priv *priv)
{
struct mac_device_info *mac = priv->hw;
@@ -69,11 +81,18 @@ static int stmmac_dwmac4_quirks(struct stmmac_priv *priv)
return 0;
}
+static int stmmac_dwxlgmac_quirks(struct stmmac_priv *priv)
+{
+ priv->hw->xlgmac = true;
+ return 0;
+}
+
static const struct stmmac_hwif_entry {
bool gmac;
bool gmac4;
bool xgmac;
u32 min_id;
+ u32 dev_id;
const struct stmmac_regs_off regs;
const void *desc;
const void *dma;
@@ -199,6 +218,7 @@ static const struct stmmac_hwif_entry {
.gmac4 = false,
.xgmac = true,
.min_id = DWXGMAC_CORE_2_10,
+ .dev_id = DWXGMAC_ID,
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
.mmc_off = MMC_XGMAC_OFFSET,
@@ -212,6 +232,25 @@ static const struct stmmac_hwif_entry {
.mmc = &dwxgmac_mmc_ops,
.setup = dwxgmac2_setup,
.quirks = NULL,
+ }, {
+ .gmac = false,
+ .gmac4 = false,
+ .xgmac = true,
+ .min_id = DWXLGMAC_CORE_2_00,
+ .dev_id = DWXLGMAC_ID,
+ .regs = {
+ .ptp_off = PTP_XGMAC_OFFSET,
+ .mmc_off = MMC_XGMAC_OFFSET,
+ },
+ .desc = &dwxgmac210_desc_ops,
+ .dma = &dwxgmac210_dma_ops,
+ .mac = &dwxlgmac2_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = &dwmac510_tc_ops,
+ .mmc = &dwxgmac_mmc_ops,
+ .setup = dwxlgmac2_setup,
+ .quirks = stmmac_dwxlgmac_quirks,
},
};
@@ -223,13 +262,15 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
const struct stmmac_hwif_entry *entry;
struct mac_device_info *mac;
bool needs_setup = true;
+ u32 id, dev_id = 0;
int i, ret;
- u32 id;
if (needs_gmac) {
id = stmmac_get_id(priv, GMAC_VERSION);
} else if (needs_gmac4 || needs_xgmac) {
id = stmmac_get_id(priv, GMAC4_VERSION);
+ if (needs_xgmac)
+ dev_id = stmmac_get_dev_id(priv, GMAC4_VERSION);
} else {
id = 0;
}
@@ -267,6 +308,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
/* Use synopsys_id var because some setups can override this */
if (priv->synopsys_id < entry->min_id)
continue;
+ if (needs_xgmac && (dev_id ^ entry->dev_id))
+ continue;
/* Only use generic HW helpers if needed */
mac->desc = mac->desc ? : entry->desc;
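
stmmac_get_dev_id() splits a second field out of the version register: bits [7:0] stay the Synopsys core ID (existing stmmac_get_id()) and bits [15:8] become a device ID, which the table walk uses, via the `dev_id ^ entry->dev_id` skip, to tell XGMAC from XLGMAC cores. A tiny model of the split, using a hypothetical register readout:

```c
/* Model of the GMAC4_VERSION register split used by stmmac_hwif_init().
 * The 0x2520 readout below is hypothetical, purely for illustration.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t snps_id(uint32_t version_reg)
{
	return version_reg & 0xff;		/* GENMASK(7, 0) */
}

static uint32_t dev_id(uint32_t version_reg)
{
	return (version_reg & 0xff00) >> 8;	/* GENMASK(15, 8) */
}

int main(void)
{
	uint32_t reg = 0x2520;

	printf("core id 0x%02x, dev id 0x%02x\n",
	       (unsigned int)snps_id(reg), (unsigned int)dev_id(reg));
	/* the table walk skips entries where dev_id ^ entry->dev_id != 0,
	 * i.e. where the two IDs differ */
	return 0;
}
```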
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index df63b0367aff..ffe2d63389b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -369,6 +369,14 @@ struct stmmac_ops {
void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
__le16 perfect_match, bool is_double);
void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ int (*del_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ void (*restore_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw);
/* TX Timestamp */
int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
/* Source Address Insertion / Replacement */
@@ -461,6 +469,12 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
#define stmmac_enable_vlan(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
+#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args)
+#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, del_hw_vlan_rx_fltr, __args)
+#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, restore_hw_vlan_rx_fltr, __args)
#define stmmac_get_mac_tx_timestamp(__priv, __args...) \
stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args)
#define stmmac_sarc_configure(__priv, __args...) \
@@ -577,6 +591,18 @@ struct stmmac_mmc_ops {
#define stmmac_mmc_read(__priv, __args...) \
stmmac_do_void_callback(__priv, mmc, read, __args)
+/* XPCS callbacks */
+#define stmmac_xpcs_validate(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, validate, __args)
+#define stmmac_xpcs_config(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, config, __args)
+#define stmmac_xpcs_get_state(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, get_state, __args)
+#define stmmac_xpcs_link_up(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, link_up, __args)
+#define stmmac_xpcs_probe(__priv, __args...) \
+ stmmac_do_callback(__priv, xpcs, probe, __args)
+
struct stmmac_regs_off {
u32 ptp_off;
u32 mmc_off;
@@ -593,6 +619,7 @@ extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
extern const struct stmmac_ops dwxgmac210_ops;
+extern const struct stmmac_ops dwxlgmac2_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
extern const struct stmmac_mmc_ops dwmac_mmc_ops;
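
The new add/del/restore hooks are reached through the same stmmac_do_callback()/stmmac_do_void_callback() indirection as every other MAC op: the macro NULL-checks the ops table and the member before calling, falling back to an error code. A simplified, self-contained model of that dispatch pattern (the real macros also go through priv->hw and pass the argument list differently):

```c
/* Simplified stand-in for stmmac's callback-dispatch macros: check the
 * ops table and the member, call it if present, else return -EINVAL.
 */
#include <stdio.h>

#define EINVAL 22

struct mac_ops {
	int (*add_vlan)(int vid);
};

struct priv {
	const struct mac_ops *mac;
};

#define do_callback(__priv, __member, __cname, ...)			\
	((__priv)->__member && (__priv)->__member->__cname ?		\
	 (__priv)->__member->__cname(__VA_ARGS__) : -EINVAL)

static int add_vlan_impl(int vid)
{
	(void)vid;
	return 0;
}

static const struct mac_ops ops = { .add_vlan = add_vlan_impl };

int main(void)
{
	struct priv with = { .mac = &ops }, without = { .mac = NULL };

	printf("%d %d\n", do_callback(&with, mac, add_vlan, 100),
	       do_callback(&without, mac, add_vlan, 100));	/* 0 -22 */
	return 0;
}
```

This is why cores that lack a feature can simply leave the member unset: callers get a uniform error instead of a NULL dereference.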
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index b29603ec744c..eae11c585025 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -732,20 +732,6 @@ static int stmmac_set_coalesce(struct net_device *dev,
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int rx_riwt;
- /* Check not supported parameters */
- if ((ec->rx_coalesce_usecs_irq) ||
- (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
- (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
- (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
- (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
- (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
- (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
- (ec->rx_max_coalesced_frames_high) ||
- (ec->tx_max_coalesced_frames_irq) ||
- (ec->stats_block_coalesce_usecs) ||
- (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
- return -EOPNOTSUPP;
-
if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) {
rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
@@ -914,6 +900,8 @@ static int stmmac_set_tunable(struct net_device *dev,
}
static const struct ethtool_ops stmmac_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
.get_msglevel = stmmac_ethtool_getmsglevel,
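
Dropping the long hand-written parameter check is safe because the ethtool core now rejects any coalesce field outside the driver's supported_coalesce_params with -EOPNOTSUPP before the driver op runs. A userspace model of that central bitmap check, with illustrative flag values rather than the real ETHTOOL_COALESCE_* constants:

```c
/* Model of the central coalesce-capability check: fields the request sets
 * are compared against the driver's declared bitmap, and anything outside
 * it is refused. Flag values are illustrative.
 */
#include <stdio.h>

#define EOPNOTSUPP		95
#define COALESCE_USECS		(1u << 0)
#define COALESCE_MAX_FRAMES	(1u << 1)
#define COALESCE_ADAPTIVE_RX	(1u << 2)

static int check_coalesce(unsigned int requested, unsigned int supported)
{
	return (requested & ~supported) ? -EOPNOTSUPP : 0;
}

int main(void)
{
	unsigned int supported = COALESCE_USECS | COALESCE_MAX_FRAMES;

	printf("%d\n", check_coalesce(COALESCE_USECS, supported));	 /* 0 */
	printf("%d\n", check_coalesce(COALESCE_ADAPTIVE_RX, supported)); /* -95 */
	return 0;
}
```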
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 020159622559..fcf080243a0f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -10,6 +10,7 @@
*******************************************************************************/
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/delay.h>
#include "common.h"
#include "stmmac_ptp.h"
@@ -53,7 +54,6 @@ static void config_sub_second_increment(void __iomem *ioaddr,
static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
{
- int limit;
u32 value;
writel(sec, ioaddr + PTP_STSUR);
@@ -64,16 +64,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
writel(value, ioaddr + PTP_TCR);
/* wait for present system time initialize to complete */
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSINIT))
- break;
- mdelay(10);
- }
- if (limit < 0)
- return -EBUSY;
-
- return 0;
+ return readl_poll_timeout(ioaddr + PTP_TCR, value,
+ !(value & PTP_TCR_TSINIT),
+ 10000, 100000);
}
static int config_addend(void __iomem *ioaddr, u32 addend)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7da18c9afa01..2fb671e61ee8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -849,6 +849,38 @@ static void stmmac_validate(struct phylink_config *config,
phylink_set(mac_supported, 10000baseKX4_Full);
phylink_set(mac_supported, 10000baseKR_Full);
}
+ if (!max_speed || (max_speed >= 25000)) {
+ phylink_set(mac_supported, 25000baseCR_Full);
+ phylink_set(mac_supported, 25000baseKR_Full);
+ phylink_set(mac_supported, 25000baseSR_Full);
+ }
+ if (!max_speed || (max_speed >= 40000)) {
+ phylink_set(mac_supported, 40000baseKR4_Full);
+ phylink_set(mac_supported, 40000baseCR4_Full);
+ phylink_set(mac_supported, 40000baseSR4_Full);
+ phylink_set(mac_supported, 40000baseLR4_Full);
+ }
+ if (!max_speed || (max_speed >= 50000)) {
+ phylink_set(mac_supported, 50000baseCR2_Full);
+ phylink_set(mac_supported, 50000baseKR2_Full);
+ phylink_set(mac_supported, 50000baseSR2_Full);
+ phylink_set(mac_supported, 50000baseKR_Full);
+ phylink_set(mac_supported, 50000baseSR_Full);
+ phylink_set(mac_supported, 50000baseCR_Full);
+ phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
+ phylink_set(mac_supported, 50000baseDR_Full);
+ }
+ if (!max_speed || (max_speed >= 100000)) {
+ phylink_set(mac_supported, 100000baseKR4_Full);
+ phylink_set(mac_supported, 100000baseSR4_Full);
+ phylink_set(mac_supported, 100000baseCR4_Full);
+ phylink_set(mac_supported, 100000baseLR4_ER4_Full);
+ phylink_set(mac_supported, 100000baseKR2_Full);
+ phylink_set(mac_supported, 100000baseSR2_Full);
+ phylink_set(mac_supported, 100000baseCR2_Full);
+ phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
+ phylink_set(mac_supported, 100000baseDR2_Full);
+ }
}
/* Half-Duplex can only work with single queue */
@@ -858,33 +890,65 @@ static void stmmac_validate(struct phylink_config *config,
phylink_set(mask, 1000baseT_Half);
}
- bitmap_and(supported, supported, mac_supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_andnot(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mac_supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_andnot(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mac_supported);
+ linkmode_andnot(supported, supported, mask);
+
+ linkmode_and(state->advertising, state->advertising, mac_supported);
+ linkmode_andnot(state->advertising, state->advertising, mask);
+
+ /* If PCS is supported, check which modes it supports. */
+ stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
}
static void stmmac_mac_pcs_get_state(struct phylink_config *config,
struct phylink_link_state *state)
{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
state->link = 0;
+ stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
}
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
+}
+
+static void stmmac_mac_an_restart(struct phylink_config *config)
+{
+ /* Not Supported */
+}
+
+static void stmmac_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ priv->eee_active = false;
+ stmmac_eee_init(priv);
+ stmmac_set_eee_pls(priv, priv->hw, false);
+}
+
+static void stmmac_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
u32 ctrl;
+ stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
+
ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
ctrl &= ~priv->hw->link.speed_mask;
- if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
- switch (state->speed) {
+ if (interface == PHY_INTERFACE_MODE_USXGMII) {
+ switch (speed) {
case SPEED_10000:
ctrl |= priv->hw->link.xgmii.speed10000;
break;
@@ -897,8 +961,34 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
default:
return;
}
+ } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
+ switch (speed) {
+ case SPEED_100000:
+ ctrl |= priv->hw->link.xlgmii.speed100000;
+ break;
+ case SPEED_50000:
+ ctrl |= priv->hw->link.xlgmii.speed50000;
+ break;
+ case SPEED_40000:
+ ctrl |= priv->hw->link.xlgmii.speed40000;
+ break;
+ case SPEED_25000:
+ ctrl |= priv->hw->link.xlgmii.speed25000;
+ break;
+ case SPEED_10000:
+ ctrl |= priv->hw->link.xgmii.speed10000;
+ break;
+ case SPEED_2500:
+ ctrl |= priv->hw->link.speed2500;
+ break;
+ case SPEED_1000:
+ ctrl |= priv->hw->link.speed1000;
+ break;
+ default:
+ return;
+ }
} else {
- switch (state->speed) {
+ switch (speed) {
case SPEED_2500:
ctrl |= priv->hw->link.speed2500;
break;
@@ -916,44 +1006,21 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
}
}
- priv->speed = state->speed;
+ priv->speed = speed;
if (priv->plat->fix_mac_speed)
- priv->plat->fix_mac_speed(priv->plat->bsp_priv, state->speed);
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
- if (!state->duplex)
+ if (!duplex)
ctrl &= ~priv->hw->link.duplex;
else
ctrl |= priv->hw->link.duplex;
/* Flow Control operation */
- if (state->pause)
- stmmac_mac_flow_ctrl(priv, state->duplex);
+ if (tx_pause && rx_pause)
+ stmmac_mac_flow_ctrl(priv, duplex);
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
-}
-
-static void stmmac_mac_an_restart(struct phylink_config *config)
-{
- /* Not Supported */
-}
-
-static void stmmac_mac_link_down(struct phylink_config *config,
- unsigned int mode, phy_interface_t interface)
-{
- struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
-
- stmmac_mac_set(priv, priv->ioaddr, false);
- priv->eee_active = false;
- stmmac_eee_init(priv);
- stmmac_set_eee_pls(priv, priv->hw, false);
-}
-
-static void stmmac_mac_link_up(struct phylink_config *config,
- unsigned int mode, phy_interface_t interface,
- struct phy_device *phy)
-{
- struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
@@ -1043,6 +1110,10 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.pcs_poll = true;
+
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
phylink = phylink_create(&priv->phylink_config, fwnode,
mode, &stmmac_phylink_mac_ops);
@@ -1251,11 +1322,11 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (buf->page)
- page_pool_put_page(rx_q->page_pool, buf->page, false);
+ page_pool_put_full_page(rx_q->page_pool, buf->page, false);
buf->page = NULL;
if (buf->sec_page)
- page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
+ page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
buf->sec_page = NULL;
}
@@ -2687,7 +2758,8 @@ static int stmmac_open(struct net_device *dev)
int ret;
if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI) {
+ priv->hw->pcs != STMMAC_PCS_RTBI &&
+ priv->hw->xpcs == NULL) {
ret = stmmac_init_phy(dev);
if (ret) {
netdev_err(priv->dev,
@@ -4494,6 +4566,8 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
return ret;
}
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+
return ret;
}
@@ -4501,11 +4575,16 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
{
struct stmmac_priv *priv = netdev_priv(ndev);
bool is_double = false;
+ int ret;
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
clear_bit(vid, priv->active_vlans);
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret)
+ return ret;
+
return stmmac_vlan_update(priv, is_double);
}
@@ -5096,6 +5175,8 @@ int stmmac_resume(struct device *dev)
stmmac_init_coalesce(priv);
stmmac_set_rx_mode(ndev);
+ stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+
stmmac_enable_all_queues(priv);
stmmac_start_all_queues(priv);
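
The phylink rework above is the main structural change in this file: mac_config() no longer consumes resolved speed/duplex/pause from a phylink_link_state; mac_link_up() receives them as plain arguments, and stmmac enables MAC flow control only when both tx_pause and rx_pause are set. A sketch of the register update as a pure function of those arguments, with illustrative field layouts (not the real MAC_CTRL bits):

```c
/* Sketch of the mac_link_up() flow: phylink resolves speed/duplex/pause
 * first, so the MAC update is a pure function of the arguments. SPEED_MASK
 * and DUPLEX_BIT are assumptions for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define SPEED_MASK	(0x7u << 28)	/* illustrative speed-select field */
#define DUPLEX_BIT	(1u << 13)	/* illustrative duplex bit */

static uint32_t mac_link_up(uint32_t ctrl, uint32_t speed_bits,
			    int duplex, int tx_pause, int rx_pause)
{
	ctrl = (ctrl & ~SPEED_MASK) | speed_bits;
	ctrl = duplex ? (ctrl | DUPLEX_BIT) : (ctrl & ~DUPLEX_BIT);
	if (tx_pause && rx_pause)	/* both directions must pause */
		printf("flow control on (duplex=%d)\n", duplex);
	return ctrl;
}

int main(void)
{
	printf("ctrl=0x%08x\n",
	       (unsigned int)mac_link_up(0, 0x4u << 28, 1, 1, 1));
	return 0;
}
```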
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index cfe5d8b73142..b2a707e2ef43 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -382,6 +382,14 @@ int stmmac_mdio_register(struct net_device *ndev)
max_addr = PHY_MAX_ADDR;
}
+ if (mdio_bus_data->has_xpcs) {
+ priv->hw->xpcs = mdio_xpcs_get_ops();
+ if (!priv->hw->xpcs) {
+ err = -ENODEV;
+ goto bus_register_fail;
+ }
+ }
+
if (mdio_bus_data->needs_reset)
new_bus->reset = &stmmac_mdio_reset;
@@ -433,6 +441,25 @@ int stmmac_mdio_register(struct net_device *ndev)
found = 1;
}
+ /* Try to probe the XPCS by scanning all addresses. */
+ if (priv->hw->xpcs) {
+ struct mdio_xpcs_args *xpcs = &priv->hw->xpcs_args;
+ int ret, mode = priv->plat->phy_interface;
+ max_addr = PHY_MAX_ADDR;
+
+ xpcs->bus = new_bus;
+
+ for (addr = 0; addr < max_addr; addr++) {
+ xpcs->addr = addr;
+
+ ret = stmmac_xpcs_probe(priv, xpcs, mode);
+ if (!ret) {
+ found = 1;
+ break;
+ }
+ }
+ }
+
if (!found && !mdio_node) {
dev_warn(dev, "No PHY found\n");
mdiobus_unregister(new_bus);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index fe2c9fa6a71c..3fb21f7ac9fb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -15,49 +15,10 @@
#include "stmmac.h"
-/*
- * This struct is used to associate PCI Function of MAC controller on a board,
- * discovered via DMI, with the address of PHY connected to the MAC. The
- * negative value of the address means that MAC controller is not connected
- * with PHY.
- */
-struct stmmac_pci_func_data {
- unsigned int func;
- int phy_addr;
-};
-
-struct stmmac_pci_dmi_data {
- const struct stmmac_pci_func_data *func;
- size_t nfuncs;
-};
-
struct stmmac_pci_info {
int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
-static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
- const struct dmi_system_id *dmi_list)
-{
- const struct stmmac_pci_func_data *func_data;
- const struct stmmac_pci_dmi_data *dmi_data;
- const struct dmi_system_id *dmi_id;
- int func = PCI_FUNC(pdev->devfn);
- size_t n;
-
- dmi_id = dmi_first_match(dmi_list);
- if (!dmi_id)
- return -ENODEV;
-
- dmi_data = dmi_id->driver_data;
- func_data = dmi_data->func;
-
- for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
- if (func_data->func == func)
- return func_data->phy_addr;
-
- return -ENODEV;
-}
-
static void common_default_data(struct plat_stmmacenet_data *plat)
{
plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
@@ -108,272 +69,6 @@ static const struct stmmac_pci_info stmmac_pci_info = {
.setup = stmmac_default_data,
};
-static int intel_mgbe_common_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- int i;
-
- plat->clk_csr = 5;
- plat->has_gmac = 0;
- plat->has_gmac4 = 1;
- plat->force_sf_dma_mode = 0;
- plat->tso_en = 1;
-
- plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
-
- for (i = 0; i < plat->rx_queues_to_use; i++) {
- plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
- plat->rx_queues_cfg[i].chan = i;
-
- /* Disable Priority config by default */
- plat->rx_queues_cfg[i].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[i].pkt_route = 0x0;
- }
-
- for (i = 0; i < plat->tx_queues_to_use; i++) {
- plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
-
- /* Disable Priority config by default */
- plat->tx_queues_cfg[i].use_prio = false;
- }
-
- /* FIFO size is 4096 bytes for 1 tx/rx queue */
- plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
- plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
-
- plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
- plat->tx_queues_cfg[0].weight = 0x09;
- plat->tx_queues_cfg[1].weight = 0x0A;
- plat->tx_queues_cfg[2].weight = 0x0B;
- plat->tx_queues_cfg[3].weight = 0x0C;
- plat->tx_queues_cfg[4].weight = 0x0D;
- plat->tx_queues_cfg[5].weight = 0x0E;
- plat->tx_queues_cfg[6].weight = 0x0F;
- plat->tx_queues_cfg[7].weight = 0x10;
-
- plat->dma_cfg->pbl = 32;
- plat->dma_cfg->pblx8 = true;
- plat->dma_cfg->fixed_burst = 0;
- plat->dma_cfg->mixed_burst = 0;
- plat->dma_cfg->aal = 0;
-
- plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
- GFP_KERNEL);
- if (!plat->axi)
- return -ENOMEM;
-
- plat->axi->axi_lpi_en = 0;
- plat->axi->axi_xit_frm = 0;
- plat->axi->axi_wr_osr_lmt = 1;
- plat->axi->axi_rd_osr_lmt = 1;
- plat->axi->axi_blen[0] = 4;
- plat->axi->axi_blen[1] = 8;
- plat->axi->axi_blen[2] = 16;
-
- plat->ptp_max_adj = plat->clk_ptp_rate;
-
- /* Set system clock */
- plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
- "stmmac-clk", NULL, 0,
- plat->clk_ptp_rate);
-
- if (IS_ERR(plat->stmmac_clk)) {
- dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
- plat->stmmac_clk = NULL;
- }
- clk_prepare_enable(plat->stmmac_clk);
-
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
- return 0;
-}
-
-static int ehl_common_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- int ret;
-
- plat->rx_queues_to_use = 8;
- plat->tx_queues_to_use = 8;
- plat->clk_ptp_rate = 200000000;
- ret = intel_mgbe_common_data(pdev, plat);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int ehl_sgmii_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- plat->bus_id = 1;
- plat->phy_addr = 0;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
-
- return ehl_common_data(pdev, plat);
-}
-
-static struct stmmac_pci_info ehl_sgmii1g_pci_info = {
- .setup = ehl_sgmii_data,
-};
-
-static int ehl_rgmii_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- plat->bus_id = 1;
- plat->phy_addr = 0;
- plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
-
- return ehl_common_data(pdev, plat);
-}
-
-static struct stmmac_pci_info ehl_rgmii1g_pci_info = {
- .setup = ehl_rgmii_data,
-};
-
-static int tgl_common_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- int ret;
-
- plat->rx_queues_to_use = 6;
- plat->tx_queues_to_use = 4;
- plat->clk_ptp_rate = 200000000;
- ret = intel_mgbe_common_data(pdev, plat);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int tgl_sgmii_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- plat->bus_id = 1;
- plat->phy_addr = 0;
- plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- return tgl_common_data(pdev, plat);
-}
-
-static struct stmmac_pci_info tgl_sgmii1g_pci_info = {
- .setup = tgl_sgmii_data,
-};
-
-static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
- {
- .func = 6,
- .phy_addr = 1,
- },
-};
-
-static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
- .func = galileo_stmmac_func_data,
- .nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
-};
-
-static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
- {
- .func = 6,
- .phy_addr = 1,
- },
- {
- .func = 7,
- .phy_addr = 1,
- },
-};
-
-static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
- .func = iot2040_stmmac_func_data,
- .nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
-};
-
-static const struct dmi_system_id quark_pci_dmi[] = {
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
- },
- .driver_data = (void *)&galileo_stmmac_dmi_data,
- },
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
- },
- .driver_data = (void *)&galileo_stmmac_dmi_data,
- },
- /*
- * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
- * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
- * has only one pci network device while other asset tags are
- * for IOT2040 which has two.
- */
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
- DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
- "6ES7647-0AA00-0YA2"),
- },
- .driver_data = (void *)&galileo_stmmac_dmi_data,
- },
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
- },
- .driver_data = (void *)&iot2040_stmmac_dmi_data,
- },
- {}
-};
-
-static int quark_default_data(struct pci_dev *pdev,
- struct plat_stmmacenet_data *plat)
-{
- int ret;
-
- /* Set common default data first */
- common_default_data(plat);
-
- /*
- * Refuse to load the driver and register net device if MAC controller
- * does not connect to any PHY interface.
- */
- ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
- if (ret < 0) {
- /* Return error to the caller on DMI enabled boards. */
- if (dmi_get_system_info(DMI_BOARD_NAME))
- return ret;
-
- /*
- * Galileo boards with old firmware don't support DMI. We always
- * use 1 here as PHY address, so at least the first found MAC
- * controller would be probed.
- */
- ret = 1;
- }
-
- plat->bus_id = pci_dev_id(pdev);
- plat->phy_addr = ret;
- plat->phy_interface = PHY_INTERFACE_MODE_RMII;
-
- plat->dma_cfg->pbl = 16;
- plat->dma_cfg->pblx8 = true;
- plat->dma_cfg->fixed_burst = 1;
- /* AXI (TODO) */
-
- return 0;
-}
-
-static const struct stmmac_pci_info quark_pci_info = {
- .setup = quark_default_data,
-};
-
static int snps_gmac5_default_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
@@ -579,28 +274,15 @@ static int __maybe_unused stmmac_pci_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
/* synthetic ID, no official vendor */
-#define PCI_VENDOR_ID_STMMAC 0x700
-
-#define STMMAC_QUARK_ID 0x0937
-#define STMMAC_DEVICE_ID 0x1108
-#define STMMAC_EHL_RGMII1G_ID 0x4b30
-#define STMMAC_EHL_SGMII1G_ID 0x4b31
-#define STMMAC_TGL_SGMII1G_ID 0xa0ac
-#define STMMAC_GMAC5_ID 0x7102
-
-#define STMMAC_DEVICE(vendor_id, dev_id, info) { \
- PCI_VDEVICE(vendor_id, dev_id), \
- .driver_data = (kernel_ulong_t)&info \
- }
+#define PCI_VENDOR_ID_STMMAC 0x0700
+
+#define PCI_DEVICE_ID_STMMAC_STMMAC 0x1108
+#define PCI_DEVICE_ID_SYNOPSYS_GMAC5_ID 0x7102
static const struct pci_device_id stmmac_id_table[] = {
- STMMAC_DEVICE(STMMAC, STMMAC_DEVICE_ID, stmmac_pci_info),
- STMMAC_DEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_MAC, stmmac_pci_info),
- STMMAC_DEVICE(INTEL, STMMAC_QUARK_ID, quark_pci_info),
- STMMAC_DEVICE(INTEL, STMMAC_EHL_RGMII1G_ID, ehl_rgmii1g_pci_info),
- STMMAC_DEVICE(INTEL, STMMAC_EHL_SGMII1G_ID, ehl_sgmii1g_pci_info),
- STMMAC_DEVICE(INTEL, STMMAC_TGL_SGMII1G_ID, tgl_sgmii1g_pci_info),
- STMMAC_DEVICE(SYNOPSYS, STMMAC_GMAC5_ID, snps_gmac5_pci_info),
+ { PCI_DEVICE_DATA(STMMAC, STMMAC, &stmmac_pci_info) },
+ { PCI_DEVICE_DATA(STMICRO, MAC, &stmmac_pci_info) },
+ { PCI_DEVICE_DATA(SYNOPSYS, GMAC5_ID, &snps_gmac5_pci_info) },
{}
};
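
The ID renames exist to satisfy PCI_DEVICE_DATA()'s token pasting: the macro synthesizes PCI_VENDOR_ID_<vend> and PCI_DEVICE_ID_<vend>_<dev> from its arguments, so the constants must follow that naming scheme exactly. A userspace model of the pasting (the kernel macro additionally fills subvendor/subdevice and driver_data; this is simplified to the name construction):

```c
/* Model of PCI_DEVICE_DATA()-style token pasting, using the constants
 * defined in the hunk above.
 */
#include <stdio.h>

#define PCI_VENDOR_ID_STMMAC		0x0700
#define PCI_DEVICE_ID_STMMAC_STMMAC	0x1108

#define DEVICE_DATA(vend, dev) \
	{ PCI_VENDOR_ID_##vend, PCI_DEVICE_ID_##vend##_##dev }

struct id { unsigned int vendor, device; };

int main(void)
{
	struct id tbl[] = { DEVICE_DATA(STMMAC, STMMAC) };

	printf("%04x:%04x\n", tbl[0].vendor, tbl[0].device);
	return 0;
}
```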
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 13fafd905db8..bcda49dcf619 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -588,7 +588,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
if (IS_ERR(plat->clk_ptp_ref)) {
plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
plat->clk_ptp_ref = NULL;
- dev_warn(&pdev->dev, "PTP uses main clock\n");
+ dev_info(&pdev->dev, "PTP uses main clock\n");
} else {
plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
@@ -645,8 +645,6 @@ EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res)
{
- struct resource *res;
-
memset(stmmac_res, 0, sizeof(*stmmac_res));
/* Get IRQ information early to have an ability to ask for deferred
@@ -680,8 +678,7 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
+ stmmac_res->addr = devm_platform_ioremap_resource(pdev, 0);
return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 2aba2673d6c3..e6696495f126 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -380,7 +380,7 @@ static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
int ret;
if (!priv->dev->phydev)
- return -EBUSY;
+ return -EOPNOTSUPP;
ret = phy_loopback(priv->dev->phydev, true);
if (ret)
@@ -1387,6 +1387,7 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
cls->rule = rule;
rule->action.entries[0].id = FLOW_ACTION_DROP;
+ rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
rule->action.num_entries = 1;
attr.dst = priv->dev->dev_addr;
@@ -1515,6 +1516,7 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
cls->rule = rule;
rule->action.entries[0].id = FLOW_ACTION_DROP;
+ rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
rule->action.num_entries = 1;
attr.dst = priv->dev->dev_addr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 7a01dee2f9a8..3d747846f482 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -367,7 +367,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
static int tc_parse_flow_actions(struct stmmac_priv *priv,
struct flow_action *action,
- struct stmmac_flow_entry *entry)
+ struct stmmac_flow_entry *entry,
+ struct netlink_ext_ack *extack)
{
struct flow_action_entry *act;
int i;
@@ -375,6 +376,9 @@ static int tc_parse_flow_actions(struct stmmac_priv *priv,
if (!flow_action_has_entries(action))
return -EINVAL;
+ if (!flow_action_basic_hw_stats_check(action, extack))
+ return -EOPNOTSUPP;
+
flow_action_for_each(i, act, action) {
switch (act->id) {
case FLOW_ACTION_DROP:
@@ -530,7 +534,8 @@ static int tc_add_flow(struct stmmac_priv *priv,
return -ENOENT;
}
- ret = tc_parse_flow_actions(priv, &rule->action, entry);
+ ret = tc_parse_flow_actions(priv, &rule->action, entry,
+ cls->common.extack);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 6ec9163e232c..e6d1aa882fa5 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1716,34 +1716,26 @@ static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
pr_cont("\n");
if (stat & PCI_ERR_OTHER) {
- u16 cfg;
+ int pci_errs;
/* Interrogate PCI config space for the
* true cause.
*/
- pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
- netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
- if (cfg & PCI_STATUS_PARITY)
+ pci_errs = pci_status_get_and_clear_errors(cp->pdev);
+
+ netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
+ if (pci_errs & PCI_STATUS_PARITY)
netdev_err(dev, "PCI parity error detected\n");
- if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
+ if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
netdev_err(dev, "PCI target abort\n");
- if (cfg & PCI_STATUS_REC_TARGET_ABORT)
+ if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
netdev_err(dev, "PCI master acks target abort\n");
- if (cfg & PCI_STATUS_REC_MASTER_ABORT)
+ if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
netdev_err(dev, "PCI master abort\n");
- if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
+ if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
netdev_err(dev, "PCI system error SERR#\n");
- if (cfg & PCI_STATUS_DETECTED_PARITY)
+ if (pci_errs & PCI_STATUS_DETECTED_PARITY)
netdev_err(dev, "PCI parity error\n");
-
- /* Write the error bits back to clear them. */
- cfg &= (PCI_STATUS_PARITY |
- PCI_STATUS_SIG_TARGET_ABORT |
- PCI_STATUS_REC_TARGET_ABORT |
- PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_DETECTED_PARITY);
- pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
}
/* For all PCI errors, we should reset the chip. */
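
pci_status_get_and_clear_errors() folds the old read/report/write-back sequence into one helper: it reads PCI_STATUS, writes the error bits back (the register is write-one-to-clear) and returns them. A userspace model of that behavior; 0xf900 is the OR of the six PCI_STATUS_* error masks tested above. The sungem.c hunk below performs the identical conversion.

```c
/* Userspace model of pci_status_get_and_clear_errors(): the config-space
 * word is replaced by a plain variable, and the write-one-to-clear effect
 * is modeled by clearing the returned bits.
 */
#include <stdio.h>
#include <stdint.h>

#define STATUS_ERROR_BITS 0xf900	/* parity/abort/SERR bits */

static uint16_t pci_status;		/* stand-in for config space */

static int get_and_clear_errors(void)
{
	int err = pci_status & STATUS_ERROR_BITS;

	pci_status &= ~err;		/* models the write-1-to-clear */
	return err;
}

int main(void)
{
	pci_status = 0x8010;		/* detected parity + capability list */
	printf("errors 0x%04x, status now 0x%04x\n",
	       get_and_clear_errors(), pci_status);
	return 0;
}
```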
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 8358064fbd48..2d392a7b179a 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -545,37 +545,25 @@ static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
}
if (pci_estat & GREG_PCIESTAT_OTHER) {
- u16 pci_cfg_stat;
+ int pci_errs;
/* Interrogate PCI config space for the
* true cause.
*/
- pci_read_config_word(gp->pdev, PCI_STATUS,
- &pci_cfg_stat);
- netdev_err(dev, "Read PCI cfg space status [%04x]\n",
- pci_cfg_stat);
- if (pci_cfg_stat & PCI_STATUS_PARITY)
+ pci_errs = pci_status_get_and_clear_errors(gp->pdev);
+ netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
+ if (pci_errs & PCI_STATUS_PARITY)
netdev_err(dev, "PCI parity error detected\n");
- if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
+ if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
netdev_err(dev, "PCI target abort\n");
- if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
+ if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
netdev_err(dev, "PCI master acks target abort\n");
- if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
+ if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
netdev_err(dev, "PCI master abort\n");
- if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
+ if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
netdev_err(dev, "PCI system error SERR#\n");
- if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
+ if (pci_errs & PCI_STATUS_DETECTED_PARITY)
netdev_err(dev, "PCI parity error\n");
-
- /* Write the error bits back to clear them. */
- pci_cfg_stat &= (PCI_STATUS_PARITY |
- PCI_STATUS_SIG_TARGET_ABORT |
- PCI_STATUS_REC_TARGET_ABORT |
- PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_DETECTED_PARITY);
- pci_write_config_word(gp->pdev,
- PCI_STATUS, pci_cfg_stat);
}
/* For all PCI errors, we should reset the chip. */
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
index fde722136869..bc198eadfcab 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
@@ -151,7 +151,6 @@ static int xlgmac_ethtool_get_coalesce(struct net_device *netdev,
{
struct xlgmac_pdata *pdata = netdev_priv(netdev);
- memset(ec, 0, sizeof(struct ethtool_coalesce));
ec->rx_coalesce_usecs = pdata->rx_usecs;
ec->rx_max_coalesced_frames = pdata->rx_frames;
ec->tx_max_coalesced_frames = pdata->tx_frames;
@@ -167,20 +166,6 @@ static int xlgmac_ethtool_set_coalesce(struct net_device *netdev,
unsigned int rx_frames, rx_riwt, rx_usecs;
unsigned int tx_frames;
- /* Check for not supported parameters */
- if ((ec->rx_coalesce_usecs_irq) || (ec->rx_max_coalesced_frames_irq) ||
- (ec->tx_coalesce_usecs) || (ec->tx_coalesce_usecs_high) ||
- (ec->tx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
- (ec->stats_block_coalesce_usecs) || (ec->pkt_rate_low) ||
- (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
- (ec->rx_max_coalesced_frames_low) || (ec->rx_coalesce_usecs_low) ||
- (ec->tx_coalesce_usecs_low) || (ec->tx_max_coalesced_frames_low) ||
- (ec->pkt_rate_high) || (ec->rx_coalesce_usecs_high) ||
- (ec->rx_max_coalesced_frames_high) ||
- (ec->tx_max_coalesced_frames_high) ||
- (ec->rate_sample_interval))
- return -EOPNOTSUPP;
-
rx_usecs = ec->rx_coalesce_usecs;
rx_riwt = hw_ops->usec_to_riwt(pdata, rx_usecs);
rx_frames = ec->rx_max_coalesced_frames;
@@ -257,6 +242,8 @@ static void xlgmac_ethtool_get_ethtool_stats(struct net_device *netdev,
}
static const struct ethtool_ops xlgmac_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = xlgmac_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_msglevel = xlgmac_ethtool_get_msglevel,
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 0f8a924fc60c..40a2ce0ca808 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2373,6 +2373,8 @@ static void bdx_get_ethtool_stats(struct net_device *netdev,
static void bdx_set_ethtool_ops(struct net_device *netdev)
{
static const struct ethtool_ops bdx_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = bdx_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_coalesce = bdx_get_coalesce,
diff --git a/drivers/net/ethernet/tehuti/tehuti.h b/drivers/net/ethernet/tehuti/tehuti.h
index 5fc03c8eba0c..909e7296cecf 100644
--- a/drivers/net/ethernet/tehuti/tehuti.h
+++ b/drivers/net/ethernet/tehuti/tehuti.h
@@ -330,7 +330,7 @@ struct txd_desc {
u16 length;
u32 va_lo;
u32 va_hi;
- struct pbl pbl[0]; /* Fragments */
+ struct pbl pbl[]; /* Fragments */
} __packed;
/* Register region size */
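
The pbl[0] to pbl[] change is the treewide zero-length-array to C99 flexible-array-member conversion: the member contributes no size to the struct, and the trailing storage is sized at allocation time, which lets the compiler and bounds checkers understand the intent. A minimal standalone illustration:

```c
/* Flexible array member: the descriptor's fragment list is sized when the
 * buffer is allocated, not in the type. Field names follow the struct in
 * the hunk above; values are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct pbl { unsigned int pa_lo, pa_hi, len; };

struct txd_desc {
	unsigned short length;
	struct pbl pbl[];	/* C99 flexible array member */
};

int main(void)
{
	int nfrags = 3;
	struct txd_desc *d = malloc(sizeof(*d) + nfrags * sizeof(d->pbl[0]));

	if (!d)
		return 1;
	d->pbl[2].len = 42;	/* trailing storage sized at allocation */
	printf("%u\n", d->pbl[2].len);
	free(d);
	return 0;
}
```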
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index bf98e0fa7d8b..89cec778cf2d 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_TI
bool "Texas Instruments (TI) devices"
default y
- depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE
+ depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -31,7 +31,7 @@ config TI_DAVINCI_EMAC
config TI_DAVINCI_MDIO
tristate "TI DaVinci MDIO Support"
- depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
select PHYLIB
---help---
This driver supports TI's DaVinci MDIO module.
@@ -53,6 +53,7 @@ config TI_CPSW
select MFD_SYSCON
select PAGE_POOL
select REGMAP
+ imply PHY_TI_GMII_SEL
---help---
This driver supports TI's CPSW Ethernet Switch.
@@ -94,6 +95,21 @@ config TI_CPTS_MOD
imply PTP_1588_CLOCK
default m
+config TI_K3_AM65_CPSW_NUSS
+ tristate "TI K3 AM654x/J721E CPSW Ethernet driver"
+ depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ select TI_DAVINCI_MDIO
+ imply PHY_TI_GMII_SEL
+ help
+ This driver supports TI K3 AM654/J721E CPSW2G Ethernet SubSystem.
+ The two-port Gigabit Ethernet MAC (MCU_CPSW0) subsystem provides
+ Ethernet packet communication for the device: One Ethernet port
+ (port 1) with selectable RGMII and RMII interfaces and an internal
+ Communications Port Programming Interface (CPPI) port (port 0).
+
+ To compile this driver as a module, choose M here: the module
+ will be called ti-am65-cpsw-nuss.
+
config TI_KEYSTONE_NETCP
tristate "TI Keystone NETCP Core Support"
select TI_DAVINCI_MDIO
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index ecf776ad8689..53792190e9c2 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -23,3 +23,6 @@ obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
keystone_netcp-y := netcp_core.o cpsw_ale.o
obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o
+
+obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
+ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
new file mode 100644
index 000000000000..c3502aa15ea0
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -0,0 +1,747 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver ethtool ops
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ */
+
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "am65-cpsw-nuss.h"
+#include "cpsw_ale.h"
+
+#define AM65_CPSW_REGDUMP_VER 0x1
+
+enum {
+ AM65_CPSW_REGDUMP_MOD_NUSS = 1,
+ AM65_CPSW_REGDUMP_MOD_RGMII_STATUS = 2,
+ AM65_CPSW_REGDUMP_MOD_MDIO = 3,
+ AM65_CPSW_REGDUMP_MOD_CPSW = 4,
+ AM65_CPSW_REGDUMP_MOD_CPSW_P0 = 5,
+ AM65_CPSW_REGDUMP_MOD_CPSW_P1 = 6,
+ AM65_CPSW_REGDUMP_MOD_CPSW_CPTS = 7,
+ AM65_CPSW_REGDUMP_MOD_CPSW_ALE = 8,
+ AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL = 9,
+ AM65_CPSW_REGDUMP_MOD_LAST,
+};
+
+/**
+ * struct am65_cpsw_regdump_hdr - regdump record header
+ *
+ * @module_id: CPSW module ID
+ * @len: CPSW module registers space length in u32
+ */
+
+struct am65_cpsw_regdump_hdr {
+ u32 module_id;
+ u32 len;
+};
+
+/**
+ * struct am65_cpsw_regdump_item - regdump module description
+ *
+ * @hdr: CPSW module header
+ * @start_ofs: CPSW module registers start addr
+ * @end_ofs: CPSW module registers end addr
+ *
+ * Registers dump provided in the format:
+ * u32 : module ID
+ * u32 : dump length
+ * u32[..len]: registers values
+ */
+struct am65_cpsw_regdump_item {
+ struct am65_cpsw_regdump_hdr hdr;
+ u32 start_ofs;
+ u32 end_ofs;
+};
+
+#define AM65_CPSW_REGDUMP_REC(mod, start, end) { \
+ .hdr.module_id = (mod), \
+ .hdr.len = (((u32 *)(end)) - ((u32 *)(start)) + 1) * sizeof(u32) * 2 + \
+ sizeof(struct am65_cpsw_regdump_hdr), \
+ .start_ofs = (start), \
+ .end_ofs = end, \
+}
+
+static const struct am65_cpsw_regdump_item am65_cpsw_regdump[] = {
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_NUSS, 0x0, 0x1c),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_RGMII_STATUS, 0x30, 0x4c),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_MDIO, 0xf00, 0xffc),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW, 0x20000, 0x2011c),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_P0, 0x21000, 0x21320),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_P1, 0x22000, 0x223a4),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_CPTS,
+ 0x3d000, 0x3d048),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_ALE, 0x3e000, 0x3e13c),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL, 0, 0),
+};
+
+struct am65_cpsw_stats_regs {
+ u32 rx_good_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames; /* slave */
+ u32 rx_crc_errors;
+ u32 rx_align_code_errors; /* slave */
+ u32 rx_oversized_frames;
+ u32 rx_jabber_frames; /* slave */
+ u32 rx_undersized_frames;
+ u32 rx_fragments; /* slave */
+ u32 ale_drop;
+ u32 ale_overrun_drop;
+ u32 rx_octets;
+ u32 tx_good_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames; /* slave */
+ u32 tx_deferred_frames; /* slave */
+ u32 tx_collision_frames; /* slave */
+ u32 tx_single_coll_frames; /* slave */
+ u32 tx_mult_coll_frames; /* slave */
+ u32 tx_excessive_collisions; /* slave */
+ u32 tx_late_collisions; /* slave */
+ u32 rx_ipg_error; /* slave 10G only */
+ u32 tx_carrier_sense_errors; /* slave */
+ u32 tx_octets;
+ u32 tx_64B_frames;
+ u32 tx_65_to_127B_frames;
+ u32 tx_128_to_255B_frames;
+ u32 tx_256_to_511B_frames;
+ u32 tx_512_to_1023B_frames;
+ u32 tx_1024B_frames;
+ u32 net_octets;
+ u32 rx_bottom_fifo_drop;
+ u32 rx_port_mask_drop;
+ u32 rx_top_fifo_drop;
+ u32 ale_rate_limit_drop;
+ u32 ale_vid_ingress_drop;
+ u32 ale_da_eq_sa_drop;
+ u32 ale_block_drop; /* K3 */
+ u32 ale_secure_drop; /* K3 */
+ u32 ale_auth_drop; /* K3 */
+ u32 ale_unknown_ucast;
+ u32 ale_unknown_ucast_bytes;
+ u32 ale_unknown_mcast;
+ u32 ale_unknown_mcast_bytes;
+ u32 ale_unknown_bcast;
+ u32 ale_unknown_bcast_bytes;
+ u32 ale_pol_match;
+ u32 ale_pol_match_red;
+ u32 ale_pol_match_yellow;
+ u32 ale_mcast_sa_drop; /* K3 */
+ u32 ale_dual_vlan_drop; /* K3 */
+ u32 ale_len_err_drop; /* K3 */
+ u32 ale_ip_next_hdr_drop; /* K3 */
+ u32 ale_ipv4_frag_drop; /* K3 */
+ u32 __rsvd_1[24];
+ u32 iet_rx_assembly_err; /* K3 slave */
+ u32 iet_rx_assembly_ok; /* K3 slave */
+ u32 iet_rx_smd_err; /* K3 slave */
+ u32 iet_rx_frag; /* K3 slave */
+ u32 iet_tx_hold; /* K3 slave */
+ u32 iet_tx_frag; /* K3 slave */
+ u32 __rsvd_2[9];
+ u32 tx_mem_protect_err;
+ /* following NU only */
+ u32 tx_pri0;
+ u32 tx_pri1;
+ u32 tx_pri2;
+ u32 tx_pri3;
+ u32 tx_pri4;
+ u32 tx_pri5;
+ u32 tx_pri6;
+ u32 tx_pri7;
+ u32 tx_pri0_bcnt;
+ u32 tx_pri1_bcnt;
+ u32 tx_pri2_bcnt;
+ u32 tx_pri3_bcnt;
+ u32 tx_pri4_bcnt;
+ u32 tx_pri5_bcnt;
+ u32 tx_pri6_bcnt;
+ u32 tx_pri7_bcnt;
+ u32 tx_pri0_drop;
+ u32 tx_pri1_drop;
+ u32 tx_pri2_drop;
+ u32 tx_pri3_drop;
+ u32 tx_pri4_drop;
+ u32 tx_pri5_drop;
+ u32 tx_pri6_drop;
+ u32 tx_pri7_drop;
+ u32 tx_pri0_drop_bcnt;
+ u32 tx_pri1_drop_bcnt;
+ u32 tx_pri2_drop_bcnt;
+ u32 tx_pri3_drop_bcnt;
+ u32 tx_pri4_drop_bcnt;
+ u32 tx_pri5_drop_bcnt;
+ u32 tx_pri6_drop_bcnt;
+ u32 tx_pri7_drop_bcnt;
+};
+
+struct am65_cpsw_ethtool_stat {
+ char desc[ETH_GSTRING_LEN];
+ int offset;
+};
+
+#define AM65_CPSW_STATS(prefix, field) \
+{ \
+ #prefix#field, \
+ offsetof(struct am65_cpsw_stats_regs, field) \
+}
+
+static const struct am65_cpsw_ethtool_stat am65_host_stats[] = {
+ AM65_CPSW_STATS(p0_, rx_good_frames),
+ AM65_CPSW_STATS(p0_, rx_broadcast_frames),
+ AM65_CPSW_STATS(p0_, rx_multicast_frames),
+ AM65_CPSW_STATS(p0_, rx_crc_errors),
+ AM65_CPSW_STATS(p0_, rx_oversized_frames),
+ AM65_CPSW_STATS(p0_, rx_undersized_frames),
+ AM65_CPSW_STATS(p0_, ale_drop),
+ AM65_CPSW_STATS(p0_, ale_overrun_drop),
+ AM65_CPSW_STATS(p0_, rx_octets),
+ AM65_CPSW_STATS(p0_, tx_good_frames),
+ AM65_CPSW_STATS(p0_, tx_broadcast_frames),
+ AM65_CPSW_STATS(p0_, tx_multicast_frames),
+ AM65_CPSW_STATS(p0_, tx_octets),
+ AM65_CPSW_STATS(p0_, tx_64B_frames),
+ AM65_CPSW_STATS(p0_, tx_65_to_127B_frames),
+ AM65_CPSW_STATS(p0_, tx_128_to_255B_frames),
+ AM65_CPSW_STATS(p0_, tx_256_to_511B_frames),
+ AM65_CPSW_STATS(p0_, tx_512_to_1023B_frames),
+ AM65_CPSW_STATS(p0_, tx_1024B_frames),
+ AM65_CPSW_STATS(p0_, net_octets),
+ AM65_CPSW_STATS(p0_, rx_bottom_fifo_drop),
+ AM65_CPSW_STATS(p0_, rx_port_mask_drop),
+ AM65_CPSW_STATS(p0_, rx_top_fifo_drop),
+ AM65_CPSW_STATS(p0_, ale_rate_limit_drop),
+ AM65_CPSW_STATS(p0_, ale_vid_ingress_drop),
+ AM65_CPSW_STATS(p0_, ale_da_eq_sa_drop),
+ AM65_CPSW_STATS(p0_, ale_block_drop),
+ AM65_CPSW_STATS(p0_, ale_secure_drop),
+ AM65_CPSW_STATS(p0_, ale_auth_drop),
+ AM65_CPSW_STATS(p0_, ale_unknown_ucast),
+ AM65_CPSW_STATS(p0_, ale_unknown_ucast_bytes),
+ AM65_CPSW_STATS(p0_, ale_unknown_mcast),
+ AM65_CPSW_STATS(p0_, ale_unknown_mcast_bytes),
+ AM65_CPSW_STATS(p0_, ale_unknown_bcast),
+ AM65_CPSW_STATS(p0_, ale_unknown_bcast_bytes),
+ AM65_CPSW_STATS(p0_, ale_pol_match),
+ AM65_CPSW_STATS(p0_, ale_pol_match_red),
+ AM65_CPSW_STATS(p0_, ale_pol_match_yellow),
+ AM65_CPSW_STATS(p0_, ale_mcast_sa_drop),
+ AM65_CPSW_STATS(p0_, ale_dual_vlan_drop),
+ AM65_CPSW_STATS(p0_, ale_len_err_drop),
+ AM65_CPSW_STATS(p0_, ale_ip_next_hdr_drop),
+ AM65_CPSW_STATS(p0_, ale_ipv4_frag_drop),
+ AM65_CPSW_STATS(p0_, tx_mem_protect_err),
+ AM65_CPSW_STATS(p0_, tx_pri0),
+ AM65_CPSW_STATS(p0_, tx_pri1),
+ AM65_CPSW_STATS(p0_, tx_pri2),
+ AM65_CPSW_STATS(p0_, tx_pri3),
+ AM65_CPSW_STATS(p0_, tx_pri4),
+ AM65_CPSW_STATS(p0_, tx_pri5),
+ AM65_CPSW_STATS(p0_, tx_pri6),
+ AM65_CPSW_STATS(p0_, tx_pri7),
+ AM65_CPSW_STATS(p0_, tx_pri0_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri1_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri2_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri3_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri4_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri5_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri6_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri7_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri0_drop),
+ AM65_CPSW_STATS(p0_, tx_pri1_drop),
+ AM65_CPSW_STATS(p0_, tx_pri2_drop),
+ AM65_CPSW_STATS(p0_, tx_pri3_drop),
+ AM65_CPSW_STATS(p0_, tx_pri4_drop),
+ AM65_CPSW_STATS(p0_, tx_pri5_drop),
+ AM65_CPSW_STATS(p0_, tx_pri6_drop),
+ AM65_CPSW_STATS(p0_, tx_pri7_drop),
+ AM65_CPSW_STATS(p0_, tx_pri0_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri1_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri2_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri3_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri4_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri5_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri6_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri7_drop_bcnt),
+};
+
+static const struct am65_cpsw_ethtool_stat am65_slave_stats[] = {
+ AM65_CPSW_STATS(, rx_good_frames),
+ AM65_CPSW_STATS(, rx_broadcast_frames),
+ AM65_CPSW_STATS(, rx_multicast_frames),
+ AM65_CPSW_STATS(, rx_pause_frames),
+ AM65_CPSW_STATS(, rx_crc_errors),
+ AM65_CPSW_STATS(, rx_align_code_errors),
+ AM65_CPSW_STATS(, rx_oversized_frames),
+ AM65_CPSW_STATS(, rx_jabber_frames),
+ AM65_CPSW_STATS(, rx_undersized_frames),
+ AM65_CPSW_STATS(, rx_fragments),
+ AM65_CPSW_STATS(, ale_drop),
+ AM65_CPSW_STATS(, ale_overrun_drop),
+ AM65_CPSW_STATS(, rx_octets),
+ AM65_CPSW_STATS(, tx_good_frames),
+ AM65_CPSW_STATS(, tx_broadcast_frames),
+ AM65_CPSW_STATS(, tx_multicast_frames),
+ AM65_CPSW_STATS(, tx_pause_frames),
+ AM65_CPSW_STATS(, tx_deferred_frames),
+ AM65_CPSW_STATS(, tx_collision_frames),
+ AM65_CPSW_STATS(, tx_single_coll_frames),
+ AM65_CPSW_STATS(, tx_mult_coll_frames),
+ AM65_CPSW_STATS(, tx_excessive_collisions),
+ AM65_CPSW_STATS(, tx_late_collisions),
+ AM65_CPSW_STATS(, rx_ipg_error),
+ AM65_CPSW_STATS(, tx_carrier_sense_errors),
+ AM65_CPSW_STATS(, tx_octets),
+ AM65_CPSW_STATS(, tx_64B_frames),
+ AM65_CPSW_STATS(, tx_65_to_127B_frames),
+ AM65_CPSW_STATS(, tx_128_to_255B_frames),
+ AM65_CPSW_STATS(, tx_256_to_511B_frames),
+ AM65_CPSW_STATS(, tx_512_to_1023B_frames),
+ AM65_CPSW_STATS(, tx_1024B_frames),
+ AM65_CPSW_STATS(, net_octets),
+ AM65_CPSW_STATS(, rx_bottom_fifo_drop),
+ AM65_CPSW_STATS(, rx_port_mask_drop),
+ AM65_CPSW_STATS(, rx_top_fifo_drop),
+ AM65_CPSW_STATS(, ale_rate_limit_drop),
+ AM65_CPSW_STATS(, ale_vid_ingress_drop),
+ AM65_CPSW_STATS(, ale_da_eq_sa_drop),
+ AM65_CPSW_STATS(, ale_block_drop),
+ AM65_CPSW_STATS(, ale_secure_drop),
+ AM65_CPSW_STATS(, ale_auth_drop),
+ AM65_CPSW_STATS(, ale_unknown_ucast),
+ AM65_CPSW_STATS(, ale_unknown_ucast_bytes),
+ AM65_CPSW_STATS(, ale_unknown_mcast),
+ AM65_CPSW_STATS(, ale_unknown_mcast_bytes),
+ AM65_CPSW_STATS(, ale_unknown_bcast),
+ AM65_CPSW_STATS(, ale_unknown_bcast_bytes),
+ AM65_CPSW_STATS(, ale_pol_match),
+ AM65_CPSW_STATS(, ale_pol_match_red),
+ AM65_CPSW_STATS(, ale_pol_match_yellow),
+ AM65_CPSW_STATS(, ale_mcast_sa_drop),
+ AM65_CPSW_STATS(, ale_dual_vlan_drop),
+ AM65_CPSW_STATS(, ale_len_err_drop),
+ AM65_CPSW_STATS(, ale_ip_next_hdr_drop),
+ AM65_CPSW_STATS(, ale_ipv4_frag_drop),
+ AM65_CPSW_STATS(, iet_rx_assembly_err),
+ AM65_CPSW_STATS(, iet_rx_assembly_ok),
+ AM65_CPSW_STATS(, iet_rx_smd_err),
+ AM65_CPSW_STATS(, iet_rx_frag),
+ AM65_CPSW_STATS(, iet_tx_hold),
+ AM65_CPSW_STATS(, iet_tx_frag),
+ AM65_CPSW_STATS(, tx_mem_protect_err),
+ AM65_CPSW_STATS(, tx_pri0),
+ AM65_CPSW_STATS(, tx_pri1),
+ AM65_CPSW_STATS(, tx_pri2),
+ AM65_CPSW_STATS(, tx_pri3),
+ AM65_CPSW_STATS(, tx_pri4),
+ AM65_CPSW_STATS(, tx_pri5),
+ AM65_CPSW_STATS(, tx_pri6),
+ AM65_CPSW_STATS(, tx_pri7),
+ AM65_CPSW_STATS(, tx_pri0_bcnt),
+ AM65_CPSW_STATS(, tx_pri1_bcnt),
+ AM65_CPSW_STATS(, tx_pri2_bcnt),
+ AM65_CPSW_STATS(, tx_pri3_bcnt),
+ AM65_CPSW_STATS(, tx_pri4_bcnt),
+ AM65_CPSW_STATS(, tx_pri5_bcnt),
+ AM65_CPSW_STATS(, tx_pri6_bcnt),
+ AM65_CPSW_STATS(, tx_pri7_bcnt),
+ AM65_CPSW_STATS(, tx_pri0_drop),
+ AM65_CPSW_STATS(, tx_pri1_drop),
+ AM65_CPSW_STATS(, tx_pri2_drop),
+ AM65_CPSW_STATS(, tx_pri3_drop),
+ AM65_CPSW_STATS(, tx_pri4_drop),
+ AM65_CPSW_STATS(, tx_pri5_drop),
+ AM65_CPSW_STATS(, tx_pri6_drop),
+ AM65_CPSW_STATS(, tx_pri7_drop),
+ AM65_CPSW_STATS(, tx_pri0_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri1_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri2_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri3_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri4_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri5_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri6_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri7_drop_bcnt),
+};
+
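+ /* Both stat tables above are exposed verbatim through "ethtool -S";
+ * each entry is read from the port's hardware statistics block at the
+ * offset captured by the AM65_CPSW_STATS() initializer.
+ */
+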
+/* Ethtool priv_flags */
+static const char am65_cpsw_ethtool_priv_flags[][ETH_GSTRING_LEN] = {
+#define AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN BIT(0)
+ "p0-rx-ptype-rrobin",
+};
+
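+ /* The flag above is toggled through the generic ethtool private-flags
+ * interface; an illustrative session (interface name assumed):
+ *
+ * # ethtool --show-priv-flags eth0
+ * # ethtool --set-priv-flags eth0 p0-rx-ptype-rrobin on
+ */
+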
+static int am65_cpsw_ethtool_op_begin(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(common->dev);
+ if (ret < 0) {
+ dev_err(common->dev, "ethtool begin failed %d\n", ret);
+ pm_runtime_put_noidle(common->dev);
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_ethtool_op_complete(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ int ret;
+
+ ret = pm_runtime_put(common->dev);
+ if (ret < 0 && ret != -EBUSY)
+ dev_err(common->dev, "ethtool complete failed %d\n", ret);
+}
+
+static void am65_cpsw_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ strlcpy(info->driver, dev_driver_string(common->dev),
+ sizeof(info->driver));
+ strlcpy(info->bus_info, dev_name(common->dev), sizeof(info->bus_info));
+}
+
+static u32 am65_cpsw_get_msglevel(struct net_device *ndev)
+{
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+static void am65_cpsw_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+
+ priv->msg_enable = value;
+}
+
+static void am65_cpsw_get_channels(struct net_device *ndev,
+ struct ethtool_channels *ch)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ ch->max_rx = AM65_CPSW_MAX_RX_QUEUES;
+ ch->max_tx = AM65_CPSW_MAX_TX_QUEUES;
+ ch->rx_count = AM65_CPSW_MAX_RX_QUEUES;
+ ch->tx_count = common->tx_ch_num;
+}
+
+static int am65_cpsw_set_channels(struct net_device *ndev,
+ struct ethtool_channels *chs)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ if (!chs->rx_count || !chs->tx_count)
+ return -EINVAL;
+
+ /* Check if the interface is up. The number of queues can only
+ * be changed while the interface is down.
+ */
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ am65_cpsw_nuss_remove_tx_chns(common);
+
+ return am65_cpsw_nuss_update_tx_chns(common, chs->tx_count);
+}
+
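+ /* Queue counts are changed via the standard ethtool channels API; an
+ * illustrative sequence (interface name assumed), with the link down
+ * as required by the check above:
+ *
+ * # ip link set eth0 down
+ * # ethtool -L eth0 tx 4
+ * # ip link set eth0 up
+ */
+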
+static void am65_cpsw_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ /* ring size change is not supported; only report the current sizes */
+ ering->tx_pending = common->tx_chns[0].descs_num;
+ ering->rx_pending = common->rx_chns.descs_num;
+}
+
+static void am65_cpsw_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ pause->autoneg = AUTONEG_DISABLE;
+ pause->rx_pause = slave->rx_pause ? true : false;
+ pause->tx_pause = slave->tx_pause ? true : false;
+}
+
+static int am65_cpsw_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ if (!slave->phy)
+ return -EINVAL;
+
+ if (!phy_validate_pause(slave->phy, pause))
+ return -EINVAL;
+
+ slave->rx_pause = pause->rx_pause ? true : false;
+ slave->tx_pause = pause->tx_pause ? true : false;
+
+ phy_set_asym_pause(slave->phy, slave->rx_pause, slave->tx_pause);
+
+ return 0;
+}
+
+static void am65_cpsw_get_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (slave->phy)
+ phy_ethtool_get_wol(slave->phy, wol);
+}
+
+static int am65_cpsw_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ if (!slave->phy)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_set_wol(slave->phy, wol);
+}
+
+static int am65_cpsw_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ if (!slave->phy)
+ return -EOPNOTSUPP;
+
+ phy_ethtool_ksettings_get(slave->phy, ecmd);
+ return 0;
+}
+
+static int
+am65_cpsw_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ if (!slave->phy || phy_is_pseudo_fixed_link(slave->phy))
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_ksettings_set(slave->phy, ecmd);
+}
+
+static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ if (!slave->phy || phy_is_pseudo_fixed_link(slave->phy))
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_get_eee(slave->phy, edata);
+}
+
+static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ if (!slave->phy || phy_is_pseudo_fixed_link(slave->phy))
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_set_eee(slave->phy, edata);
+}
+
+static int am65_cpsw_nway_reset(struct net_device *ndev)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ if (!slave->phy || phy_is_pseudo_fixed_link(slave->phy))
+ return -EOPNOTSUPP;
+
+ return phy_restart_aneg(slave->phy);
+}
+
+static int am65_cpsw_get_regs_len(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ u32 i, regdump_len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(am65_cpsw_regdump); i++) {
+ if (am65_cpsw_regdump[i].hdr.module_id ==
+ AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL) {
+ regdump_len += sizeof(struct am65_cpsw_regdump_hdr);
+ regdump_len += common->ale->params.ale_entries *
+ ALE_ENTRY_WORDS * sizeof(u32);
+ continue;
+ }
+ regdump_len += am65_cpsw_regdump[i].hdr.len;
+ }
+
+ return regdump_len;
+}
+
+static void am65_cpsw_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ u32 i, j, pos, *reg = p;
+
+ /* report the regdump format version */
+ regs->version = AM65_CPSW_REGDUMP_VER;
+
+ pos = 0;
+ for (i = 0; i < ARRAY_SIZE(am65_cpsw_regdump); i++) {
+ reg[pos++] = am65_cpsw_regdump[i].hdr.module_id;
+
+ if (am65_cpsw_regdump[i].hdr.module_id ==
+ AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL) {
+ u32 ale_tbl_len = common->ale->params.ale_entries *
+ ALE_ENTRY_WORDS * sizeof(u32) +
+ sizeof(struct am65_cpsw_regdump_hdr);
+ reg[pos++] = ale_tbl_len;
+ cpsw_ale_dump(common->ale, &reg[pos]);
+ pos += ale_tbl_len;
+ continue;
+ }
+
+ reg[pos++] = am65_cpsw_regdump[i].hdr.len;
+
+ j = am65_cpsw_regdump[i].start_ofs;
+ do {
+ reg[pos++] = j;
+ reg[pos++] = readl_relaxed(common->ss_base + j);
+ j += sizeof(u32);
+ } while (j <= am65_cpsw_regdump[i].end_ofs);
+ }
+}
+
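+ /* The blob built above is a sequence of per-module records: a
+ * (module_id, len) pair followed by (offset, value) register pairs,
+ * except for the ALE table record whose payload is the raw table:
+ *
+ * [module_id][len][off0][val0][off1][val1]...
+ * [ALE_TBL id][len][raw ALE entry words...]
+ *
+ * Userspace can fetch it with e.g. "ethtool -d eth0 raw on".
+ */
+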
+static int am65_cpsw_get_sset_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(am65_host_stats) +
+ ARRAY_SIZE(am65_slave_stats);
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(am65_cpsw_ethtool_priv_flags);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void am65_cpsw_get_strings(struct net_device *ndev,
+ u32 stringset, u8 *data)
+{
+ const struct am65_cpsw_ethtool_stat *hw_stats;
+ u32 i, num_stats;
+ u8 *p = data;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(am65_host_stats);
+ hw_stats = am65_host_stats;
+ for (i = 0; i < num_stats; i++) {
+ memcpy(p, hw_stats[i].desc, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ num_stats = ARRAY_SIZE(am65_slave_stats);
+ hw_stats = am65_slave_stats;
+ for (i = 0; i < num_stats; i++) {
+ memcpy(p, hw_stats[i].desc, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ num_stats = ARRAY_SIZE(am65_cpsw_ethtool_priv_flags);
+
+ for (i = 0; i < num_stats; i++) {
+ memcpy(p, am65_cpsw_ethtool_priv_flags[i],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void am65_cpsw_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ const struct am65_cpsw_ethtool_stat *hw_stats;
+ struct am65_cpsw_host *host_p;
+ struct am65_cpsw_port *port;
+ u32 i, num_stats;
+
+ host_p = am65_common_get_host(common);
+ port = am65_ndev_to_port(ndev);
+ num_stats = ARRAY_SIZE(am65_host_stats);
+ hw_stats = am65_host_stats;
+ for (i = 0; i < num_stats; i++)
+ *data++ = readl_relaxed(host_p->stat_base +
+ hw_stats[i].offset);
+
+ num_stats = ARRAY_SIZE(am65_slave_stats);
+ hw_stats = am65_slave_stats;
+ for (i = 0; i < num_stats; i++)
+ *data++ = readl_relaxed(port->stat_base +
+ hw_stats[i].offset);
+}
+
+static u32 am65_cpsw_get_ethtool_priv_flags(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ u32 priv_flags = 0;
+
+ if (common->pf_p0_rx_ptype_rrobin)
+ priv_flags |= AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN;
+
+ return priv_flags;
+}
+
+static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ common->pf_p0_rx_ptype_rrobin =
+ !!(flags & AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN);
+ am65_cpsw_nuss_set_p0_ptype(common);
+
+ return 0;
+}
+
+const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
+ .begin = am65_cpsw_ethtool_op_begin,
+ .complete = am65_cpsw_ethtool_op_complete,
+ .get_drvinfo = am65_cpsw_get_drvinfo,
+ .get_msglevel = am65_cpsw_get_msglevel,
+ .set_msglevel = am65_cpsw_set_msglevel,
+ .get_channels = am65_cpsw_get_channels,
+ .set_channels = am65_cpsw_set_channels,
+ .get_ringparam = am65_cpsw_get_ringparam,
+ .get_regs_len = am65_cpsw_get_regs_len,
+ .get_regs = am65_cpsw_get_regs,
+ .get_sset_count = am65_cpsw_get_sset_count,
+ .get_strings = am65_cpsw_get_strings,
+ .get_ethtool_stats = am65_cpsw_get_ethtool_stats,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_priv_flags = am65_cpsw_get_ethtool_priv_flags,
+ .set_priv_flags = am65_cpsw_set_ethtool_priv_flags,
+
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = am65_cpsw_get_link_ksettings,
+ .set_link_ksettings = am65_cpsw_set_link_ksettings,
+ .get_pauseparam = am65_cpsw_get_pauseparam,
+ .set_pauseparam = am65_cpsw_set_pauseparam,
+ .get_wol = am65_cpsw_get_wol,
+ .set_wol = am65_cpsw_set_wol,
+ .get_eee = am65_cpsw_get_eee,
+ .set_eee = am65_cpsw_set_eee,
+ .nway_reset = am65_cpsw_nway_reset,
+};
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
new file mode 100644
index 000000000000..f71c15c39492
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -0,0 +1,1965 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kmemleak.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/dma/k3-udma-glue.h>
+
+#include "cpsw_ale.h"
+#include "cpsw_sl.h"
+#include "am65-cpsw-nuss.h"
+#include "k3-cppi-desc-pool.h"
+
+#define AM65_CPSW_SS_BASE 0x0
+#define AM65_CPSW_SGMII_BASE 0x100
+#define AM65_CPSW_XGMII_BASE 0x2100
+#define AM65_CPSW_CPSW_NU_BASE 0x20000
+#define AM65_CPSW_NU_PORTS_BASE 0x1000
+#define AM65_CPSW_NU_STATS_BASE 0x1a000
+#define AM65_CPSW_NU_ALE_BASE 0x1e000
+#define AM65_CPSW_NU_CPTS_BASE 0x1d000
+
+#define AM65_CPSW_NU_PORTS_OFFSET 0x1000
+#define AM65_CPSW_NU_STATS_PORT_OFFSET 0x200
+
+#define AM65_CPSW_MAX_PORTS 8
+
+#define AM65_CPSW_MIN_PACKET_SIZE VLAN_ETH_ZLEN
+#define AM65_CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define AM65_CPSW_REG_CTL 0x004
+#define AM65_CPSW_REG_STAT_PORT_EN 0x014
+#define AM65_CPSW_REG_PTYPE 0x018
+
+#define AM65_CPSW_P0_REG_CTL 0x004
+#define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET 0x008
+
+#define AM65_CPSW_PORT_REG_PRI_CTL 0x01c
+#define AM65_CPSW_PORT_REG_RX_PRI_MAP 0x020
+#define AM65_CPSW_PORT_REG_RX_MAXLEN 0x024
+
+#define AM65_CPSW_PORTN_REG_SA_L 0x308
+#define AM65_CPSW_PORTN_REG_SA_H 0x30c
+#define AM65_CPSW_PORTN_REG_TS_CTL 0x310
+#define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG 0x314
+#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG 0x318
+#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 0x31C
+
+#define AM65_CPSW_CTL_VLAN_AWARE BIT(1)
+#define AM65_CPSW_CTL_P0_ENABLE BIT(2)
+#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE BIT(13)
+#define AM65_CPSW_CTL_P0_RX_PAD BIT(14)
+
+/* AM65_CPSW_P0_REG_CTL */
+#define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN BIT(0)
+
+/* AM65_CPSW_PORT_REG_PRI_CTL */
+#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN BIT(8)
+
+/* AM65_CPSW_PN_TS_CTL register fields */
+#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN BIT(4)
+#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN BIT(5)
+#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN BIT(6)
+#define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN BIT(7)
+#define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN BIT(10)
+#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN BIT(11)
+#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT 16
+
+/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
+#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT 16
+
+/* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 BIT(16)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 BIT(17)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 BIT(18)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 BIT(19)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 BIT(20)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 BIT(21)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 BIT(22)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO BIT(23)
+
+/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
+#define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+#define AM65_CPSW_TS_SEQ_ID_OFFSET (0x1e)
+
+#define AM65_CPSW_TS_TX_ANX_ALL_EN \
+ (AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN | \
+ AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN | \
+ AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)
+
+#define AM65_CPSW_ALE_AGEOUT_DEFAULT 30
+/* Number of TX/RX descriptors */
+#define AM65_CPSW_MAX_TX_DESC 500
+#define AM65_CPSW_MAX_RX_DESC 500
+
+#define AM65_CPSW_NAV_PS_DATA_SIZE 16
+#define AM65_CPSW_NAV_SW_DATA_SIZE 16
+
+#define AM65_CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
+ NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
+ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
+ const u8 *dev_addr)
+{
+ u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
+ (dev_addr[2] << 16) | (dev_addr[3] << 24);
+ u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);
+
+ writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
+ writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
+}
+
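+ /* Illustrative example of the packing above: for the MAC address
+ * 02:11:22:33:44:55 the function writes SA_H = 0x33221102 (bytes 0-3)
+ * and SA_L = 0x00005544 (bytes 4-5).
+ */
+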
+static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
+{
+ cpsw_sl_reset(port->slave.mac_sl, 100);
+ /* Max length register has to be restored after MAC SL reset */
+ writel(AM65_CPSW_MAX_PACKET_SIZE,
+ port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
+}
+
+static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
+{
+ common->nuss_ver = readl(common->ss_base);
+ common->cpsw_ver = readl(common->cpsw_base);
+ dev_info(common->dev,
+ "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u\n",
+ common->nuss_ver,
+ common->cpsw_ver,
+ common->port_num + 1);
+}
+
+void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct phy_device *phy = port->slave.phy;
+ u32 mac_control = 0;
+
+ if (!phy)
+ return;
+
+ if (phy->link) {
+ mac_control = CPSW_SL_CTL_GMII_EN;
+
+ if (phy->speed == 1000)
+ mac_control |= CPSW_SL_CTL_GIG;
+ if (phy->speed == 10 && phy_interface_is_rgmii(phy))
+ /* Can be used with in-band mode only */
+ mac_control |= CPSW_SL_CTL_EXT_EN;
+ if (phy->duplex)
+ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
+
+ /* RGMII speed is 100M if !CPSW_SL_CTL_GIG */
+
+ /* rx_pause/tx_pause */
+ if (port->slave.rx_pause)
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+
+ if (port->slave.tx_pause)
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+ cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
+
+ /* enable forwarding */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ netif_tx_wake_all_queues(ndev);
+ } else {
+ int tmo;
+ /* disable forwarding */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
+
+ tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
+ dev_dbg(common->dev, "donw msc_sl %08x tmo %d\n",
+ cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS),
+ tmo);
+
+ cpsw_sl_ctl_reset(port->slave.mac_sl);
+
+ netif_tx_stop_all_queues(ndev);
+ }
+
+ phy_print_status(phy);
+}
+
+static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 port_mask, unreg_mcast = 0;
+ int ret;
+
+ ret = pm_runtime_get_sync(common->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(common->dev);
+ return ret;
+ }
+
+ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+ if (!vid)
+ unreg_mcast = port_mask;
+ dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
+ ret = cpsw_ale_add_vlan(common->ale, vid, port_mask,
+ unreg_mcast, port_mask, 0);
+
+ pm_runtime_put(common->dev);
+ return ret;
+}
+
+static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(common->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(common->dev);
+ return ret;
+ }
+
+ dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
+ ret = cpsw_ale_del_vlan(common->ale, vid, 0);
+
+ pm_runtime_put(common->dev);
+ return ret;
+}
+
+static void am65_cpsw_slave_set_promisc_2g(struct am65_cpsw_port *port,
+ bool promisc)
+{
+ struct am65_cpsw_common *common = port->common;
+
+ if (promisc) {
+ /* Enable promiscuous mode */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_MACONLY_CAF, 1);
+ dev_dbg(common->dev, "promisc enabled\n");
+ } else {
+ /* Disable promiscuous mode */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_MACONLY_CAF, 0);
+ dev_dbg(common->dev, "promisc disabled\n");
+ }
+}
+
+static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 port_mask;
+ bool promisc;
+
+ promisc = !!(ndev->flags & IFF_PROMISC);
+ am65_cpsw_slave_set_promisc_2g(port, promisc);
+
+ if (promisc)
+ return;
+
+ /* Restore allmulti on vlans if necessary */
+ cpsw_ale_set_allmulti(common->ale,
+ ndev->flags & IFF_ALLMULTI, port->port_id);
+
+ port_mask = ALE_PORT_HOST;
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(common->ale, port_mask, -1);
+
+ if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+
+ /* program multicast address list into ALE register */
+ netdev_for_each_mc_addr(ha, ndev) {
+ cpsw_ale_add_mcast(common->ale, ha->addr,
+ port_mask, 0, 0, 0);
+ }
+ }
+}
+
+static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
+ unsigned int txqueue)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ unsigned long trans_start;
+
+ netif_txq = netdev_get_tx_queue(ndev, txqueue);
+ tx_chn = &common->tx_chns[txqueue];
+ trans_start = netif_txq->trans_start;
+
+ netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
+ txqueue,
+ netif_tx_queue_stopped(netif_txq),
+ jiffies_to_msecs(jiffies - trans_start),
+ dql_avail(&netif_txq->dql),
+ k3_cppi_desc_pool_avail(tx_chn->desc_pool));
+
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* try to recover if stopped by us */
+ txq_trans_update(netif_txq);
+ netif_tx_wake_queue(netif_txq);
+ }
+}
+
+static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
+ struct sk_buff *skb)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct cppi5_host_desc_t *desc_rx;
+ struct device *dev = common->dev;
+ u32 pkt_len = skb_tailroom(skb);
+ dma_addr_t desc_dma;
+ dma_addr_t buf_dma;
+ void *swdata;
+
+ desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+ if (!desc_rx) {
+ dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
+ return -ENOMEM;
+ }
+ desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+ buf_dma = dma_map_single(dev, skb->data, pkt_len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ dev_err(dev, "Failed to map rx skb buffer\n");
+ return -EINVAL;
+ }
+
+ cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ AM65_CPSW_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_attach_buf(desc_rx, 0, 0, buf_dma, skb_tailroom(skb));
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ *((void **)swdata) = skb;
+
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
+}
+
+void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
+ u32 val, pri_map;
+
+ /* P0 set Receive Priority Type */
+ val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
+
+ if (common->pf_p0_rx_ptype_rrobin) {
+ val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
+ /* Enet Port fifos work in fixed priority mode only, so
+ * reset P0_Rx_Pri_Map so that all packets go to Enet fifo 0
+ */
+ pri_map = 0x0;
+ } else {
+ val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
+ /* restore P0_Rx_Pri_Map */
+ pri_map = 0x76543210;
+ }
+
+ writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
+ writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
+}
+
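+ /* P0_Rx_Pri_Map holds one nibble per packet priority: nibble N selects
+ * the Enet fifo for priority N. 0x76543210 is the identity mapping
+ * (priority 7 -> fifo 7, ..., priority 0 -> fifo 0), while 0x0 funnels
+ * every priority into fifo 0 as required by round-robin mode.
+ */
+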
+static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
+ netdev_features_t features)
+{
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
+ int port_idx, i, ret;
+ struct sk_buff *skb;
+ u32 val, port_mask;
+
+ if (common->usage_count)
+ return 0;
+
+ /* Control register */
+ writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
+ AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
+ common->cpsw_base + AM65_CPSW_REG_CTL);
+ /* Max length register */
+ writel(AM65_CPSW_MAX_PACKET_SIZE,
+ host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
+ /* set base flow_id */
+ writel(common->rx_flow_id_base,
+ host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
+ /* enable tx checksum offload */
+ if (features & NETIF_F_HW_CSUM)
+ writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
+ host_p->port_base + AM65_CPSW_P0_REG_CTL);
+
+ am65_cpsw_nuss_set_p0_ptype(common);
+
+ /* enable statistics */
+ val = BIT(HOST_PORT_NUM);
+ for (port_idx = 0; port_idx < common->port_num; port_idx++) {
+ struct am65_cpsw_port *port = &common->ports[port_idx];
+
+ if (!port->disabled)
+ val |= BIT(port->port_id);
+ }
+ writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
+
+ /* disable priority elevation */
+ writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);
+
+ cpsw_ale_start(common->ale);
+
+ /* limit to one RX flow only */
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
+ ALE_DEFAULT_THREAD_ID, 0);
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
+ ALE_DEFAULT_THREAD_ENABLE, 1);
+ if (AM65_CPSW_IS_CPSW2G(common))
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
+ ALE_PORT_NOLEARN, 1);
+ /* switch to vlan aware mode */
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ /* default vlan cfg: create mask based on enabled ports */
+ port_mask = GENMASK(common->port_num, 0) &
+ ~common->disabled_ports_mask;
+
+ cpsw_ale_add_vlan(common->ale, 0, port_mask,
+ port_mask, port_mask,
+ port_mask & ~ALE_PORT_HOST);
+
+ for (i = 0; i < common->rx_chns.descs_num; i++) {
+ skb = __netdev_alloc_skb_ip_align(NULL,
+ AM65_CPSW_MAX_PACKET_SIZE,
+ GFP_KERNEL);
+ if (!skb) {
+ dev_err(common->dev, "cannot allocate skb\n");
+ return -ENOMEM;
+ }
+
+ ret = am65_cpsw_nuss_rx_push(common, skb);
+ if (ret < 0) {
+ dev_err(common->dev,
+ "cannot submit skb to channel rx, error %d\n",
+ ret);
+ kfree_skb(skb);
+ return ret;
+ }
+ kmemleak_not_leak(skb);
+ }
+ k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
+ if (ret)
+ return ret;
+ napi_enable(&common->tx_chns[i].napi_tx);
+ }
+
+ napi_enable(&common->napi_rx);
+
+ dev_dbg(common->dev, "cpsw_nuss started\n");
+ return 0;
+}
+
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
+
+static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
+{
+ int i;
+
+ if (common->usage_count != 1)
+ return 0;
+
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ /* shutdown tx channels */
+ atomic_set(&common->tdown_cnt, common->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ reinit_completion(&common->tdown_complete);
+
+ for (i = 0; i < common->tx_ch_num; i++)
+ k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);
+
+ i = wait_for_completion_timeout(&common->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!i)
+ dev_err(common->dev, "tx timeout\n");
+ for (i = 0; i < common->tx_ch_num; i++)
+ napi_disable(&common->tx_chns[i].napi_tx);
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
+ &common->tx_chns[i],
+ am65_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
+ }
+
+ k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
+ napi_disable(&common->napi_rx);
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+ k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
+ &common->rx_chns,
+ am65_cpsw_nuss_rx_cleanup, !!i);
+
+ k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+
+ cpsw_ale_stop(common->ale);
+
+ writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
+ writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
+
+ dev_dbg(common->dev, "cpsw_nuss stopped\n");
+ return 0;
+}
+
+static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int ret;
+
+ if (port->slave.phy)
+ phy_stop(port->slave.phy);
+
+ netif_tx_stop_all_queues(ndev);
+
+ if (port->slave.phy) {
+ phy_disconnect(port->slave.phy);
+ port->slave.phy = NULL;
+ }
+
+ ret = am65_cpsw_nuss_common_stop(common);
+ if (ret)
+ return ret;
+
+ common->usage_count--;
+ pm_runtime_put(common->dev);
+ return 0;
+}
+
+static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 port_mask;
+ int ret, i;
+
+ ret = pm_runtime_get_sync(common->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(common->dev);
+ return ret;
+ }
+
+ /* Notify the stack of the actual queue counts. */
+ ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
+ if (ret) {
+ dev_err(common->dev, "cannot set real number of tx queues\n");
+ return ret;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
+ if (ret) {
+ dev_err(common->dev, "cannot set real number of rx queues\n");
+ return ret;
+ }
+
+ for (i = 0; i < common->tx_ch_num; i++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(ndev, i));
+
+ ret = am65_cpsw_nuss_common_open(common, ndev->features);
+ if (ret)
+ return ret;
+
+ common->usage_count++;
+
+ am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
+
+ if (port->slave.mac_only)
+ /* enable mac-only mode on port */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_MACONLY, 1);
+ if (AM65_CPSW_IS_CPSW2G(common))
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_NOLEARN, 1);
+
+ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+ cpsw_ale_add_ucast(common->ale, ndev->dev_addr,
+ HOST_PORT_NUM, ALE_SECURE, 0);
+ cpsw_ale_add_mcast(common->ale, ndev->broadcast,
+ port_mask, 0, 0, ALE_MCAST_FWD_2);
+
+ /* mac_sl should be configured via phylink interface */
+ am65_cpsw_sl_ctl_reset(port);
+
+ ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET,
+ port->slave.phy_if);
+ if (ret)
+ goto error_cleanup;
+
+ if (port->slave.phy_node) {
+ port->slave.phy = of_phy_connect(ndev,
+ port->slave.phy_node,
+ &am65_cpsw_nuss_adjust_link,
+ 0, port->slave.phy_if);
+ if (!port->slave.phy) {
+ dev_err(common->dev, "phy %pOF not found on slave %d\n",
+ port->slave.phy_node,
+ port->port_id);
+ ret = -ENODEV;
+ goto error_cleanup;
+ }
+ }
+
+ phy_attached_info(port->slave.phy);
+ phy_start(port->slave.phy);
+
+ return 0;
+
+error_cleanup:
+ am65_cpsw_nuss_ndo_slave_stop(ndev);
+ return ret;
+}
+
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct am65_cpsw_rx_chn *rx_chn = data;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb;
+ dma_addr_t buf_dma;
+ u32 buf_dma_len;
+ void **swdata;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+
+ dma_unmap_single(rx_chn->dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ dev_kfree_skb_any(skb);
+}
+
+/* RX psdata[2] word format - checksum information */
+#define AM65_CPSW_RX_PSD_CSUM_ADD GENMASK(15, 0)
+#define AM65_CPSW_RX_PSD_CSUM_ERR BIT(16)
+#define AM65_CPSW_RX_PSD_IS_FRAGMENT BIT(17)
+#define AM65_CPSW_RX_PSD_IS_TCP BIT(18)
+#define AM65_CPSW_RX_PSD_IPV6_VALID BIT(19)
+#define AM65_CPSW_RX_PSD_IPV4_VALID BIT(20)
+
+static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
+{
+ /* HW can verify IPv4/IPv6 TCP/UDP packet checksums.
+ * The csum information is provided in the psdata[2] word:
+ * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates a csum error
+ * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
+ * bits - indicate an IPv4/IPv6 packet
+ * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates a fragmented packet
+ * AM65_CPSW_RX_PSD_CSUM_ADD holds 0xFFFF for non-fragmented packets,
+ * or the csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
+ */
+ skb_checksum_none_assert(skb);
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ return;
+
+ if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
+ AM65_CPSW_RX_PSD_IPV4_VALID)) &&
+ !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
+ /* csum for fragmented packets is unsupported */
+ if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+}
+
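+ /* Illustrative decode: for a good, non-fragmented IPv4 TCP packet the
+ * hardware could report csum_info = 0x0014FFFF, i.e. CSUM_ADD = 0xFFFF,
+ * CSUM_ERR = 0, IS_FRAGMENT = 0, IS_TCP = 1 and IPV4_VALID = 1, which
+ * passes the checks above and yields CHECKSUM_UNNECESSARY. Any value
+ * with CSUM_ERR set leaves the skb as CHECKSUM_NONE.
+ */
+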
+static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
+ u32 flow_idx)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct am65_cpsw_ndev_stats *stats;
+ struct cppi5_host_desc_t *desc_rx;
+ struct device *dev = common->dev;
+ struct sk_buff *skb, *new_skb;
+ dma_addr_t desc_dma, buf_dma;
+ struct am65_cpsw_port *port;
+ struct net_device *ndev;
+ void **swdata;
+ u32 *psdata;
+ int ret = 0;
+
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ dev_err(dev, "RX: pop chn fail %d\n", ret);
+ return ret;
+ }
+
+ if (desc_dma & 0x1) {
+ dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
+ return 0;
+ }
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
+ __func__, flow_idx, &desc_dma);
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+ cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+ dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
+ port = am65_common_get_port(common, port_id);
+ ndev = port->ndev;
+ skb->dev = ndev;
+
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ csum_info = psdata[2];
+ dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
+
+ dma_unmap_single(dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
+ if (new_skb) {
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ am65_cpsw_nuss_rx_csum(skb, csum_info);
+ napi_gro_receive(&common->napi_rx, skb);
+
+ ndev_priv = netdev_priv(ndev);
+ stats = this_cpu_ptr(ndev_priv->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ u64_stats_update_end(&stats->syncp);
+ kmemleak_not_leak(new_skb);
+ } else {
+ ndev->stats.rx_dropped++;
+ new_skb = skb;
+ }
+
+ if (netif_dormant(ndev)) {
+ dev_kfree_skb_any(new_skb);
+ ndev->stats.rx_dropped++;
+ return 0;
+ }
+
+ ret = am65_cpsw_nuss_rx_push(common, new_skb);
+ if (WARN_ON(ret < 0)) {
+ dev_kfree_skb_any(new_skb);
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ }
+
+ return ret;
+}
+
+static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
+ int flow = AM65_CPSW_MAX_RX_FLOWS;
+ int cur_budget, ret;
+ int num_rx = 0;
+
+ /* process every flow */
+ while (flow--) {
+ cur_budget = budget - num_rx;
+
+ while (cur_budget--) {
+ ret = am65_cpsw_nuss_rx_packets(common, flow);
+ if (ret)
+ break;
+ num_rx++;
+ }
+
+ if (num_rx >= budget)
+ break;
+ }
+
+ dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
+
+ if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
+ enable_irq(common->rx_chns.irq);
+
+ return num_rx;
+}
+
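+ /* Interrupt/NAPI handshake used in both directions (summary):
+ * hard irq: disable_irq_nosync() + napi_schedule()
+ * poll: process up to budget packets
+ * poll < budget: napi_complete[_done]() + enable_irq()
+ * The channel interrupt stays masked while NAPI runs, so the handler
+ * cannot re-schedule an already-running poll.
+ */
+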
+static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
+ struct device *dev,
+ struct cppi5_host_desc_t *desc)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc;
+ dma_addr_t buf_dma, next_desc_dma;
+ u32 buf_dma_len;
+
+ first_desc = desc;
+ next_desc = first_desc;
+
+ cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+
+ dma_unmap_single(dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ while (next_desc_dma) {
+ next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ next_desc_dma);
+ cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+
+ dma_unmap_page(dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ }
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
+}
+
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct am65_cpsw_tx_chn *tx_chn = data;
+ struct cppi5_host_desc_t *desc_tx;
+ struct sk_buff *skb;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
+
+ dev_kfree_skb_any(skb);
+}
+
+static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
+ int chn, unsigned int budget)
+{
+ struct cppi5_host_desc_t *desc_tx;
+ struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ unsigned int total_bytes = 0;
+ struct net_device *ndev;
+ struct sk_buff *skb;
+ dma_addr_t desc_dma;
+ int res, num_tx = 0;
+ void **swdata;
+
+ tx_chn = &common->tx_chns[chn];
+
+ while (true) {
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct am65_cpsw_ndev_stats *stats;
+
+ res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ if (res == -ENODATA)
+ break;
+
+ if (desc_dma & 0x1) {
+ if (atomic_dec_and_test(&common->tdown_cnt))
+ complete(&common->tdown_complete);
+ break;
+ }
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ am65_cpsw_nuss_xmit_free(tx_chn, dev, desc_tx);
+
+ ndev = skb->dev;
+
+ ndev_priv = netdev_priv(ndev);
+ stats = this_cpu_ptr(ndev_priv->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ num_tx++;
+ }
+
+ if (!num_tx)
+ return 0;
+
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+
+ netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
+
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* If the queue was stopped due to stalled tx dma, wake it
+ * now that free tx descriptors are available.
+ */
+ __netif_tx_lock(netif_txq, smp_processor_id());
+ if (netif_running(ndev) &&
+ (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS))
+ netif_tx_wake_queue(netif_txq);
+
+ __netif_tx_unlock(netif_txq);
+ }
+ dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
+
+ return num_tx;
+}
+
+static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
+ int num_tx;
+
+ num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id,
+ budget);
+ num_tx = min(num_tx, budget);
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ enable_irq(tx_chn->irq);
+ }
+
+ return num_tx;
+}
+
+static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
+{
+ struct am65_cpsw_common *common = dev_id;
+
+ disable_irq_nosync(irq);
+ napi_schedule(&common->napi_rx);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
+{
+ struct am65_cpsw_tx_chn *tx_chn = dev_id;
+
+ disable_irq_nosync(irq);
+ napi_schedule(&tx_chn->napi_tx);
+
+ return IRQ_HANDLED;
+}
+
+static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ dma_addr_t desc_dma, buf_dma;
+ int ret, q_idx, i;
+ void **swdata;
+ u32 *psdata;
+ u32 pkt_len;
+
+ /* padding enabled in hw */
+ pkt_len = skb_headlen(skb);
+
+ q_idx = skb_get_queue_mapping(skb);
+ dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
+
+ tx_chn = &common->tx_chns[q_idx];
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
+
+ /* Map the linear buffer */
+ buf_dma = dma_map_single(dev, skb->data, pkt_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ dev_err(dev, "Failed to map tx skb buffer\n");
+ ndev->stats.tx_errors++;
+ goto err_free_skb;
+ }
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ dev_dbg(dev, "Failed to allocate descriptor\n");
+ dma_unmap_single(dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ goto busy_stop_q;
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ AM65_CPSW_NAV_PS_DATA_SIZE);
+ cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF);
+ cppi5_hdesc_set_pkttype(first_desc, 0x7);
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
+
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ *(swdata) = skb;
+ psdata = cppi5_hdesc_get_psdata(first_desc);
+
+ /* HW csum offload if enabled */
+ psdata[2] = 0;
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ unsigned int cs_start, cs_offset;
+
+ cs_start = skb_transport_offset(skb);
+ cs_offset = cs_start + skb->csum_offset;
+ /* HW numbers bytes starting from 1 */
+ psdata[2] = ((cs_offset + 1) << 24) |
+ ((cs_start + 1) << 16) | (skb->len - cs_start);
+ dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
+ }
+
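+ /* Illustrative encoding: for a 100-byte TCP/IPv4 frame cs_start is
+ * 14 + 20 = 34 and the TCP checksum field sits 16 bytes into the
+ * header, so cs_offset = 50 and
+ * psdata[2] = (51 << 24) | (35 << 16) | 66 = 0x33230042,
+ * i.e. result byte 51, start byte 35 (both 1-based) over 66 bytes.
+ */
+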
+ if (!skb_is_nonlinear(skb))
+ goto done_tx;
+
+ dev_dbg(dev, "fragmented SKB\n");
+
+ /* Handle the case where skb is fragmented in pages */
+ cur_desc = first_desc;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 frag_size = skb_frag_size(frag);
+
+ next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!next_desc) {
+ dev_err(dev, "Failed to allocate descriptor\n");
+ goto busy_free_descs;
+ }
+
+ buf_dma = skb_frag_dma_map(dev, frag, 0, frag_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ dev_err(dev, "Failed to map tx skb page\n");
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ ndev->stats.tx_errors++;
+ goto err_free_descs;
+ }
+
+ cppi5_hdesc_reset_hbdesc(next_desc);
+ cppi5_hdesc_attach_buf(next_desc,
+ buf_dma, frag_size, buf_dma, frag_size);
+
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+ next_desc);
+ cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
+
+ pkt_len += frag_size;
+ cur_desc = next_desc;
+ }
+ WARN_ON(pkt_len != skb->len);
+
+done_tx:
+ skb_tx_timestamp(skb);
+
+ /* report BQL before sending the packet */
+ netdev_tx_sent_queue(netif_txq, pkt_len);
+
+ cppi5_hdesc_set_pktlen(first_desc, pkt_len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ dev_err(dev, "can't push desc %d\n", ret);
+ /* inform BQL */
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ ndev->stats.tx_errors++;
+ goto err_free_descs;
+ }
+
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
+ netif_tx_stop_queue(netif_txq);
+ /* Barrier, so that stop_queue is visible to other CPUs */
+ smp_mb__after_atomic();
+ dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);
+
+ /* re-check for SMP */
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS) {
+ netif_tx_wake_queue(netif_txq);
+ dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
+ }
+ }
+
+ return NETDEV_TX_OK;
+
+err_free_descs:
+ am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+err_free_skb:
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+
+busy_free_descs:
+ am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+busy_stop_q:
+ netif_tx_stop_queue(netif_txq);
+ return NETDEV_TX_BUSY;
+}
+
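+ /* BQL pairing in the xmit path above: bytes are reported with
+ * netdev_tx_sent_queue() before the descriptor push, and credited back
+ * either by netdev_tx_completed_queue() in the tx completion handler
+ * or immediately when the push fails.
+ */
+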
+static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
+ void *addr)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct sockaddr *sockaddr = (struct sockaddr *)addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(ndev, addr);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_get_sync(common->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(common->dev);
+ return ret;
+ }
+
+ cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
+ HOST_PORT_NUM, 0, 0);
+ cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
+ HOST_PORT_NUM, ALE_SECURE, 0);
+
+ am65_cpsw_port_set_sl_mac(port, addr);
+ eth_commit_mac_addr_change(ndev, sockaddr);
+
+ pm_runtime_put(common->dev);
+
+ return 0;
+}
+
+static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
+ struct ifreq *req, int cmd)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!port->slave.phy)
+ return -EOPNOTSUPP;
+
+ return phy_mii_ioctl(port->slave.phy, req, cmd);
+}
+
+static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct am65_cpsw_ndev_stats *cpu_stats;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+
+ cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
+ do {
+ start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ rx_packets = cpu_stats->rx_packets;
+ rx_bytes = cpu_stats->rx_bytes;
+ tx_packets = cpu_stats->tx_packets;
+ tx_bytes = cpu_stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
+
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->tx_dropped = dev->stats.tx_dropped;
+}
+
+static int am65_cpsw_nuss_ndo_slave_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ netdev_features_t changes = features ^ ndev->features;
+ struct am65_cpsw_host *host_p;
+
+ host_p = am65_common_get_host(common);
+
+ if (changes & NETIF_F_HW_CSUM) {
+ bool enable = !!(features & NETIF_F_HW_CSUM);
+
+ dev_info(common->dev, "Turn %s tx-checksum-ip-generic\n",
+ enable ? "ON" : "OFF");
+ if (enable)
+ writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
+ host_p->port_base + AM65_CPSW_P0_REG_CTL);
+ else
+ writel(0,
+ host_p->port_base + AM65_CPSW_P0_REG_CTL);
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
+ .ndo_open = am65_cpsw_nuss_ndo_slave_open,
+ .ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
+ .ndo_start_xmit = am65_cpsw_nuss_ndo_slave_xmit,
+ .ndo_set_rx_mode = am65_cpsw_nuss_ndo_slave_set_rx_mode,
+ .ndo_get_stats64 = am65_cpsw_nuss_ndo_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = am65_cpsw_nuss_ndo_slave_set_mac_address,
+ .ndo_tx_timeout = am65_cpsw_nuss_ndo_host_tx_timeout,
+ .ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid,
+ .ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
+ .ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
+ .ndo_set_features = am65_cpsw_nuss_ndo_slave_set_features,
+};
+
+static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_common *common = port->common;
+
+ if (!port->disabled)
+ return;
+
+ common->disabled_ports_mask |= BIT(port->port_id);
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_reset(port->slave.mac_sl, 100);
+ cpsw_sl_ctl_reset(port->slave.mac_sl);
+}
+
+static void am65_cpsw_nuss_free_tx_chns(void *data)
+{
+ struct am65_cpsw_common *common = data;
+ int i;
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
+ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+
+ if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
+ k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+
+ memset(tx_chn, 0, sizeof(*tx_chn));
+ }
+}
+
+void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ int i;
+
+ devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ if (tx_chn->irq)
+ devm_free_irq(dev, tx_chn->irq, tx_chn);
+
+ netif_napi_del(&tx_chn->napi_tx);
+
+ if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
+ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+
+ if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
+ k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+
+ memset(tx_chn, 0, sizeof(*tx_chn));
+ }
+}
+
+static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
+{
+ u32 max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
+ struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
+ struct device *dev = common->dev;
+ struct k3_ring_cfg ring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0
+ };
+ u32 hdesc_size;
+ int i, ret = 0;
+
+ hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
+ AM65_CPSW_NAV_SW_DATA_SIZE);
+
+ tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
+ tx_cfg.tx_cfg = ring_cfg;
+ tx_cfg.txcq_cfg = ring_cfg;
+ tx_cfg.tx_cfg.size = max_desc_num;
+ tx_cfg.txcq_cfg.size = max_desc_num;
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ snprintf(tx_chn->tx_chn_name,
+ sizeof(tx_chn->tx_chn_name), "tx%d", i);
+
+ tx_chn->common = common;
+ tx_chn->id = i;
+ tx_chn->descs_num = max_desc_num;
+ tx_chn->desc_pool =
+ k3_cppi_desc_pool_create_name(dev,
+ tx_chn->descs_num,
+ hdesc_size,
+ tx_chn->tx_chn_name);
+ if (IS_ERR(tx_chn->desc_pool)) {
+ ret = PTR_ERR(tx_chn->desc_pool);
+ dev_err(dev, "Failed to create poll %d\n", ret);
+ goto err;
+ }
+
+ tx_chn->tx_chn =
+ k3_udma_glue_request_tx_chn(dev,
+ tx_chn->tx_chn_name,
+ &tx_cfg);
+ if (IS_ERR(tx_chn->tx_chn)) {
+ ret = PTR_ERR(tx_chn->tx_chn);
+ dev_err(dev, "Failed to request tx dma channel %d\n",
+ ret);
+ goto err;
+ }
+
+ tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
+ if (tx_chn->irq <= 0) {
+ dev_err(dev, "Failed to get tx dma irq %d\n",
+ tx_chn->irq);
+ ret = -ENXIO;
+ goto err;
+ }
+
+ snprintf(tx_chn->tx_chn_name,
+ sizeof(tx_chn->tx_chn_name), "%s-tx%d",
+ dev_name(dev), tx_chn->id);
+ }
+
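+ /* Reached on success as well: the devm action registered below frees
+ * whatever channels were set up, so no explicit unwind is needed above
+ * (the rx path below follows the same pattern).
+ */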
+err:
+ i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
+ if (i) {
+ dev_err(dev, "failed to add free_tx_chns action %d", i);
+ return i;
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_nuss_free_rx_chns(void *data)
+{
+ struct am65_cpsw_common *common = data;
+ struct am65_cpsw_rx_chn *rx_chn;
+
+ rx_chn = &common->rx_chns;
+
+ if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
+ k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+
+ if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
+ k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
+}
+
+static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
+ u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
+ struct device *dev = common->dev;
+ u32 hdesc_size;
+ u32 fdqring_id;
+ int i, ret = 0;
+
+ hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
+ AM65_CPSW_NAV_SW_DATA_SIZE);
+
+ rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
+ rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
+ rx_cfg.flow_id_base = common->rx_flow_id_base;
+
+ /* init all flows */
+ rx_chn->dev = dev;
+ rx_chn->descs_num = max_desc_num;
+ rx_chn->desc_pool = k3_cppi_desc_pool_create_name(dev,
+ rx_chn->descs_num,
+ hdesc_size, "rx");
+ if (IS_ERR(rx_chn->desc_pool)) {
+ ret = PTR_ERR(rx_chn->desc_pool);
+ dev_err(dev, "Failed to create rx poll %d\n", ret);
+ goto err;
+ }
+
+ rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
+ if (IS_ERR(rx_chn->rx_chn)) {
+ ret = PTR_ERR(rx_chn->rx_chn);
+ dev_err(dev, "Failed to request rx dma channel %d\n", ret);
+ goto err;
+ }
+
+ common->rx_flow_id_base =
+ k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
+ dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
+
+ fdqring_id = K3_RINGACC_RING_ID_ANY;
+ for (i = 0; i < rx_cfg.flow_id_num; i++) {
+ struct k3_ring_cfg rxring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0,
+ };
+ struct k3_ring_cfg fdqring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_MESSAGE,
+ .flags = K3_RINGACC_RING_SHARED,
+ };
+ struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
+ .rx_cfg = rxring_cfg,
+ .rxfdq_cfg = fdqring_cfg,
+ .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
+ .src_tag_lo_sel =
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
+ };
+
+ rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
+ rx_flow_cfg.rx_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+
+ ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
+ i, &rx_flow_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
+ goto err;
+ }
+ if (!i)
+ fdqring_id =
+ k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
+ i);
+
+ rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+
+ if (rx_chn->irq <= 0) {
+ dev_err(dev, "Failed to get rx dma irq %d\n",
+ rx_chn->irq);
+ ret = -ENXIO;
+ goto err;
+ }
+ }
+
+err:
+ i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
+ if (i) {
+ dev_err(dev, "failed to add free_rx_chns action %d", i);
+ return i;
+ }
+
+ return ret;
+}
+
+static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
+
+ host_p->common = common;
+ host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE;
+ host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE;
+
+ return 0;
+}
+
+static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
+ int slave, u8 *mac_addr)
+{
+ u32 mac_lo, mac_hi, offset;
+ struct regmap *syscon;
+ int ret;
+
+ syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
+ if (IS_ERR(syscon)) {
+ if (PTR_ERR(syscon) == -ENODEV)
+ return 0;
+ return PTR_ERR(syscon);
+ }
+
+ ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
+ &offset);
+ if (ret)
+ return ret;
+
+ regmap_read(syscon, offset, &mac_lo);
+ regmap_read(syscon, offset + 4, &mac_hi);
+
+ mac_addr[0] = (mac_hi >> 8) & 0xff;
+ mac_addr[1] = mac_hi & 0xff;
+ mac_addr[2] = (mac_lo >> 24) & 0xff;
+ mac_addr[3] = (mac_lo >> 16) & 0xff;
+ mac_addr[4] = (mac_lo >> 8) & 0xff;
+ mac_addr[5] = mac_lo & 0xff;
+
+ return 0;
+}
+
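+ /* Illustrative efuse layout: with mac_hi = 0x0000AABB and
+ * mac_lo = 0xCCDDEEFF the helper yields AA:BB:CC:DD:EE:FF - the two
+ * high bytes come from the second word, the remaining four from the
+ * first.
+ */
+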
+static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
+{
+ struct device_node *node, *port_np;
+ struct device *dev = common->dev;
+ int ret;
+
+ node = of_get_child_by_name(dev->of_node, "ethernet-ports");
+ if (!node)
+ return -ENOENT;
+
+ for_each_child_of_node(node, port_np) {
+ struct am65_cpsw_port *port;
+ const void *mac_addr;
+ u32 port_id;
+
+ /* not a slave port node, skip it */
+ if (strcmp(port_np->name, "port"))
+ continue;
+
+ ret = of_property_read_u32(port_np, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "%pOF error reading port_id %d\n",
+ port_np, ret);
+ return ret;
+ }
+
+ if (!port_id || port_id > common->port_num) {
+ dev_err(dev, "%pOF has invalid port_id %u %s\n",
+ port_np, port_id, port_np->name);
+ return -EINVAL;
+ }
+
+ port = am65_common_get_port(common, port_id);
+ port->port_id = port_id;
+ port->common = common;
+ port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
+ AM65_CPSW_NU_PORTS_OFFSET * (port_id);
+ port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
+ (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
+ port->name = of_get_property(port_np, "label", NULL);
+
+ port->disabled = !of_device_is_available(port_np);
+ if (port->disabled)
+ continue;
+
+ port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
+ if (IS_ERR(port->slave.ifphy)) {
+ ret = PTR_ERR(port->slave.ifphy);
+ dev_err(dev, "%pOF error retrieving port phy: %d\n",
+ port_np, ret);
+ return ret;
+ }
+
+ port->slave.mac_only =
+ of_property_read_bool(port_np, "ti,mac-only");
+
+ /* get phy/link info */
+ if (of_phy_is_fixed_link(port_np)) {
+ ret = of_phy_register_fixed_link(port_np);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
+ port_np, ret);
+ return ret;
+ }
+ port->slave.phy_node = of_node_get(port_np);
+ } else {
+ port->slave.phy_node =
+ of_parse_phandle(port_np, "phy-handle", 0);
+ }
+
+ if (!port->slave.phy_node) {
+ dev_err(dev,
+ "slave[%d] no phy found\n", port_id);
+ return -ENODEV;
+ }
+
+ ret = of_get_phy_mode(port_np, &port->slave.phy_if);
+ if (ret) {
+ dev_err(dev, "%pOF read phy-mode err %d\n",
+ port_np, ret);
+ return ret;
+ }
+
+ port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
+ if (IS_ERR(port->slave.mac_sl))
+ return PTR_ERR(port->slave.mac_sl);
+
+ mac_addr = of_get_mac_address(port_np);
+ if (!IS_ERR(mac_addr)) {
+ ether_addr_copy(port->slave.mac_addr, mac_addr);
+ } else if (am65_cpsw_am654_get_efuse_macid(port_np,
+ port->port_id,
+ port->slave.mac_addr) ||
+ !is_valid_ether_addr(port->slave.mac_addr)) {
+ random_ether_addr(port->slave.mac_addr);
+ dev_err(dev, "Use random MAC address\n");
+ }
+ }
+ of_node_put(node);
+
+ return 0;
+}
+
+static void am65_cpsw_pcpu_stats_free(void *data)
+{
+ struct am65_cpsw_ndev_stats __percpu *stats = data;
+
+ free_percpu(stats);
+}
+
+static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct device *dev = common->dev;
+ struct am65_cpsw_port *port;
+ int ret;
+
+ port = am65_common_get_port(common, 1);
+
+ /* alloc netdev */
+ port->ndev = devm_alloc_etherdev_mqs(common->dev,
+ sizeof(struct am65_cpsw_ndev_priv),
+ AM65_CPSW_MAX_TX_QUEUES,
+ AM65_CPSW_MAX_RX_QUEUES);
+ if (!port->ndev) {
+ dev_err(dev, "error allocating slave net_device %u\n",
+ port->port_id);
+ return -ENOMEM;
+ }
+
+ ndev_priv = netdev_priv(port->ndev);
+ ndev_priv->port = port;
+ ndev_priv->msg_enable = AM65_CPSW_DEBUG;
+ SET_NETDEV_DEV(port->ndev, dev);
+
+ ether_addr_copy(port->ndev->dev_addr, port->slave.mac_addr);
+
+ port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
+ port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
+ port->ndev->hw_features = NETIF_F_SG |
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM;
+ port->ndev->features = port->ndev->hw_features |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ port->ndev->vlan_features |= NETIF_F_SG;
+ port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops_2g;
+ port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
+
+ /* Disable TX checksum offload by default due to HW bug */
+ if (common->pdata->quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
+ port->ndev->features &= ~NETIF_F_HW_CSUM;
+
+ ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats);
+ if (!ndev_priv->stats)
+ return -ENOMEM;
+
+ ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
+ ndev_priv->stats);
+ if (ret) {
+ dev_err(dev, "failed to add percpu stat free action %d", ret);
+ return ret;
+ }
+
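+ /* A single shared RX NAPI instance services the one RX flow used in
+ * 2G mode.
+ */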
+ netif_napi_add(port->ndev, &common->napi_rx,
+ am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);
+
+ common->pf_p0_rx_ptype_rrobin = false;
+
+ return ret;
+}
+
+static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ struct am65_cpsw_port *port;
+ int i, ret = 0;
+
+ port = am65_common_get_port(common, 1);
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ netif_tx_napi_add(port->ndev, &tx_chn->napi_tx,
+ am65_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT);
+
+ ret = devm_request_irq(dev, tx_chn->irq,
+ am65_cpsw_nuss_tx_irq,
+ 0, tx_chn->tx_chn_name, tx_chn);
+ if (ret) {
+ dev_err(dev, "failure requesting tx%u irq %u, %d\n",
+ tx_chn->id, tx_chn->irq, ret);
+ goto err;
+ }
+ }
+
+err:
+ return ret;
+}
+
+static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ struct am65_cpsw_port *port;
+ int ret = 0;
+
+ port = am65_common_get_port(common, 1);
+ ret = am65_cpsw_nuss_ndev_add_napi_2g(common);
+ if (ret)
+ goto err;
+
+ ret = devm_request_irq(dev, common->rx_chns.irq,
+ am65_cpsw_nuss_rx_irq,
+ 0, dev_name(dev), common);
+ if (ret) {
+ dev_err(dev, "failure requesting rx irq %u, %d\n",
+ common->rx_chns.irq, ret);
+ goto err;
+ }
+
+ ret = register_netdev(port->ndev);
+ if (ret)
+ dev_err(dev, "error registering slave net device %d\n", ret);
+
+ /* ndev can't be auto-unregistered via devm_add_action() because the
+ * driver core release sequence deconfigures DMA before the devres
+ * release actions run
+ */
+err:
+ return ret;
+}
+
+int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
+{
+ int ret;
+
+ common->tx_ch_num = num_tx;
+ ret = am65_cpsw_nuss_init_tx_chns(common);
+ if (ret)
+ return ret;
+
+ return am65_cpsw_nuss_ndev_add_napi_2g(common);
+}
+
+static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ if (port->ndev)
+ unregister_netdev(port->ndev);
+ }
+}
+
+static const struct am65_cpsw_pdata am65x_sr1_0 = {
+ .quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
+};
+
+static const struct am65_cpsw_pdata j721e_sr1_0 = {
+ .quirks = 0,
+};
+
+static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
+ { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0 },
+ { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_sr1_0 },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
+
+static int am65_cpsw_nuss_probe(struct platform_device *pdev)
+{
+ struct cpsw_ale_params ale_params;
+ const struct of_device_id *of_id;
+ struct device *dev = &pdev->dev;
+ struct am65_cpsw_common *common;
+ struct device_node *node;
+ struct resource *res;
+ int ret, i;
+
+ common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
+ if (!common)
+ return -ENOMEM;
+ common->dev = dev;
+
+ of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev);
+ if (!of_id)
+ return -EINVAL;
+ common->pdata = of_id->data;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss");
+ common->ss_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(common->ss_base))
+ return PTR_ERR(common->ss_base);
+ common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;
+
+ node = of_get_child_by_name(dev->of_node, "ethernet-ports");
+ if (!node)
+ return -ENOENT;
+ common->port_num = of_get_child_count(node);
+ of_node_put(node);
+ if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
+ return -ENOENT;
+
+ if (common->port_num != 1)
+ return -EOPNOTSUPP;
+
+ common->rx_flow_id_base = -1;
+ init_completion(&common->tdown_complete);
+ common->tx_ch_num = 1;
+
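+ /* K3 UDMA supports 48-bit addressing, hence the 48-bit DMA mask */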
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(dev, "error setting dma mask: %d\n", ret);
+ return ret;
+ }
+
+ common->ports = devm_kcalloc(dev, common->port_num,
+ sizeof(*common->ports),
+ GFP_KERNEL);
+ if (!common->ports)
+ return -ENOMEM;
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ /* Do not fail on this; in some cases the node may have no children */
+ if (ret)
+ dev_warn(dev, "populating child nodes err:%d\n", ret);
+
+ am65_cpsw_nuss_get_ver(common);
+
+ /* init tx channels */
+ ret = am65_cpsw_nuss_init_tx_chns(common);
+ if (ret)
+ goto err_of_clear;
+ ret = am65_cpsw_nuss_init_rx_chns(common);
+ if (ret)
+ goto err_of_clear;
+
+ ret = am65_cpsw_nuss_init_host_p(common);
+ if (ret)
+ goto err_of_clear;
+
+ ret = am65_cpsw_nuss_init_slave_ports(common);
+ if (ret)
+ goto err_of_clear;
+
+ /* init common data */
+ ale_params.dev = dev;
+ ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
+ ale_params.ale_entries = 0;
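+ /* ALE port count includes the host port, hence the +1 */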
+ ale_params.ale_ports = common->port_num + 1;
+ ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
+ ale_params.nu_switch_ale = true;
+
+ common->ale = cpsw_ale_create(&ale_params);
+ if (!common->ale) {
+ dev_err(dev, "error initializing ale engine\n");
+ ret = -ENODEV;
+ goto err_of_clear;
+ }
+
+ /* init ports */
+ for (i = 0; i < common->port_num; i++)
+ am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);
+
+ dev_set_drvdata(dev, common);
+
+ ret = am65_cpsw_nuss_init_ndev_2g(common);
+ if (ret)
+ goto err_of_clear;
+
+ ret = am65_cpsw_nuss_ndev_reg_2g(common);
+ if (ret)
+ goto err_of_clear;
+
+ pm_runtime_put(dev);
+ return 0;
+
+err_of_clear:
+ of_platform_depopulate(dev);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int am65_cpsw_nuss_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct am65_cpsw_common *common;
+ int ret;
+
+ common = dev_get_drvdata(dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return ret;
+ }
+
+ /* must unregister ndevs here because the driver core release_driver
+ * routine calls dma_deconfigure(dev) before devres_release_all(dev)
+ */
+ am65_cpsw_nuss_cleanup_ndev(common);
+
+ of_platform_depopulate(dev);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver am65_cpsw_nuss_driver = {
+ .driver = {
+ .name = AM65_CPSW_DRV_NAME,
+ .of_match_table = am65_cpsw_nuss_of_mtable,
+ },
+ .probe = am65_cpsw_nuss_probe,
+ .remove = am65_cpsw_nuss_remove,
+};
+
+module_platform_driver(am65_cpsw_nuss_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
+MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver");
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
new file mode 100644
index 000000000000..41ae5b4c7931
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ */
+
+#ifndef AM65_CPSW_NUSS_H_
+#define AM65_CPSW_NUSS_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#define HOST_PORT_NUM 0
+
+#define AM65_CPSW_MAX_TX_QUEUES 8
+#define AM65_CPSW_MAX_RX_QUEUES 1
+#define AM65_CPSW_MAX_RX_FLOWS 1
+
+struct am65_cpsw_slave_data {
+ bool mac_only;
+ struct cpsw_sl *mac_sl;
+ struct device_node *phy_node;
+ struct phy_device *phy;
+ phy_interface_t phy_if;
+ struct phy *ifphy;
+ bool rx_pause;
+ bool tx_pause;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct am65_cpsw_port {
+ struct am65_cpsw_common *common;
+ struct net_device *ndev;
+ const char *name;
+ u32 port_id;
+ void __iomem *port_base;
+ void __iomem *stat_base;
+ bool disabled;
+ struct am65_cpsw_slave_data slave;
+};
+
+struct am65_cpsw_host {
+ struct am65_cpsw_common *common;
+ void __iomem *port_base;
+ void __iomem *stat_base;
+};
+
+struct am65_cpsw_tx_chn {
+ struct napi_struct napi_tx;
+ struct am65_cpsw_common *common;
+ struct k3_cppi_desc_pool *desc_pool;
+ struct k3_udma_glue_tx_channel *tx_chn;
+ int irq;
+ u32 id;
+ u32 descs_num;
+ char tx_chn_name[128];
+};
+
+struct am65_cpsw_rx_chn {
+ struct device *dev;
+ struct k3_cppi_desc_pool *desc_pool;
+ struct k3_udma_glue_rx_channel *rx_chn;
+ u32 descs_num;
+ int irq;
+};
+
+#define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
+
+struct am65_cpsw_pdata {
+ u32 quirks;
+};
+
+struct am65_cpsw_common {
+ struct device *dev;
+ const struct am65_cpsw_pdata *pdata;
+
+ void __iomem *ss_base;
+ void __iomem *cpsw_base;
+
+ u32 port_num;
+ struct am65_cpsw_host host;
+ struct am65_cpsw_port *ports;
+ u32 disabled_ports_mask;
+
+ int usage_count; /* number of opened ports */
+ struct cpsw_ale *ale;
+ int tx_ch_num;
+ u32 rx_flow_id_base;
+
+ struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_TX_QUEUES];
+ struct completion tdown_complete;
+ atomic_t tdown_cnt;
+
+ struct am65_cpsw_rx_chn rx_chns;
+ struct napi_struct napi_rx;
+
+ u32 nuss_ver;
+ u32 cpsw_ver;
+
+ bool pf_p0_rx_ptype_rrobin;
+};
+
+struct am65_cpsw_ndev_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ struct u64_stats_sync syncp;
+};
+
+struct am65_cpsw_ndev_priv {
+ u32 msg_enable;
+ struct am65_cpsw_port *port;
+ struct am65_cpsw_ndev_stats __percpu *stats;
+};
+
+#define am65_ndev_to_priv(ndev) \
+ ((struct am65_cpsw_ndev_priv *)netdev_priv(ndev))
+#define am65_ndev_to_port(ndev) (am65_ndev_to_priv(ndev)->port)
+#define am65_ndev_to_common(ndev) (am65_ndev_to_port(ndev)->common)
+#define am65_ndev_to_slave(ndev) (&am65_ndev_to_port(ndev)->slave)
+
+#define am65_common_get_host(common) (&(common)->host)
+#define am65_common_get_port(common, id) (&(common)->ports[(id) - 1])
+
+#define am65_cpsw_napi_to_common(pnapi) \
+ container_of(pnapi, struct am65_cpsw_common, napi_rx)
+#define am65_cpsw_napi_to_tx_chn(pnapi) \
+ container_of(pnapi, struct am65_cpsw_tx_chn, napi_tx)
+
+#define AM65_CPSW_DRV_NAME "am65-cpsw-nuss"
+
+#define AM65_CPSW_IS_CPSW2G(common) ((common)->port_num == 1)
+
+extern const struct ethtool_ops am65_cpsw_ethtool_ops_slave;
+
+void am65_cpsw_nuss_adjust_link(struct net_device *ndev);
+void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common);
+void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common);
+int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx);
+
+#endif /* AM65_CPSW_NUSS_H_ */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 6ae4a72e6f43..c2c5bf87da01 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1211,6 +1211,7 @@ static int cpsw_set_channels(struct net_device *ndev,
}
static const struct ethtool_ops cpsw_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
.set_msglevel = cpsw_set_msglevel,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index ecdbde539eb7..0374e6936091 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -44,6 +44,8 @@
#define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS 0x9C
#define ALE_VLAN_MASK_MUX(reg) (0xc0 + (0x4 * (reg)))
+#define AM65_CPSW_ALE_THREAD_DEF_REG 0x134
+
#define ALE_TABLE_WRITE BIT(31)
#define ALE_TYPE_FREE 0
@@ -122,6 +124,8 @@ DEFINE_ALE_FIELD(mcast, 40, 1)
DEFINE_ALE_FIELD(vlan_unreg_mcast_idx, 20, 3)
DEFINE_ALE_FIELD(vlan_reg_mcast_idx, 44, 3)
+#define NU_VLAN_UNREG_MCAST_IDX 1
+
/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
@@ -455,6 +459,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag,
cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
ale->vlan_field_bits);
} else {
+ cpsw_ale_set_vlan_unreg_mcast_idx(ale_entry,
+ NU_VLAN_UNREG_MCAST_IDX);
cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast);
}
cpsw_ale_set_vlan_member_list(ale_entry, port_mask,
@@ -775,6 +781,22 @@ static struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
.port_shift = 0,
.bits = 1,
},
+ [ALE_PORT_MACONLY] = {
+ .name = "mac_only_port_mode",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 11,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_PORT_MACONLY_CAF] = {
+ .name = "mac_only_port_caf",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 13,
+ .port_shift = 0,
+ .bits = 1,
+ },
[ALE_PORT_MCAST_LIMIT] = {
.name = "mcast_limit",
.offset = ALE_PORTCTL,
@@ -823,6 +845,22 @@ static struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
.port_shift = 0,
.bits = 6,
},
+ [ALE_DEFAULT_THREAD_ID] = {
+ .name = "default_thread_id",
+ .offset = AM65_CPSW_ALE_THREAD_DEF_REG,
+ .port_offset = 0,
+ .shift = 0,
+ .port_shift = 0,
+ .bits = 6,
+ },
+ [ALE_DEFAULT_THREAD_ENABLE] = {
+ .name = "default_thread_id_enable",
+ .offset = AM65_CPSW_ALE_THREAD_DEF_REG,
+ .port_offset = 0,
+ .shift = 15,
+ .port_shift = 0,
+ .bits = 1,
+ },
};
int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 70d0955c2652..6a3cb6898728 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -62,8 +62,12 @@ enum cpsw_ale_control {
ALE_PORT_UNKNOWN_MCAST_FLOOD,
ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
ALE_PORT_UNTAGGED_EGRESS,
+ ALE_PORT_MACONLY,
+ ALE_PORT_MACONLY_CAF,
ALE_PORT_BCAST_LIMIT,
ALE_PORT_MCAST_LIMIT,
+ ALE_DEFAULT_THREAD_ID,
+ ALE_DEFAULT_THREAD_ENABLE,
ALE_NUM_CONTROLS,
};
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 71215db7934b..9209e613257d 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1175,6 +1175,7 @@ static int cpsw_set_channels(struct net_device *ndev,
}
static const struct ethtool_ops cpsw_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
.set_msglevel = cpsw_set_msglevel,
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 75d4e16c692b..de282531f68b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -481,6 +481,7 @@ static int emac_set_coalesce(struct net_device *ndev,
* Ethtool support for EMAC adapter
*/
static const struct ethtool_ops ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = emac_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_coalesce = emac_get_coalesce,
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
new file mode 100644
index 000000000000..ad7cfc1316ce
--- /dev/null
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/* TI K3 CPPI5 descriptors pool API
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/kernel.h>
+
+#include "k3-cppi-desc-pool.h"
+
+struct k3_cppi_desc_pool {
+ struct device *dev;
+ dma_addr_t dma_addr;
+ void *cpumem; /* dma_alloc map */
+ size_t desc_size;
+ size_t mem_size;
+ size_t num_desc;
+ struct gen_pool *gen_pool;
+};
+
+void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
+{
+ if (!pool)
+ return;
+
+ WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
+ "k3_knav_desc_pool size %zu != avail %zu",
+ gen_pool_size(pool->gen_pool),
+ gen_pool_avail(pool->gen_pool));
+ if (pool->cpumem)
+ dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
+ pool->dma_addr);
+
+ gen_pool_destroy(pool->gen_pool); /* frees pool->name */
+}
+
+struct k3_cppi_desc_pool *
+k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
+ size_t desc_size,
+ const char *name)
+{
+ struct k3_cppi_desc_pool *pool;
+ const char *pool_name = NULL;
+ int ret = -ENOMEM;
+
+ pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(ret);
+
+ pool->dev = dev;
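+ /* gen_pool hands out chunks of 2^order bytes, so round the descriptor
+ * size up to a power of two and use its log2 as the allocation order.
+ */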
+ pool->desc_size = roundup_pow_of_two(desc_size);
+ pool->num_desc = size;
+ pool->mem_size = pool->num_desc * pool->desc_size;
+
+ pool_name = kstrdup_const(name ? name : dev_name(pool->dev),
+ GFP_KERNEL);
+ if (!pool_name)
+ return ERR_PTR(-ENOMEM);
+
+ pool->gen_pool = gen_pool_create(ilog2(pool->desc_size), -1);
+ if (!pool->gen_pool) {
+ ret = -ENOMEM;
+ dev_err(pool->dev, "pool create failed %d\n", ret);
+ kfree_const(pool_name);
+ goto gen_pool_create_fail;
+ }
+
+ pool->gen_pool->name = pool_name;
+
+ pool->cpumem = dma_alloc_coherent(pool->dev, pool->mem_size,
+ &pool->dma_addr, GFP_KERNEL);
+
+ if (!pool->cpumem)
+ goto dma_alloc_fail;
+
+ ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->cpumem,
+ (phys_addr_t)pool->dma_addr, pool->mem_size,
+ -1);
+ if (ret < 0) {
+ dev_err(pool->dev, "pool add failed %d\n", ret);
+ goto gen_pool_add_virt_fail;
+ }
+
+ return pool;
+
+gen_pool_add_virt_fail:
+ dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
+ pool->dma_addr);
+dma_alloc_fail:
+ gen_pool_destroy(pool->gen_pool); /* frees pool->name */
+gen_pool_create_fail:
+ devm_kfree(pool->dev, pool);
+ return ERR_PTR(ret);
+}
+
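+/* The pool is one contiguous coherent allocation, so virt<->dma
+ * translation is plain offset arithmetic.
+ */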
+dma_addr_t k3_cppi_desc_pool_virt2dma(struct k3_cppi_desc_pool *pool,
+ void *addr)
+{
+ return addr ? pool->dma_addr + (addr - pool->cpumem) : 0;
+}
+
+void *k3_cppi_desc_pool_dma2virt(struct k3_cppi_desc_pool *pool, dma_addr_t dma)
+{
+ return dma ? pool->cpumem + (dma - pool->dma_addr) : NULL;
+}
+
+void *k3_cppi_desc_pool_alloc(struct k3_cppi_desc_pool *pool)
+{
+ return (void *)gen_pool_alloc(pool->gen_pool, pool->desc_size);
+}
+
+void k3_cppi_desc_pool_free(struct k3_cppi_desc_pool *pool, void *addr)
+{
+ gen_pool_free(pool->gen_pool, (unsigned long)addr, pool->desc_size);
+}
+
+size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool)
+{
+ return gen_pool_avail(pool->gen_pool) / pool->desc_size;
+}
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.h b/drivers/net/ethernet/ti/k3-cppi-desc-pool.h
new file mode 100644
index 000000000000..a7e3fa5e7b62
--- /dev/null
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* TI K3 CPPI5 descriptors pool
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_CPPI_DESC_POOL_H_
+#define K3_CPPI_DESC_POOL_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+struct k3_cppi_desc_pool;
+
+void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool);
+struct k3_cppi_desc_pool *
+k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
+ size_t desc_size,
+ const char *name);
+#define k3_cppi_desc_pool_create(dev, size, desc_size) \
+ k3_cppi_desc_pool_create_name(dev, size, desc_size, NULL)
+dma_addr_t
+k3_cppi_desc_pool_virt2dma(struct k3_cppi_desc_pool *pool, void *addr);
+void *
+k3_cppi_desc_pool_dma2virt(struct k3_cppi_desc_pool *pool, dma_addr_t dma);
+void *k3_cppi_desc_pool_alloc(struct k3_cppi_desc_pool *pool);
+void k3_cppi_desc_pool_free(struct k3_cppi_desc_pool *pool, void *addr);
+size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool);
+
+#endif /* K3_CPPI_DESC_POOL_H_ */
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 805903dbddcc..68f324ed4eaf 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -308,7 +308,7 @@ struct gelic_port {
struct gelic_card *card;
struct net_device *netdev;
enum gelic_port_type type;
- long priv[0]; /* long for alignment */
+ long priv[]; /* long for alignment */
};
static inline struct gelic_card *port_to_card(struct gelic_port *p)
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
index 4041d946b649..1f203d1ae8db 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
@@ -158,7 +158,7 @@ struct gelic_eurus_scan_info {
__be32 reserved2;
__be32 reserved3;
__be32 reserved4;
- u8 elements[0]; /* ie */
+ u8 elements[]; /* ie */
} __packed;
/* the hypervisor returns bbs up to 16 */
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
index c0c68cbc898c..05b1a0736835 100644
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ b/drivers/net/ethernet/toshiba/spider_net.h
@@ -470,7 +470,7 @@ struct spider_net_card {
struct spider_net_extra_stats spider_stats;
/* Must be last item in struct */
- struct spider_net_descr darray[0];
+ struct spider_net_descr darray[];
};
#endif
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 3fd43d30b20d..b50c3ec3495b 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -367,7 +367,7 @@ struct TxFD {
struct RxFD {
struct FDesc fd;
- struct BDesc bd[0]; /* variable length */
+ struct BDesc bd[]; /* variable length */
};
struct FrFD {
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 4b556b74541a..713dbc04b25b 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -3648,6 +3648,8 @@ static void velocity_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops velocity_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = velocity_get_drvinfo,
.get_wol = velocity_ethtool_get_wol,
.set_wol = velocity_ethtool_set_wol,
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 6304ebd8b5c6..0810af8193cb 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -32,7 +32,6 @@ config XILINX_AXI_EMAC
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
- depends on PPC || MICROBLAZE || X86 || COMPILE_TEST
select PHYLIB
---help---
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 53fb8141f1a6..4a73127e10a6 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -369,18 +369,20 @@ struct temac_local {
/* Buffer descriptors */
struct cdmac_bd *tx_bd_v;
dma_addr_t tx_bd_p;
+ u32 tx_bd_num;
struct cdmac_bd *rx_bd_v;
dma_addr_t rx_bd_p;
+ u32 rx_bd_num;
int tx_bd_ci;
- int tx_bd_next;
int tx_bd_tail;
int rx_bd_ci;
int rx_bd_tail;
/* DMA channel control setup */
- u32 tx_chnl_ctrl;
- u32 rx_chnl_ctrl;
+ u8 coalesce_count_tx;
+ u8 coalesce_delay_tx;
u8 coalesce_count_rx;
+ u8 coalesce_delay_rx;
struct delayed_work restart_work;
};
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9461acec6f70..3e313e71ae36 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -58,8 +58,11 @@
#include "ll_temac.h"
-#define TX_BD_NUM 64
-#define RX_BD_NUM 128
+/* Descriptors defines for Tx and Rx DMA */
+#define TX_BD_NUM_DEFAULT 64
+#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MAX 4096
+#define RX_BD_NUM_MAX 4096
/* ---------------------------------------------------------------------
* Low level register access functions
@@ -301,7 +304,7 @@ static void temac_dma_bd_release(struct net_device *ndev)
/* Reset Local Link (DMA) */
lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
if (!lp->rx_skb[i])
break;
else {
@@ -312,12 +315,12 @@ static void temac_dma_bd_release(struct net_device *ndev)
}
if (lp->rx_bd_v)
dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- lp->rx_bd_v, lp->rx_bd_p);
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+ lp->rx_bd_v, lp->rx_bd_p);
if (lp->tx_bd_v)
dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- lp->tx_bd_v, lp->tx_bd_p);
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+ lp->tx_bd_v, lp->tx_bd_p);
}
/**
@@ -330,33 +333,33 @@ static int temac_dma_bd_init(struct net_device *ndev)
dma_addr_t skb_dma_addr;
int i;
- lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb),
- GFP_KERNEL);
+ lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
+ sizeof(*lp->rx_skb), GFP_KERNEL);
if (!lp->rx_skb)
goto out;
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
&lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
- for (i = 0; i < TX_BD_NUM; i++) {
+ for (i = 0; i < lp->tx_bd_num; i++) {
lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
- + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM));
+ + sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
}
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
- + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM));
+ + sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
@@ -376,21 +379,22 @@ static int temac_dma_bd_init(struct net_device *ndev)
}
/* Configure DMA channel (irq setup) */
- lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl |
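+ /* CHNL_CTRL: IRQ delay timeout in bits 31:24, coalesce count in 23:16 */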
+ lp->dma_out(lp, TX_CHNL_CTRL,
+ lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
- lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl |
+ lp->dma_out(lp, RX_CHNL_CTRL,
+ lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
CHNL_CTRL_IRQ_IOE |
CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
/* Init descriptor indexes */
lp->tx_bd_ci = 0;
- lp->tx_bd_next = 0;
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
- lp->rx_bd_tail = RX_BD_NUM - 1;
+ lp->rx_bd_tail = lp->rx_bd_num - 1;
/* Enable RX DMA transfers */
wmb();
@@ -785,7 +789,7 @@ static void temac_start_xmit_done(struct net_device *ndev)
ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
lp->tx_bd_ci++;
- if (lp->tx_bd_ci >= TX_BD_NUM)
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
lp->tx_bd_ci = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
@@ -811,7 +815,7 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
return NETDEV_TX_BUSY;
tail++;
- if (tail >= TX_BD_NUM)
+ if (tail >= lp->tx_bd_num)
tail = 0;
cur_p = &lp->tx_bd_v[tail];
@@ -826,14 +830,13 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct cdmac_bd *cur_p;
- dma_addr_t start_p, tail_p, skb_dma_addr;
+ dma_addr_t tail_p, skb_dma_addr;
int ii;
unsigned long num_frag;
skb_frag_t *frag;
num_frag = skb_shinfo(skb)->nr_frags;
frag = &skb_shinfo(skb)->frags[0];
- start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (temac_check_tx_bd_space(lp, num_frag + 1)) {
@@ -876,7 +879,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) {
- if (++lp->tx_bd_tail >= TX_BD_NUM)
+ if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -886,7 +889,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
if (--lp->tx_bd_tail < 0)
- lp->tx_bd_tail = TX_BD_NUM - 1;
+ lp->tx_bd_tail = lp->tx_bd_num - 1;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
while (--ii >= 0) {
--frag;
@@ -895,7 +898,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_frag_size(frag),
DMA_TO_DEVICE);
if (--lp->tx_bd_tail < 0)
- lp->tx_bd_tail = TX_BD_NUM - 1;
+ lp->tx_bd_tail = lp->tx_bd_num - 1;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
}
dma_unmap_single(ndev->dev.parent,
@@ -914,7 +917,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
lp->tx_bd_tail++;
- if (lp->tx_bd_tail >= TX_BD_NUM)
+ if (lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
skb_tx_timestamp(skb);
@@ -934,7 +937,7 @@ static int ll_temac_recv_buffers_available(struct temac_local *lp)
return 0;
available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
if (available <= 0)
- available += RX_BD_NUM;
+ available += lp->rx_bd_num;
return available;
}
@@ -1003,7 +1006,7 @@ static void ll_temac_recv(struct net_device *ndev)
ndev->stats.rx_bytes += length;
rx_bd = lp->rx_bd_ci;
- if (++lp->rx_bd_ci >= RX_BD_NUM)
+ if (++lp->rx_bd_ci >= lp->rx_bd_num)
lp->rx_bd_ci = 0;
} while (rx_bd != lp->rx_bd_tail);
@@ -1034,7 +1037,7 @@ static void ll_temac_recv(struct net_device *ndev)
dma_addr_t skb_dma_addr;
rx_bd = lp->rx_bd_tail + 1;
- if (rx_bd >= RX_BD_NUM)
+ if (rx_bd >= lp->rx_bd_num)
rx_bd = 0;
bd = &lp->rx_bd_v[rx_bd];
@@ -1250,13 +1253,96 @@ static const struct attribute_group temac_attr_group = {
.attrs = temac_device_attrs,
};
-/* ethtool support */
+/* ---------------------------------------------------------------------
+ * ethtool support
+ */
+
+static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ ering->rx_max_pending = RX_BD_NUM_MAX;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ ering->tx_max_pending = TX_BD_NUM_MAX;
+ ering->rx_pending = lp->rx_bd_num;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->tx_pending = lp->tx_bd_num;
+}
+
+static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ if (ering->rx_pending > RX_BD_NUM_MAX ||
+ ering->rx_mini_pending ||
+ ering->rx_jumbo_pending ||
+ ering->tx_pending > TX_BD_NUM_MAX)
+ return -EINVAL;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ lp->rx_bd_num = ering->rx_pending;
+ lp->tx_bd_num = ering->tx_pending;
+ return 0;
+}
+
+static int ll_temac_ethtools_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
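+ /* Convert delay counts to usecs: one count is 512/100 = 5.12 us at
+ * the typical 200 MHz LocalLink clock.
+ */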
+ ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
+ ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
+ ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
+ ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
+ return 0;
+}
+
+static int ll_temac_ethtools_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ netdev_err(ndev,
+ "Please stop netif before applying configuration\n");
+ return -EFAULT;
+ }
+
+ if (ec->rx_max_coalesced_frames)
+ lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
+ if (ec->tx_max_coalesced_frames)
+ lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
+ /* With typical LocalLink clock speed of 200 MHz and
+ * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
+ */
+ if (ec->rx_coalesce_usecs)
+ lp->coalesce_delay_rx =
+ min(255U, (ec->rx_coalesce_usecs * 100) / 512);
+ if (ec->tx_coalesce_usecs)
+ lp->coalesce_delay_tx =
+ min(255U, (ec->tx_coalesce_usecs * 100) / 512);
+
+ return 0;
+}
+
static const struct ethtool_ops temac_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_ringparam = ll_temac_ethtools_get_ringparam,
+ .set_ringparam = ll_temac_ethtools_set_ringparam,
+ .get_coalesce = ll_temac_ethtools_get_coalesce,
+ .set_coalesce = ll_temac_ethtools_set_coalesce,
};
static int temac_probe(struct platform_device *pdev)
@@ -1300,6 +1386,8 @@ static int temac_probe(struct platform_device *pdev)
lp->ndev = ndev;
lp->dev = &pdev->dev;
lp->options = XTE_OPTION_DEFAULTS;
+ lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+ lp->tx_bd_num = TX_BD_NUM_DEFAULT;
spin_lock_init(&lp->rx_lock);
INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
@@ -1364,6 +1452,14 @@ static int temac_probe(struct platform_device *pdev)
/* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_IP_CSUM;
+ /* Defaults for IRQ delay/coalescing setup. These are
+ * configuration values, so they do not belong in the device tree.
+ */
+ lp->coalesce_delay_tx = 0x10;
+ lp->coalesce_count_tx = 0x22;
+ lp->coalesce_delay_rx = 0xff;
+ lp->coalesce_count_rx = 0x07;
+
/* Setup LocalLink DMA */
if (temac_np) {
/* Find the DMA node, map the DMA registers, and
@@ -1402,14 +1498,6 @@ static int temac_probe(struct platform_device *pdev)
lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
- /* Use defaults for IRQ delay/coalescing setup. These
- * are configuration values, so does not belong in
- * device-tree.
- */
- lp->tx_chnl_ctrl = 0x10220000;
- lp->rx_chnl_ctrl = 0xff070000;
- lp->coalesce_count_rx = 0x07;
-
/* Finished with the DMA node; drop the reference */
of_node_put(dma_np);
} else if (pdata) {
@@ -1435,18 +1523,13 @@ static int temac_probe(struct platform_device *pdev)
lp->tx_irq = platform_get_irq(pdev, 1);
/* IRQ delay/coalescing setup */
- if (pdata->tx_irq_timeout || pdata->tx_irq_count)
- lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) |
- (pdata->tx_irq_count << 16);
- else
- lp->tx_chnl_ctrl = 0x10220000;
+ if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
+ lp->coalesce_delay_tx = pdata->tx_irq_timeout;
+ lp->coalesce_count_tx = pdata->tx_irq_count;
+ }
if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
- lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
- (pdata->rx_irq_count << 16);
+ lp->coalesce_delay_rx = pdata->rx_irq_timeout;
lp->coalesce_count_rx = pdata->rx_irq_count;
- } else {
- lp->rx_chnl_ctrl = 0xff070000;
- lp->coalesce_count_rx = 0x07;
}
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 2dacfc85b3ba..fbaf3c987d9c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -161,17 +161,11 @@
#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
+#define XAE_ID_OFFSET 0x000004F8 /* Identification register */
#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
-#define XAE_MDIO_MIS_OFFSET 0x00000600 /* MII Management Interrupt Status */
-/* MII Mgmt Interrupt Pending register offset */
-#define XAE_MDIO_MIP_OFFSET 0x00000620
-/* MII Management Interrupt Enable register offset */
-#define XAE_MDIO_MIE_OFFSET 0x00000640
-/* MII Management Interrupt Clear register offset. */
-#define XAE_MDIO_MIC_OFFSET 0x00000660
#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
@@ -335,6 +329,7 @@
#define XAE_FEATURE_PARTIAL_TX_CSUM (1 << 1)
#define XAE_FEATURE_FULL_RX_CSUM (1 << 2)
#define XAE_FEATURE_FULL_TX_CSUM (1 << 3)
+#define XAE_FEATURE_DMA_64BIT (1 << 4)
#define XAE_NO_CSUM_OFFLOAD 0
@@ -347,9 +342,9 @@
/**
* struct axidma_bd - Axi Dma buffer descriptor layout
* @next: MM2S/S2MM Next Descriptor Pointer
- * @reserved1: Reserved and not used
+ * @next_msb: MM2S/S2MM Next Descriptor Pointer (high 32 bits)
* @phys: MM2S/S2MM Buffer Address
- * @reserved2: Reserved and not used
+ * @phys_msb: MM2S/S2MM Buffer Address (high 32 bits)
* @reserved3: Reserved and not used
* @reserved4: Reserved and not used
* @cntrl: MM2S/S2MM Control value
@@ -362,9 +357,9 @@
*/
struct axidma_bd {
u32 next; /* Physical address of next buffer descriptor */
- u32 reserved1;
+ u32 next_msb; /* high 32 bits for IP >= v7.1, reserved on older IP */
u32 phys;
- u32 reserved2;
+ u32 phys_msb; /* for IP >= v7.1, reserved for older IP */
u32 reserved3;
u32 reserved4;
u32 cntrl;
@@ -435,7 +430,7 @@ struct axienet_local {
void __iomem *regs;
void __iomem *dma_regs;
- struct tasklet_struct dma_err_tasklet;
+ struct work_struct dma_err_task;
int tx_irq;
int rx_irq;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 20746b801959..fa5dc2993520 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -147,6 +147,34 @@ static inline void axienet_dma_out32(struct axienet_local *lp,
iowrite32(value, lp->dma_regs + reg);
}
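+
+/* Write a DMA address register: the 32-bit LSB always, the MSB half only
+ * when the hardware was configured for 64-bit DMA addressing.
+ */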
+static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+ dma_addr_t addr)
+{
+ axienet_dma_out32(lp, reg, lower_32_bits(addr));
+
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
+}
+
+static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
+ struct axidma_bd *desc)
+{
+ desc->phys = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ desc->phys_msb = upper_32_bits(addr);
+}
+
+static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
+ struct axidma_bd *desc)
+{
+ dma_addr_t ret = desc->phys;
+
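+ /* Shift in two 16-bit steps to avoid an undefined 32-bit shift when
+ * dma_addr_t is only 32 bits wide.
+ */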
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
+
+ return ret;
+}
+
/**
* axienet_dma_bd_release - Release buffer descriptor rings
* @ndev: Pointer to the net_device structure
@@ -160,24 +188,41 @@ static void axienet_dma_bd_release(struct net_device *ndev)
int i;
struct axienet_local *lp = netdev_priv(ndev);
+ /* If we end up here, tx_bd_v must have been DMA allocated. */
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+ lp->tx_bd_v,
+ lp->tx_bd_p);
+
+ if (!lp->rx_bd_v)
+ return;
+
for (i = 0; i < lp->rx_bd_num; i++) {
- dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
- lp->max_frm_size, DMA_FROM_DEVICE);
+ dma_addr_t phys;
+
+ /* A NULL skb means this descriptor has not been initialised
+ * at all.
+ */
+ if (!lp->rx_bd_v[i].skb)
+ break;
+
dev_kfree_skb(lp->rx_bd_v[i].skb);
- }
- if (lp->rx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
- lp->rx_bd_v,
- lp->rx_bd_p);
- }
- if (lp->tx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
- lp->tx_bd_v,
- lp->tx_bd_p);
+ /* For each descriptor, we programmed cntrl with the (non-zero)
+ * descriptor size, after it had been successfully allocated.
+ * So a non-zero value in there means we need to unmap it.
+ */
+ if (lp->rx_bd_v[i].cntrl) {
+ phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
+ dma_unmap_single(ndev->dev.parent, phys,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ }
}
+
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+ lp->rx_bd_v,
+ lp->rx_bd_p);
}
/**
@@ -207,7 +252,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
- goto out;
+ return -ENOMEM;
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
@@ -216,25 +261,37 @@ static int axienet_dma_bd_init(struct net_device *ndev)
goto out;
for (i = 0; i < lp->tx_bd_num; i++) {
- lp->tx_bd_v[i].next = lp->tx_bd_p +
- sizeof(*lp->tx_bd_v) *
- ((i + 1) % lp->tx_bd_num);
+ dma_addr_t addr = lp->tx_bd_p +
+ sizeof(*lp->tx_bd_v) *
+ ((i + 1) % lp->tx_bd_num);
+
+ lp->tx_bd_v[i].next = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
}
for (i = 0; i < lp->rx_bd_num; i++) {
- lp->rx_bd_v[i].next = lp->rx_bd_p +
- sizeof(*lp->rx_bd_v) *
- ((i + 1) % lp->rx_bd_num);
+ dma_addr_t addr;
+
+ addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
+ ((i + 1) % lp->rx_bd_num);
+ lp->rx_bd_v[i].next = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
if (!skb)
goto out;
lp->rx_bd_v[i].skb = skb;
- lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
- skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
+ addr = dma_map_single(ndev->dev.parent, skb->data,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, addr)) {
+ netdev_err(ndev, "DMA mapping error\n");
+ goto out;
+ }
+ desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
+
lp->rx_bd_v[i].cntrl = lp->max_frm_size;
}
@@ -267,18 +324,18 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
- axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
* tail pointer register that the Tx channel will start transmitting.
*/
- axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
@@ -437,9 +494,10 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
-static void __axienet_device_reset(struct axienet_local *lp)
+static int __axienet_device_reset(struct axienet_local *lp)
{
u32 timeout;
+
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
@@ -455,9 +513,11 @@ static void __axienet_device_reset(struct axienet_local *lp)
if (--timeout == 0) {
netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
__func__);
- break;
+ return -ETIMEDOUT;
}
}
+
+ return 0;
}
/**
@@ -470,13 +530,17 @@ static void __axienet_device_reset(struct axienet_local *lp)
* areconnected to Axi Ethernet reset lines, this in turn resets the Axi
* Ethernet core. No separate hardware reset is done for the Axi Ethernet
* core.
+ * Returns 0 on success or a negative error number otherwise.
*/
-static void axienet_device_reset(struct net_device *ndev)
+static int axienet_device_reset(struct net_device *ndev)
{
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
+ int ret;
- __axienet_device_reset(lp);
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ return ret;
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
lp->options |= XAE_OPTION_VLAN;
@@ -491,9 +555,11 @@ static void axienet_device_reset(struct net_device *ndev)
lp->options |= XAE_OPTION_JUMBO;
}
- if (axienet_dma_bd_init(ndev)) {
+ ret = axienet_dma_bd_init(ndev);
+ if (ret) {
netdev_err(ndev, "%s: descriptor allocation failed\n",
__func__);
+ return ret;
}
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -518,36 +584,54 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_setoptions(ndev, lp->options);
netif_trans_update(ndev);
+
+ return 0;
}
/**
- * axienet_start_xmit_done - Invoked once a transmit is completed by the
- * Axi DMA Tx channel.
+ * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
* @ndev: Pointer to the net_device structure
+ * @first_bd: Index of first descriptor to clean up
+ * @nr_bds: Number of descriptors to clean up, can be -1 if unknown.
+ * @sizep: Pointer to a u32 filled with the total sum of all bytes
+ * in all cleaned-up descriptors. Ignored if NULL.
*
- * This function is invoked from the Axi DMA Tx isr to notify the completion
- * of transmit operation. It clears fields in the corresponding Tx BDs and
- * unmaps the corresponding buffer so that CPU can regain ownership of the
- * buffer. It finally invokes "netif_wake_queue" to restart transmission if
- * required.
+ * Would either be called after a successful transmit operation, or after
+ * there was an error when setting up the chain.
+ * Returns the number of descriptors handled.
*/
-static void axienet_start_xmit_done(struct net_device *ndev)
+static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
+ int nr_bds, u32 *sizep)
{
- u32 size = 0;
- u32 packets = 0;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
- unsigned int status = 0;
-
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
- status = cur_p->status;
- while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
- DMA_TO_DEVICE);
- if (cur_p->skb)
+ int max_bds = nr_bds;
+ unsigned int status;
+ dma_addr_t phys;
+ int i;
+
+ if (max_bds == -1)
+ max_bds = lp->tx_bd_num;
+
+ for (i = 0; i < max_bds; i++) {
+ cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
+ status = cur_p->status;
+
+ /* If no number is given, clean up *all* descriptors that have
+ * been completed by the MAC.
+ */
+ if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ break;
+
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(ndev->dev.parent, phys,
+ (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+
+ if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
dev_consume_skb_irq(cur_p->skb);
- /*cur_p->phys = 0;*/
+
+ cur_p->cntrl = 0;
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
@@ -555,15 +639,36 @@ static void axienet_start_xmit_done(struct net_device *ndev)
cur_p->status = 0;
cur_p->skb = NULL;
- size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
- packets++;
-
- if (++lp->tx_bd_ci >= lp->tx_bd_num)
- lp->tx_bd_ci = 0;
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
- status = cur_p->status;
+ if (sizep)
+ *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
}
+ return i;
+}
+
+/**
+ * axienet_start_xmit_done - Invoked once a transmit is completed by the
+ * Axi DMA Tx channel.
+ * @ndev: Pointer to the net_device structure
+ *
+ * This function is invoked from the Axi DMA Tx isr to notify the completion
+ * of transmit operation. It clears fields in the corresponding Tx BDs and
+ * unmaps the corresponding buffer so that CPU can regain ownership of the
+ * buffer. It finally invokes "netif_wake_queue" to restart transmission if
+ * required.
+ */
+static void axienet_start_xmit_done(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ u32 packets = 0;
+ u32 size = 0;
+
+ packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);
+
+ lp->tx_bd_ci += packets;
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci -= lp->tx_bd_num;
+
ndev->stats.tx_packets += packets;
ndev->stats.tx_bytes += size;
@@ -617,9 +722,10 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 csum_start_off;
u32 csum_index_off;
skb_frag_t *frag;
- dma_addr_t tail_p;
+ dma_addr_t tail_p, phys;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
+ u32 orig_tail_ptr = lp->tx_bd_tail;
num_frag = skb_shinfo(skb)->nr_frags;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -655,19 +761,37 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
+ phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "TX DMA mapping error\n");
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
for (ii = 0; ii < num_frag; ii++) {
if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
- cur_p->phys = dma_map_single(ndev->dev.parent,
- skb_frag_address(frag),
- skb_frag_size(frag),
- DMA_TO_DEVICE);
+ phys = dma_map_single(ndev->dev.parent,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "TX DMA mapping error\n");
+ ndev->stats.tx_dropped++;
+ axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
+ NULL);
+ lp->tx_bd_tail = orig_tail_ptr;
+
+ return NETDEV_TX_OK;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_frag_size(frag);
}
@@ -676,7 +800,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
- axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+ axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
@@ -706,10 +830,12 @@ static void axienet_recv(struct net_device *ndev)
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ dma_addr_t phys;
+
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- lp->max_frm_size,
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
DMA_FROM_DEVICE);
skb = cur_p->skb;
@@ -745,9 +871,17 @@ static void axienet_recv(struct net_device *ndev)
if (!new_skb)
return;
- cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
+ phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "RX DMA mapping error\n");
+ dev_kfree_skb(new_skb);
+ return;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
+
cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
cur_p->skb = new_skb;
@@ -761,7 +895,7 @@ static void axienet_recv(struct net_device *ndev)
ndev->stats.rx_bytes += size;
if (tail_p)
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
/**
@@ -791,7 +925,8 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+ dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
(lp->tx_bd_v[lp->tx_bd_ci]).phys);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
@@ -806,7 +941,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
/* Write to the Rx channel control register */
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
- tasklet_schedule(&lp->dma_err_tasklet);
+ schedule_work(&lp->dma_err_task);
axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
}
out:
@@ -840,7 +975,8 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+ dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
(lp->rx_bd_v[lp->rx_bd_ci]).phys);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
@@ -855,7 +991,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
/* write to the Rx channel control register */
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
- tasklet_schedule(&lp->dma_err_tasklet);
+ schedule_work(&lp->dma_err_task);
axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
}
out:
@@ -891,7 +1027,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
return IRQ_HANDLED;
}
-static void axienet_dma_err_handler(unsigned long data);
+static void axienet_dma_err_handler(struct work_struct *work);
/**
* axienet_open - Driver open routine.
@@ -921,8 +1057,9 @@ static int axienet_open(struct net_device *ndev)
*/
mutex_lock(&lp->mii_bus->mdio_lock);
axienet_mdio_disable(lp);
- axienet_device_reset(ndev);
- ret = axienet_mdio_enable(lp);
+ ret = axienet_device_reset(ndev);
+ if (ret == 0)
+ ret = axienet_mdio_enable(lp);
mutex_unlock(&lp->mii_bus->mdio_lock);
if (ret < 0)
return ret;
@@ -935,9 +1072,8 @@ static int axienet_open(struct net_device *ndev)
phylink_start(lp->phylink);
- /* Enable tasklets for Axi DMA error handling */
- tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
- (unsigned long) lp);
+ /* Enable worker thread for Axi DMA error handling */
+ INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
@@ -966,7 +1102,7 @@ err_rx_irq:
err_tx_irq:
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
- tasklet_kill(&lp->dma_err_tasklet);
+ cancel_work_sync(&lp->dma_err_task);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
@@ -1025,7 +1161,7 @@ static int axienet_stop(struct net_device *ndev)
axienet_mdio_enable(lp);
mutex_unlock(&lp->mii_bus->mdio_lock);
- tasklet_kill(&lp->dma_err_tasklet);
+ cancel_work_sync(&lp->dma_err_task);
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
@@ -1083,6 +1219,16 @@ static void axienet_poll_controller(struct net_device *ndev)
}
#endif
+static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phylink_mii_ioctl(lp->phylink, rq, cmd);
+}
+
static const struct net_device_ops axienet_netdev_ops = {
.ndo_open = axienet_open,
.ndo_stop = axienet_stop,
@@ -1090,6 +1236,7 @@ static const struct net_device_ops axienet_netdev_ops = {
.ndo_change_mtu = axienet_change_mtu,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = axienet_ioctl,
.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = axienet_poll_controller,
@@ -1170,10 +1317,6 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
- data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
- data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
- data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
- data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
@@ -1309,27 +1452,6 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EFAULT;
}
- if ((ecoalesce->rx_coalesce_usecs) ||
- (ecoalesce->rx_coalesce_usecs_irq) ||
- (ecoalesce->rx_max_coalesced_frames_irq) ||
- (ecoalesce->tx_coalesce_usecs) ||
- (ecoalesce->tx_coalesce_usecs_irq) ||
- (ecoalesce->tx_max_coalesced_frames_irq) ||
- (ecoalesce->stats_block_coalesce_usecs) ||
- (ecoalesce->use_adaptive_rx_coalesce) ||
- (ecoalesce->use_adaptive_tx_coalesce) ||
- (ecoalesce->pkt_rate_low) ||
- (ecoalesce->rx_coalesce_usecs_low) ||
- (ecoalesce->rx_max_coalesced_frames_low) ||
- (ecoalesce->tx_coalesce_usecs_low) ||
- (ecoalesce->tx_max_coalesced_frames_low) ||
- (ecoalesce->pkt_rate_high) ||
- (ecoalesce->rx_coalesce_usecs_high) ||
- (ecoalesce->rx_max_coalesced_frames_high) ||
- (ecoalesce->tx_coalesce_usecs_high) ||
- (ecoalesce->tx_max_coalesced_frames_high) ||
- (ecoalesce->rate_sample_interval))
- return -EOPNOTSUPP;
if (ecoalesce->rx_max_coalesced_frames)
lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
if (ecoalesce->tx_max_coalesced_frames)
@@ -1357,6 +1479,7 @@ axienet_ethtools_set_link_ksettings(struct net_device *ndev,
}
static const struct ethtool_ops axienet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -1441,6 +1564,22 @@ static void axienet_mac_an_restart(struct phylink_config *config)
static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
+ /* nothing meaningful to do */
+}
+
+static void axienet_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ /* nothing meaningful to do */
+}
+
+static void axienet_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
struct net_device *ndev = to_net_dev(config->dev);
struct axienet_local *lp = netdev_priv(ndev);
u32 emmc_reg, fcc_reg;
@@ -1448,7 +1587,7 @@ static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
- switch (state->speed) {
+ switch (speed) {
case SPEED_1000:
emmc_reg |= XAE_EMMC_LINKSPD_1000;
break;
@@ -1467,32 +1606,17 @@ static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
- if (state->pause & MLO_PAUSE_TX)
+ if (tx_pause)
fcc_reg |= XAE_FCC_FCTX_MASK;
else
fcc_reg &= ~XAE_FCC_FCTX_MASK;
- if (state->pause & MLO_PAUSE_RX)
+ if (rx_pause)
fcc_reg |= XAE_FCC_FCRX_MASK;
else
fcc_reg &= ~XAE_FCC_FCRX_MASK;
axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}
-static void axienet_mac_link_down(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface)
-{
- /* nothing meaningful to do */
-}
-
-static void axienet_mac_link_up(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phy)
-{
- /* nothing meaningful to do */
-}
-
static const struct phylink_mac_ops axienet_phylink_ops = {
.validate = axienet_validate,
.mac_pcs_get_state = axienet_mac_pcs_get_state,
@@ -1503,17 +1627,18 @@ static const struct phylink_mac_ops axienet_phylink_ops = {
};
/**
- * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
- * @data: Data passed
+ * axienet_dma_err_handler - Work queue task for Axi DMA Error
+ * @work: pointer to work_struct
*
* Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
* Tx/Rx BDs.
*/
-static void axienet_dma_err_handler(unsigned long data)
+static void axienet_dma_err_handler(struct work_struct *work)
{
u32 axienet_status;
u32 cr, i;
- struct axienet_local *lp = (struct axienet_local *) data;
+ struct axienet_local *lp = container_of(work, struct axienet_local,
+ dma_err_task);
struct net_device *ndev = lp->ndev;
struct axidma_bd *cur_p;
@@ -1533,14 +1658,18 @@ static void axienet_dma_err_handler(unsigned long data)
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
- if (cur_p->phys)
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ if (cur_p->cntrl) {
+ dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
+
+ dma_unmap_single(ndev->dev.parent, addr,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
+ }
if (cur_p->skb)
dev_kfree_skb_irq(cur_p->skb);
cur_p->phys = 0;
+ cur_p->phys_msb = 0;
cur_p->cntrl = 0;
cur_p->status = 0;
cur_p->app0 = 0;
@@ -1594,18 +1723,18 @@ static void axienet_dma_err_handler(unsigned long data)
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
- axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
* tail pointer register that the Tx channel will start transmitting
*/
- axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
@@ -1651,6 +1780,7 @@ static int axienet_probe(struct platform_device *pdev)
struct net_device *ndev;
const void *mac_addr;
struct resource *ethres;
+ int addr_width = 32;
u32 value;
ndev = alloc_etherdev(sizeof(*lp));
@@ -1781,7 +1911,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->rx_irq = irq_of_parse_and_map(np, 1);
lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
- lp->eth_irq = platform_get_irq(pdev, 0);
+ lp->eth_irq = platform_get_irq_optional(pdev, 0);
} else {
/* Check for these resources directly on the Ethernet node. */
struct resource *res = platform_get_resource(pdev,
@@ -1789,7 +1919,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
lp->rx_irq = platform_get_irq(pdev, 1);
lp->tx_irq = platform_get_irq(pdev, 0);
- lp->eth_irq = platform_get_irq(pdev, 2);
+ lp->eth_irq = platform_get_irq_optional(pdev, 2);
}
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
@@ -1802,6 +1932,36 @@ static int axienet_probe(struct platform_device *pdev)
goto free_netdev;
}
+ /* Autodetect the need for 64-bit DMA pointers.
+ * When the IP is configured for a bus width bigger than 32 bits,
+ * writing the MSB registers is mandatory, even if they are all 0.
+ * We can detect this case by writing all 1's to one such register
+	 * and seeing if that sticks: when the IP is configured for 32 bits
+ * only, those registers are RES0.
+ * Those MSB registers were introduced in IP v7.1, which we check first.
+ */
+ if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
+ void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
+
+ iowrite32(0x0, desc);
+ if (ioread32(desc) == 0) { /* sanity check */
+ iowrite32(0xffffffff, desc);
+ if (ioread32(desc) > 0) {
+ lp->features |= XAE_FEATURE_DMA_64BIT;
+ addr_width = 64;
+ dev_info(&pdev->dev,
+ "autodetected 64-bit DMA range\n");
+ }
+ iowrite32(0x0, desc);
+ }
+ }
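+	/* A worked note on the probe above (restating the comment, not new
+	 * behavior): on a 32-bit-only configuration the MSB register is
+	 * RES0, so the 0xffffffff write reads back as 0, the feature flag
+	 * stays clear, and addr_width remains 32 for the dma_set_mask call
+	 * below.
+	 */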
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
+ if (ret) {
+ dev_err(&pdev->dev, "No suitable DMA available\n");
+ goto free_netdev;
+ }
+
/* Check for Ethernet core IRQ (optional) */
if (lp->eth_irq <= 0)
dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
diff --git a/drivers/net/fddi/skfp/drvfbi.c b/drivers/net/fddi/skfp/drvfbi.c
index 9c8aa3a95463..cc9ac572423e 100644
--- a/drivers/net/fddi/skfp/drvfbi.c
+++ b/drivers/net/fddi/skfp/drvfbi.c
@@ -20,7 +20,7 @@
#include "h/supern_2.h"
#include "h/skfbiinc.h"
#include <linux/bitrev.h>
-#include <linux/pci_regs.h>
+#include <linux/pci.h>
#ifndef lint
static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ;
@@ -112,7 +112,7 @@ static void card_start(struct s_smc *smc)
*/
outp(ADDR(B0_TST_CTRL), TST_CFG_WRITE_ON) ; /* enable for writes */
word = inpw(PCI_C(PCI_STATUS)) ;
- outpw(PCI_C(PCI_STATUS), word | PCI_ERRBITS) ;
+ outpw(PCI_C(PCI_STATUS), word | PCI_STATUS_ERROR_BITS);
outp(ADDR(B0_TST_CTRL), TST_CFG_WRITE_OFF) ; /* disable writes */
/*
diff --git a/drivers/net/fddi/skfp/h/skfbi.h b/drivers/net/fddi/skfp/h/skfbi.h
index 480795681719..ccee00b71dbc 100644
--- a/drivers/net/fddi/skfp/h/skfbi.h
+++ b/drivers/net/fddi/skfp/h/skfbi.h
@@ -33,11 +33,6 @@
*/
#define I2C_ADDR_VPD 0xA0 /* I2C address for the VPD EEPROM */
-
-#define PCI_ERRBITS (PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_PARITY)
-
-
-
/*
* Control Register File:
* Bank 0
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 1b320bcf150a..ca68aa1df801 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -388,10 +388,11 @@ static int netvsc_init_buf(struct hv_device *device,
net_device->recv_section_size = resp->sections[0].sub_alloc_size;
net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
- /* Setup receive completion ring */
- net_device->recv_completion_cnt
- = round_up(net_device->recv_section_cnt + 1,
- PAGE_SIZE / sizeof(u64));
+ /* Setup receive completion ring.
+ * Add 1 to the recv_section_cnt because at least one entry in a
+ * ring buffer has to be empty.
+ */
+ net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
ret = netvsc_alloc_recv_comp_ring(net_device, 0);
if (ret)
goto cleanup;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 2c0a24c606fc..d8e86bdbfba1 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -638,10 +638,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
} else {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
}
lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
@@ -1143,23 +1140,6 @@ out:
return ret;
}
-static bool
-netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
-{
- struct ethtool_link_ksettings diff1 = *cmd;
- struct ethtool_link_ksettings diff2 = {};
-
- diff1.base.speed = 0;
- diff1.base.duplex = 0;
- /* advertising and cmd are usually set */
- ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
- diff1.base.cmd = 0;
- /* We set port to PORT_OTHER */
- diff2.base.port = PORT_OTHER;
-
- return !memcmp(&diff1, &diff2, sizeof(diff1));
-}
-
static void netvsc_init_settings(struct net_device *dev)
{
struct net_device_context *ndc = netdev_priv(dev);
@@ -1176,6 +1156,12 @@ static int netvsc_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct net_device_context *ndc = netdev_priv(dev);
+ struct net_device *vf_netdev;
+
+ vf_netdev = rtnl_dereference(ndc->vf_netdev);
+
+ if (vf_netdev)
+ return __ethtool_get_link_ksettings(vf_netdev, cmd);
cmd->base.speed = ndc->speed;
cmd->base.duplex = ndc->duplex;
@@ -1188,18 +1174,18 @@ static int netvsc_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct net_device_context *ndc = netdev_priv(dev);
- u32 speed;
+ struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
- speed = cmd->base.speed;
- if (!ethtool_validate_speed(speed) ||
- !ethtool_validate_duplex(cmd->base.duplex) ||
- !netvsc_validate_ethtool_ss_cmd(cmd))
- return -EINVAL;
+ if (vf_netdev) {
+ if (!vf_netdev->ethtool_ops->set_link_ksettings)
+ return -EOPNOTSUPP;
- ndc->speed = speed;
- ndc->duplex = cmd->base.duplex;
+ return vf_netdev->ethtool_ops->set_link_ksettings(vf_netdev,
+ cmd);
+ }
- return 0;
+ return ethtool_virtdev_set_link_ksettings(dev, cmd,
+ &ndc->speed, &ndc->duplex);
}
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 430c93786153..e04c3b60cae7 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -946,7 +946,8 @@ static int ca8210_spi_transfer(
cas_ctl->transfer.bits_per_word = 0; /* Use device setting */
cas_ctl->transfer.tx_buf = cas_ctl->tx_buf;
cas_ctl->transfer.rx_buf = cas_ctl->tx_in_buf;
- cas_ctl->transfer.delay_usecs = 0;
+ cas_ctl->transfer.delay.value = 0;
+ cas_ctl->transfer.delay.unit = SPI_DELAY_UNIT_USECS;
cas_ctl->transfer.cs_change = 0;
cas_ctl->transfer.len = sizeof(struct mac_message);
cas_ctl->msg.complete = ca8210_spi_transfer_complete;
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
new file mode 100644
index 000000000000..9f0d2a93379c
--- /dev/null
+++ b/drivers/net/ipa/Kconfig
@@ -0,0 +1,19 @@
+config QCOM_IPA
+ tristate "Qualcomm IPA support"
+ depends on ARCH_QCOM && 64BIT && NET
+ depends on QCOM_Q6V5_MSS
+ select QCOM_QMI_HELPERS
+ select QCOM_MDT_LOADER
+ help
+ Choose Y or M here to include support for the Qualcomm
+ IP Accelerator (IPA), a hardware block present in some
+ Qualcomm SoCs. The IPA is a programmable protocol processor
+ that is capable of generic hardware handling of IP packets,
+ including routing, filtering, and NAT. Currently the IPA
+ driver supports only basic transport of network traffic
+ between the AP and modem, on the Qualcomm SDM845 SoC.
+
+ Note that if selected, the selection type must match that
+ of QCOM_Q6V5_COMMON (Y or M).
+
+ If unsure, say N.
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
new file mode 100644
index 000000000000..afe5df1e6eee
--- /dev/null
+++ b/drivers/net/ipa/Makefile
@@ -0,0 +1,12 @@
+# Un-comment the next line if you want to validate configuration data
+#ccflags-y += -DIPA_VALIDATE
+
+obj-$(CONFIG_QCOM_IPA) += ipa.o
+
+ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \
+ ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \
+ ipa_gsi.o ipa_smp2p.o ipa_uc.o \
+ ipa_endpoint.o ipa_cmd.o ipa_modem.o \
+ ipa_qmi.o ipa_qmi_msg.o
+
+ipa-y += ipa_data-sdm845.o ipa_data-sc7180.o
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
new file mode 100644
index 000000000000..845478a19a4f
--- /dev/null
+++ b/drivers/net/ipa/gsi.c
@@ -0,0 +1,2055 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "gsi.h"
+#include "gsi_reg.h"
+#include "gsi_private.h"
+#include "gsi_trans.h"
+#include "ipa_gsi.h"
+#include "ipa_data.h"
+
+/**
+ * DOC: The IPA Generic Software Interface
+ *
+ * The generic software interface (GSI) is an integral component of the IPA,
+ * providing a well-defined communication layer between the AP subsystem
+ * and the IPA core. The modem uses the GSI layer as well.
+ *
+ *          --------             ---------
+ *          |      |             |       |
+ *          | AP   +<---.   .----+ Modem |
+ *          |      +--. |   | .->+       |
+ *          |      |  | |   | |  |       |
+ *          --------  | |   | |  ---------
+ *                    v |   v |
+ *                  --+-+---+-+--
+ *                  |    GSI    |
+ *                  |-----------|
+ *                  |           |
+ *                  |    IPA    |
+ *                  |           |
+ *                  -------------
+ *
+ * In the above diagram, the AP and Modem represent "execution environments"
+ * (EEs), which are independent operating environments that use the IPA for
+ * data transfer.
+ *
+ * Each EE uses a set of unidirectional GSI "channels," which allow transfer
+ * of data to or from the IPA. A channel is implemented as a ring buffer,
+ * with a DRAM-resident array of "transfer elements" (TREs) available to
+ * describe transfers to or from other EEs through the IPA. A transfer
+ * element can also contain an immediate command, requesting that the IPA
+ * perform actions other than data transfer.
+ *
+ * Each TRE refers to a block of data, also located in DRAM. After writing one
+ * or more TREs to a channel, the writer (either the IPA or an EE) writes a
+ * doorbell register to inform the receiving side how many elements have
+ * been written.
+ *
+ * Each channel has a GSI "event ring" associated with it. An event ring
+ * is implemented very much like a channel ring, but is always directed from
+ * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
+ * events by adding an entry to the event ring associated with the channel.
+ * The GSI then writes its doorbell for the event ring, causing the target
+ * EE to be interrupted. Each entry in an event ring contains a pointer
+ * to the channel TRE whose completion the event represents.
+ *
+ * Each TRE in a channel ring has a set of flags. One flag indicates whether
+ * the completion of the transfer operation generates an entry (and possibly
+ * an interrupt) in the channel's event ring. Other flags allow transfer
+ * elements to be chained together, forming a single logical transaction.
+ * TRE flags are used to control whether and when interrupts are generated
+ * to signal completion of channel transfers.
+ *
+ * Elements in channel and event rings are completed (or consumed) strictly
+ * in order. Completion of one entry implies the completion of all preceding
+ * entries. A single completion interrupt can therefore communicate the
+ * completion of many transfers.
+ *
+ * Note that all GSI registers are little-endian, which is the assumed
+ * endianness of I/O space accesses. The accessor functions perform byte
+ * swapping if needed (i.e., for a big endian CPU).
+ */
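+
+/* Illustrative sketch (not part of the driver) of the producer/consumer
+ * pattern described above, in miniature. Every name below is invented for
+ * the example; the driver's real TRE construction lives in gsi_trans.c.
+ */
+#if 0	/* illustrative only */
+static void example_queue_one_tre(struct example_ring *ring, dma_addr_t buf,
+				  u16 len)
+{
+	struct example_tre *tre = &ring->tre[ring->index % ring->count];
+
+	tre->addr = cpu_to_le64(buf);		/* data block, also in DRAM */
+	tre->len = cpu_to_le16(len);
+	tre->flags = EXAMPLE_TRE_FLAG_IEOT;	/* event entry on completion */
+	ring->index++;				/* producer cursor */
+
+	/* Ring the doorbell; GSI doorbells take the DMA address of the
+	 * first unfilled element (see gsi_channel_doorbell() below)
+	 */
+	writel(lower_32_bits(ring->base) +
+	       (ring->index % ring->count) * sizeof(*tre), ring->doorbell);
+}
+#endif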
+
+/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
+#define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
+
+#define GSI_CMD_TIMEOUT 5 /* seconds */
+
+#define GSI_CHANNEL_STOP_RX_RETRIES 10
+
+#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
+#define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */
+
+#define GSI_ISR_MAX_ITER 50 /* Detect interrupt storms */
+
+/* An entry in an event ring */
+struct gsi_event {
+ __le64 xfer_ptr;
+ __le16 len;
+ u8 reserved1;
+ u8 code;
+ __le16 reserved2;
+ u8 type;
+ u8 chid;
+};
+
+/* Hardware values from the error log register error code field */
+enum gsi_err_code {
+ GSI_INVALID_TRE_ERR = 0x1,
+ GSI_OUT_OF_BUFFERS_ERR = 0x2,
+ GSI_OUT_OF_RESOURCES_ERR = 0x3,
+ GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
+ GSI_EVT_RING_EMPTY_ERR = 0x5,
+ GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6,
+ GSI_HWO_1_ERR = 0x8,
+};
+
+/* Hardware values from the error log register error type field */
+enum gsi_err_type {
+ GSI_ERR_TYPE_GLOB = 0x1,
+ GSI_ERR_TYPE_CHAN = 0x2,
+ GSI_ERR_TYPE_EVT = 0x3,
+};
+
+/* Hardware values used when programming an event ring */
+enum gsi_evt_chtype {
+ GSI_EVT_CHTYPE_MHI_EV = 0x0,
+ GSI_EVT_CHTYPE_XHCI_EV = 0x1,
+ GSI_EVT_CHTYPE_GPI_EV = 0x2,
+ GSI_EVT_CHTYPE_XDCI_EV = 0x3,
+};
+
+/* Hardware values used when programming a channel */
+enum gsi_channel_protocol {
+ GSI_CHANNEL_PROTOCOL_MHI = 0x0,
+ GSI_CHANNEL_PROTOCOL_XHCI = 0x1,
+ GSI_CHANNEL_PROTOCOL_GPI = 0x2,
+ GSI_CHANNEL_PROTOCOL_XDCI = 0x3,
+};
+
+/* Hardware values representing an event ring immediate command opcode */
+enum gsi_evt_cmd_opcode {
+ GSI_EVT_ALLOCATE = 0x0,
+ GSI_EVT_RESET = 0x9,
+ GSI_EVT_DE_ALLOC = 0xa,
+};
+
+/* Hardware values representing a generic immediate command opcode */
+enum gsi_generic_cmd_opcode {
+ GSI_GENERIC_HALT_CHANNEL = 0x1,
+ GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
+};
+
+/* Hardware values representing a channel immediate command opcode */
+enum gsi_ch_cmd_opcode {
+ GSI_CH_ALLOCATE = 0x0,
+ GSI_CH_START = 0x1,
+ GSI_CH_STOP = 0x2,
+ GSI_CH_RESET = 0x9,
+ GSI_CH_DE_ALLOC = 0xa,
+};
+
+/**
+ * struct gsi_channel_scratch_gpi - GPI protocol scratch register
+ * @max_outstanding_tre:
+ * Defines the maximum number of TREs allowed in a single transaction
+ * on a channel, expressed in bytes (TRE count times the element size).
+ * This determines the amount of prefetch performed by the hardware.
+ * We configure this to equal the size of the TLV FIFO for the channel.
+ * @outstanding_threshold:
+ * Defines the threshold (in bytes) determining when the sequencer
+ * should update the channel doorbell. We configure this to equal
+ * the size of two TREs.
+ */
+struct gsi_channel_scratch_gpi {
+ u64 reserved1;
+ u16 reserved2;
+ u16 max_outstanding_tre;
+ u16 reserved3;
+ u16 outstanding_threshold;
+};
+
+/**
+ * union gsi_channel_scratch - channel scratch configuration area
+ *
+ * The exact interpretation of this register is protocol-specific.
+ * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
+ */
+union gsi_channel_scratch {
+ struct gsi_channel_scratch_gpi gpi;
+ struct {
+ u32 word1;
+ u32 word2;
+ u32 word3;
+ u32 word4;
+ } data;
+};
+
+/* Check things that can be validated at build time. */
+static void gsi_validate_build(void)
+{
+ /* This is used as a divisor */
+ BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);
+
+ /* Code assumes the size of channel and event ring element are
+ * the same (and fixed). Make sure the size of an event ring
+ * element is what's expected.
+ */
+ BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);
+
+ /* Hardware requires a 2^n ring size. We ensure the number of
+ * elements in an event ring is a power of 2 elsewhere; this
+ * ensures the elements themselves meet the requirement.
+ */
+ BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));
+
+ /* The channel element size must fit in this field */
+ BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));
+
+ /* The event ring element size must fit in this field */
+ BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
+}
+
+/* Return the channel id associated with a given channel */
+static u32 gsi_channel_id(struct gsi_channel *channel)
+{
+ return channel - &channel->gsi->channel[0];
+}
+
+static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val;
+
+ gsi->event_enable_bitmap |= BIT(evt_ring_id);
+ val = gsi->event_enable_bitmap;
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+}
+
+static void gsi_isr_ieob_clear(struct gsi *gsi, u32 mask)
+{
+ iowrite32(mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
+}
+
+static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val;
+
+ gsi->event_enable_bitmap &= ~BIT(evt_ring_id);
+ val = gsi->event_enable_bitmap;
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+}
+
+/* Enable all GSI interrupt types */
+static void gsi_irq_enable(struct gsi *gsi)
+{
+ u32 val;
+
+ /* We don't use inter-EE channel or event interrupts */
+ val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
+ val &= ~MSK_INTER_EE_CH_CTRL_FMASK;
+ val &= ~MSK_INTER_EE_EV_CTRL_FMASK;
+ iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
+
+ val = GENMASK(gsi->channel_count - 1, 0);
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+
+ val = GENMASK(gsi->evt_ring_count - 1, 0);
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+
+ /* Each IEOB interrupt is enabled (later) as needed by channels */
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+
+ val = GSI_CNTXT_GLOB_IRQ_ALL;
+ iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+
+ /* Never enable GSI_BREAK_POINT */
+ val = GSI_CNTXT_GSI_IRQ_ALL & ~EN_BREAK_POINT_FMASK;
+ iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+}
+
+/* Disable all GSI interrupt types */
+static void gsi_irq_disable(struct gsi *gsi)
+{
+ iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
+}
+
+/* Return the virtual address associated with a ring index */
+void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
+{
+ /* Note: index *must* be used modulo the ring count here */
+ return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
+}
+
+/* Return the 32-bit DMA address associated with a ring index */
+static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
+{
+ return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
+}
+
+/* Return the ring index of a 32-bit ring offset */
+static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
+{
+ return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
+}
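+
+/* A worked example for the two helpers above, assuming a ring based at DMA
+ * address 0x1000 (GSI_RING_ELEMENT_SIZE is 16, matching struct gsi_event
+ * per gsi_validate_build()):
+ *
+ *   gsi_ring_addr(ring, 3)       == 0x1000 + 3 * 16 == 0x1030
+ *   gsi_ring_index(ring, 0x1030) == (0x1030 - 0x1000) / 16 == 3
+ *
+ * Note that gsi_ring_addr() does not wrap; callers reduce the index modulo
+ * ring->count themselves where needed.
+ */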
+
+/* Issue a GSI command by writing a value to a register, then wait for
+ * completion to be signaled. Returns true if the command completes
+ * or false if it times out.
+ */
+static bool
+gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
+{
+ reinit_completion(completion);
+
+ iowrite32(val, gsi->virt + reg);
+
+ return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
+}
+
+/* Return the hardware's notion of the current state of an event ring */
+static enum gsi_evt_ring_state
+gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+
+ return u32_get_bits(val, EV_CHSTATE_FMASK);
+}
+
+/* Issue an event ring command and wait for it to complete */
+static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+ enum gsi_evt_cmd_opcode opcode)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ struct completion *completion = &evt_ring->completion;
+ u32 val;
+
+ val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
+ val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
+
+ if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
+ return 0; /* Success! */
+
+ dev_err(gsi->dev,
+ "GSI command %u to event ring %u timed out (state is %u)\n",
+ opcode, evt_ring_id, evt_ring->state);
+
+ return -ETIMEDOUT;
+}
+
+/* Allocate an event ring in NOT_ALLOCATED state */
+static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ int ret;
+
+ /* Get initial event ring state */
+ evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
+
+ if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ return -EINVAL;
+
+ ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
+ if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
+ dev_err(gsi->dev, "bad event ring state (%u) after alloc\n",
+ evt_ring->state);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/* Reset a GSI event ring in ALLOCATED or ERROR state. */
+static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ enum gsi_evt_ring_state state = evt_ring->state;
+ int ret;
+
+ if (state != GSI_EVT_RING_STATE_ALLOCATED &&
+ state != GSI_EVT_RING_STATE_ERROR) {
+ dev_err(gsi->dev, "bad event ring state (%u) before reset\n",
+ evt_ring->state);
+ return;
+ }
+
+ ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+ if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
+ dev_err(gsi->dev, "bad event ring state (%u) after reset\n",
+ evt_ring->state);
+}
+
+/* Issue a hardware de-allocation request for an allocated event ring */
+static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ int ret;
+
+ if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
+ dev_err(gsi->dev, "bad event ring state (%u) before dealloc\n",
+ evt_ring->state);
+ return;
+ }
+
+ ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+ if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ dev_err(gsi->dev, "bad event ring state (%u) after dealloc\n",
+ evt_ring->state);
+}
+
+/* Return the hardware's notion of the current state of a channel */
+static enum gsi_channel_state
+gsi_channel_state(struct gsi *gsi, u32 channel_id)
+{
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
+
+ return u32_get_bits(val, CHSTATE_FMASK);
+}
+
+/* Issue a channel command and wait for it to complete */
+static int
+gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
+{
+ struct completion *completion = &channel->completion;
+ u32 channel_id = gsi_channel_id(channel);
+ u32 val;
+
+ val = u32_encode_bits(channel_id, CH_CHID_FMASK);
+ val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
+
+ if (gsi_command(channel->gsi, GSI_CH_CMD_OFFSET, val, completion))
+ return 0; /* Success! */
+
+ dev_err(channel->gsi->dev,
+ "GSI command %u to channel %u timed out (state is %u)\n",
+ opcode, channel_id, channel->state);
+
+ return -ETIMEDOUT;
+}
+
+/* Allocate GSI channel in NOT_ALLOCATED state */
+static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
+
+ /* Get initial channel state */
+ channel->state = gsi_channel_state(gsi, channel_id);
+
+ if (channel->state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+ return -EINVAL;
+
+ ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
+ if (!ret && channel->state != GSI_CHANNEL_STATE_ALLOCATED) {
+ dev_err(gsi->dev, "bad channel state (%u) after alloc\n",
+ channel->state);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/* Start an ALLOCATED channel */
+static int gsi_channel_start_command(struct gsi_channel *channel)
+{
+ enum gsi_channel_state state = channel->state;
+ int ret;
+
+ if (state != GSI_CHANNEL_STATE_ALLOCATED &&
+ state != GSI_CHANNEL_STATE_STOPPED)
+ return -EINVAL;
+
+ ret = gsi_channel_command(channel, GSI_CH_START);
+ if (!ret && channel->state != GSI_CHANNEL_STATE_STARTED) {
+ dev_err(channel->gsi->dev,
+ "bad channel state (%u) after start\n",
+ channel->state);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/* Stop a GSI channel in STARTED state */
+static int gsi_channel_stop_command(struct gsi_channel *channel)
+{
+ enum gsi_channel_state state = channel->state;
+ int ret;
+
+ if (state != GSI_CHANNEL_STATE_STARTED &&
+ state != GSI_CHANNEL_STATE_STOP_IN_PROC)
+ return -EINVAL;
+
+ ret = gsi_channel_command(channel, GSI_CH_STOP);
+ if (ret || channel->state == GSI_CHANNEL_STATE_STOPPED)
+ return ret;
+
+ /* We may have to try again if stop is in progress */
+ if (channel->state == GSI_CHANNEL_STATE_STOP_IN_PROC)
+ return -EAGAIN;
+
+ dev_err(channel->gsi->dev, "bad channel state (%u) after stop\n",
+ channel->state);
+
+ return -EIO;
+}
+
+/* Reset a GSI channel in ALLOCATED or ERROR state. */
+static void gsi_channel_reset_command(struct gsi_channel *channel)
+{
+ int ret;
+
+ msleep(1); /* A short delay is required before a RESET command */
+
+ if (channel->state != GSI_CHANNEL_STATE_STOPPED &&
+ channel->state != GSI_CHANNEL_STATE_ERROR) {
+ dev_err(channel->gsi->dev,
+ "bad channel state (%u) before reset\n",
+ channel->state);
+ return;
+ }
+
+ ret = gsi_channel_command(channel, GSI_CH_RESET);
+ if (!ret && channel->state != GSI_CHANNEL_STATE_ALLOCATED)
+ dev_err(channel->gsi->dev,
+ "bad channel state (%u) after reset\n",
+ channel->state);
+}
+
+/* Deallocate an ALLOCATED GSI channel */
+static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
+
+ if (channel->state != GSI_CHANNEL_STATE_ALLOCATED) {
+ dev_err(gsi->dev, "bad channel state (%u) before dealloc\n",
+ channel->state);
+ return;
+ }
+
+ ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
+ if (!ret && channel->state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+ dev_err(gsi->dev, "bad channel state (%u) after dealloc\n",
+ channel->state);
+}
+
+/* Ring an event ring doorbell, reporting the last entry processed by the AP.
+ * The index argument (modulo the ring count) is the first unfilled entry, so
+ * we supply one less than that with the doorbell. Update the event ring
+ * index field with the value provided.
+ */
+static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
+{
+ struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
+ u32 val;
+
+ ring->index = index; /* Next unused entry */
+
+ /* Note: index *must* be used modulo the ring count here */
+ val = gsi_ring_addr(ring, (index - 1) % ring->count);
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
+}
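+
+/* A worked example for the doorbell math above: with a 16-entry event ring,
+ * after consuming entries 0..3 the caller passes index 4 (the first
+ * unfilled entry), and (4 - 1) % 16 == 3, so the doorbell is written with
+ * the address of entry 3, the last one processed. The unsigned arithmetic
+ * also wraps correctly at index 0: (0 - 1) % 16 == 15.
+ */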
+
+/* Program an event ring for use */
+static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
+{
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
+ u32 val;
+
+ val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK);
+ val |= EV_INTYPE_FMASK;
+ val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+
+ val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
+
+ /* The context 2 and 3 registers store the low-order and
+ * high-order 32 bits of the address of the event ring,
+ * respectively.
+ */
+ val = evt_ring->ring.addr & GENMASK(31, 0);
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
+
+ val = evt_ring->ring.addr >> 32;
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
+
+ /* Enable interrupt moderation by setting the moderation delay */
+ val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
+ val |= u32_encode_bits(1, MODC_FMASK); /* comes from channel */
+ iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));
+
+ /* No MSI write data, and MSI address high and low address is 0 */
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));
+
+ /* We don't need to get event read pointer updates */
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
+ iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
+
+ /* Finally, tell the hardware we've completed event 0 (arbitrary) */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
+}
+
+/* Return the last (most recent) transaction completed on a channel. */
+static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct gsi_trans *trans;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ if (!list_empty(&trans_info->complete))
+ trans = list_last_entry(&trans_info->complete,
+ struct gsi_trans, links);
+ else if (!list_empty(&trans_info->polled))
+ trans = list_last_entry(&trans_info->polled,
+ struct gsi_trans, links);
+ else
+ trans = NULL;
+
+ /* Caller will wait for this, so take a reference */
+ if (trans)
+ refcount_inc(&trans->refcount);
+
+ spin_unlock_bh(&trans_info->spinlock);
+
+ return trans;
+}
+
+/* Wait for transaction activity on a channel to complete */
+static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
+{
+ struct gsi_trans *trans;
+
+ /* Get the last transaction, and wait for it to complete */
+ trans = gsi_channel_trans_last(channel);
+ if (trans) {
+ wait_for_completion(&trans->completion);
+ gsi_trans_free(trans);
+ }
+}
+
+/* Stop channel activity. Transactions may not be allocated until thawed. */
+static void gsi_channel_freeze(struct gsi_channel *channel)
+{
+ gsi_channel_trans_quiesce(channel);
+
+ napi_disable(&channel->napi);
+
+ gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
+}
+
+/* Allow transactions to be used on the channel again. */
+static void gsi_channel_thaw(struct gsi_channel *channel)
+{
+ gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
+
+ napi_enable(&channel->napi);
+}
+
+/* Program a channel for use */
+static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
+{
+ size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
+ u32 channel_id = gsi_channel_id(channel);
+ union gsi_channel_scratch scr = { };
+ struct gsi_channel_scratch_gpi *gpi;
+ struct gsi *gsi = channel->gsi;
+ u32 wrr_weight = 0;
+ u32 val;
+
+ /* Arbitrarily pick TRE 0 as the first channel element to use */
+ channel->tre_ring.index = 0;
+
+ /* We program all channels to use GPI protocol */
+ val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK);
+ if (channel->toward_ipa)
+ val |= CHTYPE_DIR_FMASK;
+ val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
+ val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
+ iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
+
+ val = u32_encode_bits(size, R_LENGTH_FMASK);
+ iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));
+
+ /* The context 2 and 3 registers store the low-order and
+ * high-order 32 bits of the address of the channel ring,
+ * respectively.
+ */
+ val = channel->tre_ring.addr & GENMASK(31, 0);
+ iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
+
+ val = channel->tre_ring.addr >> 32;
+ iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));
+
+ /* Command channel gets low weighted round-robin priority */
+ if (channel->command)
+ wrr_weight = field_max(WRR_WEIGHT_FMASK);
+ val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);
+
+ /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
+
+ /* Enable the doorbell engine if requested */
+ if (doorbell)
+ val |= USE_DB_ENG_FMASK;
+
+ if (!channel->use_prefetch)
+ val |= USE_ESCAPE_BUF_ONLY_FMASK;
+
+ iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
+
+ /* Now update the scratch registers for GPI protocol */
+ gpi = &scr.gpi;
+ gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
+ GSI_RING_ELEMENT_SIZE;
+ gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
+
+ val = scr.data.word1;
+ iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));
+
+ val = scr.data.word2;
+ iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));
+
+ val = scr.data.word3;
+ iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));
+
+ /* We must preserve the upper 16 bits of the last scratch register.
+ * The next sequence assumes those bits remain unchanged between the
+ * read and the write.
+ */
+ val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
+ val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
+ iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
+
+ /* All done! */
+}
+
+static void gsi_channel_deprogram(struct gsi_channel *channel)
+{
+ /* Nothing to do */
+}
+
+/* Start an allocated GSI channel */
+int gsi_channel_start(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 evt_ring_id = channel->evt_ring_id;
+ int ret;
+
+ mutex_lock(&gsi->mutex);
+
+ ret = gsi_channel_start_command(channel);
+
+ mutex_unlock(&gsi->mutex);
+
+ /* Clear the channel's event ring interrupt in case it's pending */
+ gsi_isr_ieob_clear(gsi, BIT(evt_ring_id));
+
+ gsi_channel_thaw(channel);
+
+ return ret;
+}
+
+/* Stop a started channel */
+int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 retries;
+ int ret;
+
+ gsi_channel_freeze(channel);
+
+ /* The channel could have entered STOPPED state since the last call
+ * if the STOP command timed out then. Don't issue another STOP
+ * command if stopping already succeeded (but we still want the
+ * freeze above).
+ */
+ if (channel->state == GSI_CHANNEL_STATE_STOPPED)
+ return 0;
+
+ /* RX channels might require a little time to enter STOPPED state */
+ retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
+
+ mutex_lock(&gsi->mutex);
+
+ do {
+ ret = gsi_channel_stop_command(channel);
+ if (ret != -EAGAIN)
+ break;
+ msleep(1);
+ } while (retries--);
+
+ mutex_unlock(&gsi->mutex);
+
+ /* Thaw the channel if we need to retry (or on error) */
+ if (ret)
+ gsi_channel_thaw(channel);
+
+ return ret;
+}
+
+/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool db_enable)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ mutex_lock(&gsi->mutex);
+
+ /* Due to a hardware quirk we need to reset RX channels twice. */
+ gsi_channel_reset_command(channel);
+ if (!channel->toward_ipa)
+ gsi_channel_reset_command(channel);
+
+ gsi_channel_program(channel, db_enable);
+ gsi_channel_trans_cancel_pending(channel);
+
+ mutex_unlock(&gsi->mutex);
+}
+
+/* Stop a STARTED channel for suspend (using stop if requested) */
+int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ if (stop)
+ return gsi_channel_stop(gsi, channel_id);
+
+ gsi_channel_freeze(channel);
+
+ return 0;
+}
+
+/* Resume a suspended channel (starting will be requested if STOPPED) */
+int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ if (start)
+ return gsi_channel_start(gsi, channel_id);
+
+ gsi_channel_thaw(channel);
+
+ return 0;
+}
+
+/**
+ * gsi_channel_tx_queued() - Report queued TX transfers for a channel
+ * @channel: Channel for which to report
+ *
+ * Report to the network stack the number of bytes and transactions that
+ * have been queued to hardware since last call. This and the next function
+ * supply information used by the network stack for throttling.
+ *
+ * For each channel we track the number of transactions used and bytes of
+ * data those transactions represent. We also track what those values are
+ * each time this function is called. Subtracting the two tells us
+ * the number of bytes and transactions that have been added between
+ * successive calls.
+ *
+ * Calling this each time we ring the channel doorbell allows us to
+ * provide accurate information to the network stack about how much
+ * work we've given the hardware at any point in time.
+ */
+void gsi_channel_tx_queued(struct gsi_channel *channel)
+{
+ u32 trans_count;
+ u32 byte_count;
+
+ byte_count = channel->byte_count - channel->queued_byte_count;
+ trans_count = channel->trans_count - channel->queued_trans_count;
+ channel->queued_byte_count = channel->byte_count;
+ channel->queued_trans_count = channel->trans_count;
+
+ ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
+ trans_count, byte_count);
+}
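+
+/* A worked example for the accounting above: if the channel has committed
+ * 10 transactions totalling 4096 bytes, and the previous call recorded
+ * 8 transactions and 3072 bytes, this call reports 2 transactions and
+ * 1024 bytes to ipa_gsi_channel_tx_queued(), then saves the new totals so
+ * the next call reports only what follows.
+ */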
+
+/**
+ * gsi_channel_tx_update() - Report completed TX transfers
+ * @channel: Channel that has completed transmitting packets
+ * @trans: Last transaction known to be complete
+ *
+ * Compute the number of transactions and bytes that have been transferred
+ * over a TX channel since the given transaction was committed. Report this
+ * information to the network stack.
+ *
+ * At the time a transaction is committed, we record its channel's
+ * committed transaction and byte counts *in the transaction*.
+ * Completions are signaled by the hardware with an interrupt, and
+ * we can determine the latest completed transaction at that time.
+ *
+ * The difference between the byte/transaction count recorded in
+ * the transaction and the count last time we recorded a completion
+ * tells us exactly how much data has been transferred between
+ * completions.
+ *
+ * Calling this each time we learn of a newly-completed transaction
+ * allows us to provide accurate information to the network stack
+ * about how much work has been completed by the hardware at a given
+ * point in time.
+ */
+static void
+gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
+{
+ u64 byte_count = trans->byte_count + trans->len;
+ u64 trans_count = trans->trans_count + 1;
+
+ byte_count -= channel->compl_byte_count;
+ channel->compl_byte_count += byte_count;
+ trans_count -= channel->compl_trans_count;
+ channel->compl_trans_count += trans_count;
+
+ ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
+ trans_count, byte_count);
+}
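+
+/* A worked example for the computation above: for a completed transaction
+ * committed when the channel totals were 6 transactions and 2048 bytes,
+ * with trans->len 512, byte_count starts at 2048 + 512 = 2560 and
+ * trans_count at 7; if compl_byte_count was 2048 and compl_trans_count
+ * was 6, the function reports 1 transaction and 512 bytes.
+ */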
+
+/* Channel control interrupt handler */
+static void gsi_isr_chan_ctrl(struct gsi *gsi)
+{
+ u32 channel_mask;
+
+ channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
+ iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+
+ while (channel_mask) {
+ u32 channel_id = __ffs(channel_mask);
+ struct gsi_channel *channel;
+
+ channel_mask ^= BIT(channel_id);
+
+ channel = &gsi->channel[channel_id];
+ channel->state = gsi_channel_state(gsi, channel_id);
+
+ complete(&channel->completion);
+ }
+}
+
+/* Event ring control interrupt handler */
+static void gsi_isr_evt_ctrl(struct gsi *gsi)
+{
+ u32 event_mask;
+
+ event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
+ iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+
+ while (event_mask) {
+ u32 evt_ring_id = __ffs(event_mask);
+ struct gsi_evt_ring *evt_ring;
+
+ event_mask ^= BIT(evt_ring_id);
+
+ evt_ring = &gsi->evt_ring[evt_ring_id];
+ evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
+
+ complete(&evt_ring->completion);
+ }
+}
+
+/* Global channel error interrupt handler */
+static void
+gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
+{
+ if (code == GSI_OUT_OF_RESOURCES_ERR) {
+ dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
+ complete(&gsi->channel[channel_id].completion);
+ return;
+ }
+
+ /* Report, but otherwise ignore all other error codes */
+ dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
+ channel_id, err_ee, code);
+}
+
+/* Global event error interrupt handler */
+static void
+gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
+{
+ if (code == GSI_OUT_OF_RESOURCES_ERR) {
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ u32 channel_id = gsi_channel_id(evt_ring->channel);
+
+ complete(&evt_ring->completion);
+ dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
+ channel_id);
+ return;
+ }
+
+ /* Report, but otherwise ignore all other error codes */
+ dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
+ evt_ring_id, err_ee, code);
+}
+
+/* Global error interrupt handler */
+static void gsi_isr_glob_err(struct gsi *gsi)
+{
+ enum gsi_err_type type;
+ enum gsi_err_code code;
+ u32 which;
+ u32 val;
+ u32 ee;
+
+ /* Get the logged error, then reinitialize the log */
+ val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
+ iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
+ iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
+
+ ee = u32_get_bits(val, ERR_EE_FMASK);
+ which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
+ type = u32_get_bits(val, ERR_TYPE_FMASK);
+ code = u32_get_bits(val, ERR_CODE_FMASK);
+
+ if (type == GSI_ERR_TYPE_CHAN)
+ gsi_isr_glob_chan_err(gsi, ee, which, code);
+ else if (type == GSI_ERR_TYPE_EVT)
+ gsi_isr_glob_evt_err(gsi, ee, which, code);
+ else /* type GSI_ERR_TYPE_GLOB should be fatal */
+ dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
+}
+
+/* Generic EE interrupt handler */
+static void gsi_isr_gp_int1(struct gsi *gsi)
+{
+ u32 result;
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+ result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
+ if (result != GENERIC_EE_SUCCESS_FVAL)
+ dev_err(gsi->dev, "global INT1 generic result %u\n", result);
+
+ complete(&gsi->completion);
+}
+
+/* Global EE interrupt handler */
+static void gsi_isr_glob_ee(struct gsi *gsi)
+{
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
+
+ if (val & ERROR_INT_FMASK)
+ gsi_isr_glob_err(gsi);
+
+ iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
+
+ val &= ~ERROR_INT_FMASK;
+
+ if (val & EN_GP_INT1_FMASK) {
+ val ^= EN_GP_INT1_FMASK;
+ gsi_isr_gp_int1(gsi);
+ }
+
+ if (val)
+ dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
+}
+
+/* I/O completion interrupt event */
+static void gsi_isr_ieob(struct gsi *gsi)
+{
+ u32 event_mask;
+
+ event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
+ gsi_isr_ieob_clear(gsi, event_mask);
+
+ while (event_mask) {
+ u32 evt_ring_id = __ffs(event_mask);
+
+ event_mask ^= BIT(evt_ring_id);
+
+ gsi_irq_ieob_disable(gsi, evt_ring_id);
+ napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
+ }
+}
+
+/* General event interrupts represent serious problems, so report them */
+static void gsi_isr_general(struct gsi *gsi)
+{
+ struct device *dev = gsi->dev;
+ u32 val;
+
+ val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
+ iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
+
+ if (val)
+ dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
+}
+
+/**
+ * gsi_isr() - Top level GSI interrupt service routine
+ * @irq: Interrupt number (ignored)
+ * @dev_id: GSI pointer supplied to request_irq()
+ *
+ * This is the main handler function registered for the GSI IRQ. Each type
+ * of interrupt has a separate handler function that is called from here.
+ */
+static irqreturn_t gsi_isr(int irq, void *dev_id)
+{
+ struct gsi *gsi = dev_id;
+ u32 intr_mask;
+ u32 cnt = 0;
+
+ while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
+ /* intr_mask contains bitmask of pending GSI interrupts */
+ do {
+ u32 gsi_intr = BIT(__ffs(intr_mask));
+
+ intr_mask ^= gsi_intr;
+
+ switch (gsi_intr) {
+ case CH_CTRL_FMASK:
+ gsi_isr_chan_ctrl(gsi);
+ break;
+ case EV_CTRL_FMASK:
+ gsi_isr_evt_ctrl(gsi);
+ break;
+ case GLOB_EE_FMASK:
+ gsi_isr_glob_ee(gsi);
+ break;
+ case IEOB_FMASK:
+ gsi_isr_ieob(gsi);
+ break;
+ case GENERAL_FMASK:
+ gsi_isr_general(gsi);
+ break;
+ default:
+ dev_err(gsi->dev,
+ "%s: unrecognized type 0x%08x\n",
+ __func__, gsi_intr);
+ break;
+ }
+ } while (intr_mask);
+
+ if (++cnt > GSI_ISR_MAX_ITER) {
+ dev_err(gsi->dev, "interrupt flood\n");
+ break;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
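+
+/* Illustrative sketch (not a quote of this driver's setup code): gsi_isr()
+ * has the standard irq_handler_t signature, so registering it follows the
+ * usual pattern; "irq" and the error handling below are assumptions.
+ */
+#if 0	/* illustrative only */
+	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
+	if (ret)
+		return ret;
+#endif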
+
+/* Return the transaction associated with a transfer completion event */
+static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
+ struct gsi_event *event)
+{
+ u32 tre_offset;
+ u32 tre_index;
+
+ /* Event xfer_ptr records the TRE it's associated with */
+ tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
+ tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
+
+ return gsi_channel_trans_mapped(channel, tre_index);
+}
+
+/**
+ * gsi_evt_ring_rx_update() - Record lengths of received data
+ * @evt_ring: Event ring associated with channel that received packets
+ * @index: Event index in ring reported by hardware
+ *
+ * Events for RX channels contain the actual number of bytes received into
+ * the buffer. Every event has a transaction associated with it, and here
+ * we update transactions to record their actual received lengths.
+ *
+ * This function is called whenever we learn that the GSI hardware has filled
+ * new events since the last time we checked. The ring's index field
+ * identifies the first entry in need of processing. The index provided is the
+ * first *unfilled* event in the ring (following the last filled one).
+ *
+ * Events are sequential within the event ring, and transactions are
+ * sequential within the transaction pool.
+ *
+ * Note that @index always refers to an element *within* the event ring.
+ */
+static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+{
+ struct gsi_channel *channel = evt_ring->channel;
+ struct gsi_ring *ring = &evt_ring->ring;
+ struct gsi_trans_info *trans_info;
+ struct gsi_event *event_done;
+ struct gsi_event *event;
+ struct gsi_trans *trans;
+ u32 byte_count = 0;
+ u32 old_index;
+ u32 event_avail;
+
+ trans_info = &channel->trans_info;
+
+ /* We'll start with the oldest un-processed event. RX channels
+ * replenish receive buffers in single-TRE transactions, so we
+ * can just map that event to its transaction. Transactions
+ * associated with completion events are consecutive.
+ */
+ old_index = ring->index;
+ event = gsi_ring_virt(ring, old_index);
+ trans = gsi_event_trans(channel, event);
+
+ /* Compute the number of events to process before we wrap,
+ * and determine when we'll be done processing events.
+ */
+ event_avail = ring->count - old_index % ring->count;
+ event_done = gsi_ring_virt(ring, index);
+ do {
+ trans->len = __le16_to_cpu(event->len);
+ byte_count += trans->len;
+
+ /* Move on to the next event and transaction */
+ if (--event_avail)
+ event++;
+ else
+ event = gsi_ring_virt(ring, 0);
+ trans = gsi_trans_pool_next(&trans_info->pool, trans);
+ } while (event != event_done);
+
+ /* We record RX bytes when they are received */
+ channel->byte_count += byte_count;
+ channel->trans_count++;
+}
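+
+/* A worked example for the wrap handling above: with a 16-entry ring,
+ * old_index 14 and index 2, event_avail starts at 2. Entries 14 and 15
+ * are reached with event++, the walk then restarts at
+ * gsi_ring_virt(ring, 0), and it stops when event reaches entry 2
+ * (event_done), having also processed entries 0 and 1.
+ */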
+
+/* Initialize a ring, including allocating DMA memory for its entries */
+static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
+{
+ size_t size = count * GSI_RING_ELEMENT_SIZE;
+ struct device *dev = gsi->dev;
+ dma_addr_t addr;
+
+ /* Hardware requires a 2^n ring size, with alignment equal to size */
+ ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
+ if (ring->virt && addr % size) {
+ dma_free_coherent(dev, size, ring->virt, addr);
+ dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
+ size);
+ return -EINVAL; /* Not a good error value, but distinct */
+ } else if (!ring->virt) {
+ return -ENOMEM;
+ }
+ ring->addr = addr;
+ ring->count = count;
+
+ return 0;
+}
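+
+/* A worked example for the alignment check above: count 128 with 16-byte
+ * elements gives size 2048, so the ring must start on a 2048-byte
+ * boundary; an allocation at 0x1800 passes (0x1800 % 0x800 == 0), while
+ * one at 0x1400 would be freed and rejected.
+ */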
+
+/* Free a previously-allocated ring */
+static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
+{
+ size_t size = ring->count * GSI_RING_ELEMENT_SIZE;
+
+ dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
+}
+
+/* Allocate an available event ring id */
+static int gsi_evt_ring_id_alloc(struct gsi *gsi)
+{
+ u32 evt_ring_id;
+
+ if (gsi->event_bitmap == ~0U) {
+ dev_err(gsi->dev, "event rings exhausted\n");
+ return -ENOSPC;
+ }
+
+ evt_ring_id = ffz(gsi->event_bitmap);
+ gsi->event_bitmap |= BIT(evt_ring_id);
+
+ return (int)evt_ring_id;
+}
+
+/* Free a previously-allocated event ring id */
+static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
+{
+ gsi->event_bitmap &= ~BIT(evt_ring_id);
+}
+
+/* Ring a channel doorbell, reporting the first un-filled entry */
+void gsi_channel_doorbell(struct gsi_channel *channel)
+{
+ struct gsi_ring *tre_ring = &channel->tre_ring;
+ u32 channel_id = gsi_channel_id(channel);
+ struct gsi *gsi = channel->gsi;
+ u32 val;
+
+ /* Note: index *must* be used modulo the ring count here */
+ val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
+ iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
+}
+
+/* Consult hardware, move any newly completed transactions to completed list */
+static void gsi_channel_update(struct gsi_channel *channel)
+{
+ u32 evt_ring_id = channel->evt_ring_id;
+ struct gsi *gsi = channel->gsi;
+ struct gsi_evt_ring *evt_ring;
+ struct gsi_trans *trans;
+ struct gsi_ring *ring;
+ u32 offset;
+ u32 index;
+
+ evt_ring = &gsi->evt_ring[evt_ring_id];
+ ring = &evt_ring->ring;
+
+ /* See if there's anything new to process; if not, we're done. Note
+ * that index always refers to an entry *within* the event ring.
+ */
+ offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
+ index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
+ if (index == ring->index % ring->count)
+ return;
+
+ /* Get the transaction for the latest completed event. Take a
+ * reference to keep it from completing before we give the events
+ * for this and previous transactions back to the hardware.
+ */
+ trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
+ refcount_inc(&trans->refcount);
+
+ /* For RX channels, update each completed transaction with the number
+ * of bytes that were actually received. For TX channels, report
+ * the number of transactions and bytes this completion represents
+ * up the network stack.
+ */
+ if (channel->toward_ipa)
+ gsi_channel_tx_update(channel, trans);
+ else
+ gsi_evt_ring_rx_update(evt_ring, index);
+
+ gsi_trans_move_complete(trans);
+
+ /* Tell the hardware we've handled these events */
+ gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
+
+ gsi_trans_free(trans);
+}
+
+/**
+ * gsi_channel_poll_one() - Return a single completed transaction on a channel
+ * @channel: Channel to be polled
+ *
+ * Return: Transaction pointer, or null if none are available
+ *
+ * This function returns the first entry on a channel's completed transaction
+ * list. If that list is empty, the hardware is consulted to determine
+ * whether any new transactions have completed. If so, they're moved to the
+ * completed list and the new first entry is returned. If there are no more
+ * completed transactions, a null pointer is returned.
+ */
+static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
+{
+ struct gsi_trans *trans;
+
+ /* Get the first transaction from the completed list */
+ trans = gsi_channel_trans_complete(channel);
+ if (!trans) {
+ /* List is empty; see if there's more to do */
+ gsi_channel_update(channel);
+ trans = gsi_channel_trans_complete(channel);
+ }
+
+ if (trans)
+ gsi_trans_move_polled(trans);
+
+ return trans;
+}
+
+/**
+ * gsi_channel_poll() - NAPI poll function for a channel
+ * @napi: NAPI structure for the channel
+ * @budget: Budget supplied by NAPI core
+ *
+ * Return: Number of items polled (<= budget)
+ *
+ * Single transactions completed by hardware are polled until either
+ * the budget is exhausted, or there are no more. Each transaction
+ * polled is passed to gsi_trans_complete(), to perform remaining
+ * completion processing and retire/free the transaction.
+ */
+static int gsi_channel_poll(struct napi_struct *napi, int budget)
+{
+ struct gsi_channel *channel;
+ int count = 0;
+
+ channel = container_of(napi, struct gsi_channel, napi);
+ while (count < budget) {
+ struct gsi_trans *trans;
+
+		trans = gsi_channel_poll_one(channel);
+		if (!trans)
+			break;
+		count++;
+		gsi_trans_complete(trans);
+ }
+
+ if (count < budget) {
+ napi_complete(&channel->napi);
+ gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
+ }
+
+ return count;
+}
+
+/* The event bitmap represents which event ids are available for allocation.
+ * Set bits are not available, clear bits can be used. This function
+ * initializes the map so all events supported by the hardware are available,
+ * then precludes any reserved events from being allocated.
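+ *
+ * For example, with evt_ring_max = 13 the low 13 bits (ids 0..12) are
+ * initially clear and bits 13..31 are set, making only ids 0..12
+ * available; ids in the reserved GSI_MHI_EVENT_ID_START..END range are
+ * then marked unavailable as well.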
+ */
+static u32 gsi_event_bitmap_init(u32 evt_ring_max)
+{
+ u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);
+
+ event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);
+
+ return event_bitmap;
+}
+
+/* Setup function for event rings */
+static void gsi_evt_ring_setup(struct gsi *gsi)
+{
+ /* Nothing to do */
+}
+
+/* Inverse of gsi_evt_ring_setup() */
+static void gsi_evt_ring_teardown(struct gsi *gsi)
+{
+ /* Nothing to do */
+}
+
+/* Setup function for a single channel */
+static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
+ bool db_enable)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 evt_ring_id = channel->evt_ring_id;
+ int ret;
+
+ if (!channel->gsi)
+ return 0; /* Ignore uninitialized channels */
+
+ ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
+ if (ret)
+ return ret;
+
+ gsi_evt_ring_program(gsi, evt_ring_id);
+
+ ret = gsi_channel_alloc_command(gsi, channel_id);
+ if (ret)
+ goto err_evt_ring_de_alloc;
+
+ gsi_channel_program(channel, db_enable);
+
+ if (channel->toward_ipa)
+ netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
+ gsi_channel_poll, NAPI_POLL_WEIGHT);
+ else
+ netif_napi_add(&gsi->dummy_dev, &channel->napi,
+ gsi_channel_poll, NAPI_POLL_WEIGHT);
+
+ return 0;
+
+err_evt_ring_de_alloc:
+ /* We've done nothing with the event ring yet so don't reset */
+ gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_setup_one() */
+static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 evt_ring_id = channel->evt_ring_id;
+
+ if (!channel->gsi)
+ return; /* Ignore uninitialized channels */
+
+ netif_napi_del(&channel->napi);
+
+ gsi_channel_deprogram(channel);
+ gsi_channel_de_alloc_command(gsi, channel_id);
+ gsi_evt_ring_reset_command(gsi, evt_ring_id);
+ gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
+}
+
+static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
+ enum gsi_generic_cmd_opcode opcode)
+{
+ struct completion *completion = &gsi->completion;
+ u32 val;
+
+ val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
+ val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
+ val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
+
+ if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion))
+ return 0; /* Success! */
+
+ dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
+ opcode, channel_id);
+
+ return -ETIMEDOUT;
+}
+
+static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
+{
+ return gsi_generic_command(gsi, channel_id,
+ GSI_GENERIC_ALLOCATE_CHANNEL);
+}
+
+static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
+{
+ int ret;
+
+ ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
+ if (ret)
+ dev_err(gsi->dev, "error %d halting modem channel %u\n",
+ ret, channel_id);
+}
+
+/* Setup function for channels */
+static int gsi_channel_setup(struct gsi *gsi, bool db_enable)
+{
+ u32 channel_id = 0;
+ u32 mask;
+ int ret;
+
+ gsi_evt_ring_setup(gsi);
+ gsi_irq_enable(gsi);
+
+ mutex_lock(&gsi->mutex);
+
+ do {
+ ret = gsi_channel_setup_one(gsi, channel_id, db_enable);
+ if (ret)
+ goto err_unwind;
+ } while (++channel_id < gsi->channel_count);
+
+ /* Make sure no channels were defined that hardware does not support */
+ while (channel_id < GSI_CHANNEL_COUNT_MAX) {
+ struct gsi_channel *channel = &gsi->channel[channel_id++];
+
+ if (!channel->gsi)
+ continue; /* Ignore uninitialized channels */
+
+ dev_err(gsi->dev, "channel %u not supported by hardware\n",
+ channel_id - 1);
+ channel_id = gsi->channel_count;
+ goto err_unwind;
+ }
+
+ /* Allocate modem channels if necessary */
+ mask = gsi->modem_channel_bitmap;
+ while (mask) {
+ u32 modem_channel_id = __ffs(mask);
+
+ ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
+ if (ret)
+ goto err_unwind_modem;
+
+ /* Clear bit from mask only after success (for unwind) */
+ mask ^= BIT(modem_channel_id);
+ }
+
+ mutex_unlock(&gsi->mutex);
+
+ return 0;
+
+err_unwind_modem:
+ /* Compute which modem channels need to be deallocated */
+ mask ^= gsi->modem_channel_bitmap;
+ while (mask) {
+ u32 channel_id = __fls(mask);
+
+ mask ^= BIT(channel_id);
+
+ gsi_modem_channel_halt(gsi, channel_id);
+ }
+
+err_unwind:
+ while (channel_id--)
+ gsi_channel_teardown_one(gsi, channel_id);
+
+ mutex_unlock(&gsi->mutex);
+
+ gsi_irq_disable(gsi);
+ gsi_evt_ring_teardown(gsi);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_setup() */
+static void gsi_channel_teardown(struct gsi *gsi)
+{
+ u32 mask = gsi->modem_channel_bitmap;
+ u32 channel_id;
+
+ mutex_lock(&gsi->mutex);
+
+ while (mask) {
+ u32 channel_id = __fls(mask);
+
+ mask ^= BIT(channel_id);
+
+ gsi_modem_channel_halt(gsi, channel_id);
+ }
+
+ channel_id = gsi->channel_count - 1;
+ do
+ gsi_channel_teardown_one(gsi, channel_id);
+ while (channel_id--);
+
+ mutex_unlock(&gsi->mutex);
+
+ gsi_irq_disable(gsi);
+ gsi_evt_ring_teardown(gsi);
+}
+
+/* Setup function for GSI. GSI firmware must be loaded and initialized */
+int gsi_setup(struct gsi *gsi, bool db_enable)
+{
+ u32 val;
+
+ /* Here is where we first touch the GSI hardware */
+ val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
+ if (!(val & ENABLED_FMASK)) {
+ dev_err(gsi->dev, "GSI has not been enabled\n");
+ return -EIO;
+ }
+
+ val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
+
+ gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
+ if (!gsi->channel_count) {
+ dev_err(gsi->dev, "GSI reports zero channels supported\n");
+ return -EINVAL;
+ }
+ if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
+ dev_warn(gsi->dev,
+ "limiting to %u channels (hardware supports %u)\n",
+ GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
+ gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
+ }
+
+ gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
+ if (!gsi->evt_ring_count) {
+ dev_err(gsi->dev, "GSI reports zero event rings supported\n");
+ return -EINVAL;
+ }
+ if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
+ dev_warn(gsi->dev,
+ "limiting to %u event rings (hardware supports %u)\n",
+ GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
+ gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
+ }
+
+ /* Initialize the error log */
+ iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
+
+ /* Writing 1 indicates IRQ interrupts; 0 would be MSI */
+ iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
+
+ return gsi_channel_setup(gsi, db_enable);
+}
+
+/* Inverse of gsi_setup() */
+void gsi_teardown(struct gsi *gsi)
+{
+ gsi_channel_teardown(gsi);
+}
+
+/* Initialize a channel's event ring */
+static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
+{
+ struct gsi *gsi = channel->gsi;
+ struct gsi_evt_ring *evt_ring;
+ int ret;
+
+ ret = gsi_evt_ring_id_alloc(gsi);
+ if (ret < 0)
+ return ret;
+ channel->evt_ring_id = ret;
+
+ evt_ring = &gsi->evt_ring[channel->evt_ring_id];
+ evt_ring->channel = channel;
+
+ ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
+ if (!ret)
+ return 0; /* Success! */
+
+ dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
+ ret, gsi_channel_id(channel));
+
+ gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_evt_ring_init() */
+static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
+{
+ u32 evt_ring_id = channel->evt_ring_id;
+ struct gsi *gsi = channel->gsi;
+ struct gsi_evt_ring *evt_ring;
+
+ evt_ring = &gsi->evt_ring[evt_ring_id];
+ gsi_ring_free(gsi, &evt_ring->ring);
+ gsi_evt_ring_id_free(gsi, evt_ring_id);
+}
+
+/* Init function for event rings */
+static void gsi_evt_ring_init(struct gsi *gsi)
+{
+ u32 evt_ring_id = 0;
+
+ gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
+ gsi->event_enable_bitmap = 0;
+ do
+ init_completion(&gsi->evt_ring[evt_ring_id].completion);
+ while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
+}
+
+/* Inverse of gsi_evt_ring_init() */
+static void gsi_evt_ring_exit(struct gsi *gsi)
+{
+ /* Nothing to do */
+}
+
+static bool gsi_channel_data_valid(struct gsi *gsi,
+ const struct ipa_gsi_endpoint_data *data)
+{
+#ifdef IPA_VALIDATION
+ u32 channel_id = data->channel_id;
+ struct device *dev = gsi->dev;
+
+ /* Make sure channel ids are in the range driver supports */
+ if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
+ dev_err(dev, "bad channel id %u (must be less than %u)\n",
+ channel_id, GSI_CHANNEL_COUNT_MAX);
+ return false;
+ }
+
+ if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
+ dev_err(dev, "bad EE id %u (AP or modem)\n", data->ee_id);
+ return false;
+ }
+
+ if (!data->channel.tlv_count ||
+ data->channel.tlv_count > GSI_TLV_MAX) {
+ dev_err(dev, "channel %u bad tlv_count %u (must be 1..%u)\n",
+ channel_id, data->channel.tlv_count, GSI_TLV_MAX);
+ return false;
+ }
+
+ /* We have to allow at least one maximally-sized transaction to
+ * be outstanding (which would use tlv_count TREs). Given how
+ * gsi_channel_tre_max() is computed, tre_count has to be almost
+ * twice the TLV FIFO size to satisfy this requirement.
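+ *
+ * For example, a channel with tlv_count = 12 needs a ring of at least
+ * 2 * 12 - 1 = 23 TREs for one maximally-sized transaction to be
+ * outstanding.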
+ */
+ if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
+ dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
+ channel_id, data->channel.tlv_count,
+ data->channel.tre_count);
+ return false;
+ }
+
+ if (!is_power_of_2(data->channel.tre_count)) {
+ dev_err(dev, "channel %u bad tre_count %u (not power of 2)\n",
+ channel_id, data->channel.tre_count);
+ return false;
+ }
+
+ if (!is_power_of_2(data->channel.event_count)) {
+ dev_err(dev, "channel %u bad event_count %u (not power of 2)\n",
+ channel_id, data->channel.event_count);
+ return false;
+ }
+#endif /* IPA_VALIDATION */
+
+ return true;
+}
+
+/* Init function for a single channel */
+static int gsi_channel_init_one(struct gsi *gsi,
+ const struct ipa_gsi_endpoint_data *data,
+ bool command, bool prefetch)
+{
+ struct gsi_channel *channel;
+ u32 tre_count;
+ int ret;
+
+ if (!gsi_channel_data_valid(gsi, data))
+ return -EINVAL;
+
+ /* Worst case we need an event for every outstanding TRE */
+ if (data->channel.tre_count > data->channel.event_count) {
+ dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
+			 data->channel_id, data->channel.event_count);
+ tre_count = data->channel.event_count;
+ } else {
+ tre_count = data->channel.tre_count;
+ }
+
+ channel = &gsi->channel[data->channel_id];
+ memset(channel, 0, sizeof(*channel));
+
+ channel->gsi = gsi;
+ channel->toward_ipa = data->toward_ipa;
+ channel->command = command;
+ channel->use_prefetch = command && prefetch;
+ channel->tlv_count = data->channel.tlv_count;
+ channel->tre_count = tre_count;
+ channel->event_count = data->channel.event_count;
+ init_completion(&channel->completion);
+
+ ret = gsi_channel_evt_ring_init(channel);
+ if (ret)
+ goto err_clear_gsi;
+
+ ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
+ if (ret) {
+ dev_err(gsi->dev, "error %d allocating channel %u ring\n",
+ ret, data->channel_id);
+ goto err_channel_evt_ring_exit;
+ }
+
+ ret = gsi_channel_trans_init(gsi, data->channel_id);
+ if (ret)
+ goto err_ring_free;
+
+ if (command) {
+ u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
+
+ ret = ipa_cmd_pool_init(channel, tre_max);
+ }
+ if (!ret)
+ return 0; /* Success! */
+
+ gsi_channel_trans_exit(channel);
+err_ring_free:
+ gsi_ring_free(gsi, &channel->tre_ring);
+err_channel_evt_ring_exit:
+ gsi_channel_evt_ring_exit(channel);
+err_clear_gsi:
+ channel->gsi = NULL; /* Mark it not (fully) initialized */
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_init_one() */
+static void gsi_channel_exit_one(struct gsi_channel *channel)
+{
+ if (!channel->gsi)
+ return; /* Ignore uninitialized channels */
+
+ if (channel->command)
+ ipa_cmd_pool_exit(channel);
+ gsi_channel_trans_exit(channel);
+ gsi_ring_free(channel->gsi, &channel->tre_ring);
+ gsi_channel_evt_ring_exit(channel);
+}
+
+/* Init function for channels */
+static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
+ const struct ipa_gsi_endpoint_data *data,
+ bool modem_alloc)
+{
+ int ret = 0;
+ u32 i;
+
+ gsi_evt_ring_init(gsi);
+
+ /* The endpoint data array is indexed by endpoint name */
+ for (i = 0; i < count; i++) {
+ bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
+
+ if (ipa_gsi_endpoint_data_empty(&data[i]))
+ continue; /* Skip over empty slots */
+
+ /* Mark modem channels to be allocated (hardware workaround) */
+ if (data[i].ee_id == GSI_EE_MODEM) {
+ if (modem_alloc)
+ gsi->modem_channel_bitmap |=
+ BIT(data[i].channel_id);
+ continue;
+ }
+
+ ret = gsi_channel_init_one(gsi, &data[i], command, prefetch);
+ if (ret)
+ goto err_unwind;
+ }
+
+ return ret;
+
+err_unwind:
+ while (i--) {
+ if (ipa_gsi_endpoint_data_empty(&data[i]))
+ continue;
+ if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
+ gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
+ continue;
+ }
+		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
+ }
+ gsi_evt_ring_exit(gsi);
+
+ return ret;
+}
+
+/* Inverse of gsi_channel_init() */
+static void gsi_channel_exit(struct gsi *gsi)
+{
+ u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
+
+ do
+ gsi_channel_exit_one(&gsi->channel[channel_id]);
+ while (channel_id--);
+ gsi->modem_channel_bitmap = 0;
+
+ gsi_evt_ring_exit(gsi);
+}
+
+/* Init function for GSI. GSI hardware does not need to be "ready" */
+int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
+ u32 count, const struct ipa_gsi_endpoint_data *data,
+ bool modem_alloc)
+{
+ struct resource *res;
+ resource_size_t size;
+ unsigned int irq;
+ int ret;
+
+ gsi_validate_build();
+
+ gsi->dev = &pdev->dev;
+
+ /* The GSI layer performs NAPI on all endpoints. NAPI requires a
+ * network device structure, but the GSI layer does not have one,
+ * so we must create a dummy network device for this purpose.
+ */
+ init_dummy_netdev(&gsi->dummy_dev);
+
+ /* Get the GSI IRQ and request for it to wake the system */
+ ret = platform_get_irq_byname(pdev, "gsi");
+ if (ret <= 0) {
+ dev_err(gsi->dev,
+ "DT error %d getting \"gsi\" IRQ property\n", ret);
+ return ret ? : -EINVAL;
+ }
+ irq = ret;
+
+ ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
+ if (ret) {
+ dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
+ return ret;
+ }
+ gsi->irq = irq;
+
+ ret = enable_irq_wake(gsi->irq);
+ if (ret)
+ dev_warn(gsi->dev, "error %d enabling gsi wake irq\n", ret);
+ gsi->irq_wake_enabled = !ret;
+
+ /* Get GSI memory range and map it */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
+ if (!res) {
+ dev_err(gsi->dev,
+ "DT error getting \"gsi\" memory property\n");
+ ret = -ENODEV;
+ goto err_disable_irq_wake;
+ }
+
+ size = resource_size(res);
+ if (res->start > U32_MAX || size > U32_MAX - res->start) {
+ dev_err(gsi->dev, "DT memory resource \"gsi\" out of range\n");
+ ret = -EINVAL;
+ goto err_disable_irq_wake;
+ }
+
+ gsi->virt = ioremap(res->start, size);
+ if (!gsi->virt) {
+ dev_err(gsi->dev, "unable to remap \"gsi\" memory\n");
+ ret = -ENOMEM;
+ goto err_disable_irq_wake;
+ }
+
+ ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
+ if (ret)
+ goto err_iounmap;
+
+ mutex_init(&gsi->mutex);
+ init_completion(&gsi->completion);
+
+ return 0;
+
+err_iounmap:
+ iounmap(gsi->virt);
+err_disable_irq_wake:
+ if (gsi->irq_wake_enabled)
+ (void)disable_irq_wake(gsi->irq);
+ free_irq(gsi->irq, gsi);
+
+ return ret;
+}
+
+/* Inverse of gsi_init() */
+void gsi_exit(struct gsi *gsi)
+{
+ mutex_destroy(&gsi->mutex);
+ gsi_channel_exit(gsi);
+ if (gsi->irq_wake_enabled)
+ (void)disable_irq_wake(gsi->irq);
+ free_irq(gsi->irq, gsi);
+ iounmap(gsi->virt);
+}
+
+/* The maximum number of outstanding TREs on a channel. This limits
+ * a channel's maximum number of transactions outstanding (worst case
+ * is one TRE per transaction).
+ *
+ * The absolute limit is the number of TREs in the channel's TRE ring,
+ * and in theory we should be able to use all of them. But in practice,
+ * doing that led to the hardware reporting exhaustion of event ring
+ * slots for writing completion information. So the hardware limit
+ * would be (tre_count - 1).
+ *
+ * We reduce it a bit further though. Transaction resource pools are
+ * sized to be a little larger than this maximum, to allow resource
+ * allocations to always be contiguous. The number of entries in a
+ * TRE ring buffer is a power of 2, and the extra resources in a pool
+ * tend to nearly double the memory allocated for it. Reducing the
+ * maximum number of outstanding TREs allows the number of entries in
+ * a pool to avoid crossing that power-of-2 boundary, and this can
+ * substantially reduce pool memory requirements. The number we
+ * reduce it by matches the number added in gsi_trans_pool_init().
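+ *
+ * For example, with tre_count = 256 and tlv_count = 12, this function
+ * returns 256 - (12 - 1) = 245. A pool sized for 245 allocations of
+ * up to 12 TREs then contains 245 + 12 - 1 = 256 entries, staying
+ * just within the power-of-2 boundary.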
+ */
+u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ /* Hardware limit is channel->tre_count - 1 */
+ return channel->tre_count - (channel->tlv_count - 1);
+}
+
+/* Returns the maximum number of TREs in a single transaction for a channel */
+u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ return channel->tlv_count;
+}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
new file mode 100644
index 000000000000..0698ff1ae7a6
--- /dev/null
+++ b/drivers/net/ipa/gsi.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _GSI_H_
+#define _GSI_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+/* Maximum number of channels and event rings supported by the driver */
+#define GSI_CHANNEL_COUNT_MAX 17
+#define GSI_EVT_RING_COUNT_MAX 13
+
+/* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */
+#define GSI_TLV_MAX 64
+
+struct device;
+struct scatterlist;
+struct platform_device;
+
+struct gsi;
+struct gsi_trans;
+struct gsi_channel_data;
+struct ipa_gsi_endpoint_data;
+
+/* Execution environment IDs */
+enum gsi_ee_id {
+ GSI_EE_AP = 0,
+ GSI_EE_MODEM = 1,
+ GSI_EE_UC = 2,
+ GSI_EE_TZ = 3,
+};
+
+struct gsi_ring {
+ void *virt; /* ring array base address */
+ dma_addr_t addr; /* primarily low 32 bits used */
+ u32 count; /* number of elements in ring */
+
+ /* The ring index value indicates the next "open" entry in the ring.
+ *
+ * A channel ring consists of TRE entries filled by the AP and passed
+ * to the hardware for processing. For a channel ring, the ring index
+ * identifies the next unused entry to be filled by the AP.
+ *
+ * An event ring consists of event structures filled by the hardware
+ * and passed to the AP. For event rings, the ring index identifies
+ * the next ring entry that is not known to have been filled by the
+ * hardware.
+ */
+ u32 index;
+};
+
+/* Transactions use several resources that can be allocated dynamically
+ * but taken from a fixed-size pool. The number of elements required for
+ * the pool is limited by the total number of TREs that can be outstanding.
+ *
+ * If sufficient TREs are available to reserve for a transaction,
+ * allocation from these pools is guaranteed to succeed. Furthermore,
+ * these resources are implicitly freed whenever the TREs in the
+ * transaction they're associated with are released.
+ *
+ * The result of a pool allocation of multiple elements is always
+ * contiguous.
+ */
+struct gsi_trans_pool {
+ void *base; /* base address of element pool */
+ u32 count; /* # elements in the pool */
+ u32 free; /* next free element in pool (modulo) */
+ u32 size; /* size (bytes) of an element */
+ u32 max_alloc; /* max allocation request */
+ dma_addr_t addr; /* DMA address if DMA pool (or 0) */
+};
+
+struct gsi_trans_info {
+ atomic_t tre_avail; /* TREs available for allocation */
+ struct gsi_trans_pool pool; /* transaction pool */
+ struct gsi_trans_pool sg_pool; /* scatterlist pool */
+ struct gsi_trans_pool cmd_pool; /* command payload DMA pool */
+ struct gsi_trans_pool info_pool;/* command information pool */
+ struct gsi_trans **map; /* TRE -> transaction map */
+
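+	/* A transaction moves through the lists below as it progresses:
+	 * allocated when created; pending once committed to hardware;
+	 * complete once hardware has finished with it; and polled once
+	 * it has been returned by gsi_channel_poll_one().
+	 */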
+ spinlock_t spinlock; /* protects updates to the lists */
+ struct list_head alloc; /* allocated, not committed */
+ struct list_head pending; /* committed, awaiting completion */
+ struct list_head complete; /* completed, awaiting poll */
+ struct list_head polled; /* returned by gsi_channel_poll_one() */
+};
+
+/* Hardware values signifying the state of a channel */
+enum gsi_channel_state {
+ GSI_CHANNEL_STATE_NOT_ALLOCATED = 0x0,
+ GSI_CHANNEL_STATE_ALLOCATED = 0x1,
+ GSI_CHANNEL_STATE_STARTED = 0x2,
+ GSI_CHANNEL_STATE_STOPPED = 0x3,
+ GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4,
+ GSI_CHANNEL_STATE_ERROR = 0xf,
+};
+
+/* We only care about channels between IPA and AP */
+struct gsi_channel {
+ struct gsi *gsi;
+ bool toward_ipa;
+ bool command; /* AP command TX channel or not */
+ bool use_prefetch; /* use prefetch (else escape buf) */
+
+ u8 tlv_count; /* # entries in TLV FIFO */
+ u16 tre_count;
+ u16 event_count;
+
+ struct completion completion; /* signals channel state changes */
+ enum gsi_channel_state state;
+
+ struct gsi_ring tre_ring;
+ u32 evt_ring_id;
+
+ u64 byte_count; /* total # bytes transferred */
+ u64 trans_count; /* total # transactions */
+ /* The following counts are used only for TX endpoints */
+ u64 queued_byte_count; /* last reported queued byte count */
+ u64 queued_trans_count; /* ...and queued trans count */
+ u64 compl_byte_count; /* last reported completed byte count */
+ u64 compl_trans_count; /* ...and completed trans count */
+
+ struct gsi_trans_info trans_info;
+
+ struct napi_struct napi;
+};
+
+/* Hardware values signifying the state of an event ring */
+enum gsi_evt_ring_state {
+ GSI_EVT_RING_STATE_NOT_ALLOCATED = 0x0,
+ GSI_EVT_RING_STATE_ALLOCATED = 0x1,
+ GSI_EVT_RING_STATE_ERROR = 0xf,
+};
+
+struct gsi_evt_ring {
+ struct gsi_channel *channel;
+ struct completion completion; /* signals event ring state changes */
+ enum gsi_evt_ring_state state;
+ struct gsi_ring ring;
+};
+
+struct gsi {
+ struct device *dev; /* Same as IPA device */
+ struct net_device dummy_dev; /* needed for NAPI */
+ void __iomem *virt;
+ u32 irq;
+ bool irq_wake_enabled;
+ u32 channel_count;
+ u32 evt_ring_count;
+ struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
+ struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
+ u32 event_bitmap;
+ u32 event_enable_bitmap;
+ u32 modem_channel_bitmap;
+ struct completion completion; /* for global EE commands */
+ struct mutex mutex; /* protects commands, programming */
+};
+
+/**
+ * gsi_setup() - Set up the GSI subsystem
+ * @gsi: Address of GSI structure embedded in an IPA structure
+ * @db_enable: Whether to use the GSI doorbell engine
+ *
+ * Return: 0 if successful, or a negative error code
+ *
+ * Performs initialization that must wait until the GSI hardware is
+ * ready (including firmware loaded).
+ */
+int gsi_setup(struct gsi *gsi, bool db_enable);
+
+/**
+ * gsi_teardown() - Tear down GSI subsystem
+ * @gsi: GSI address previously passed to a successful gsi_setup() call
+ */
+void gsi_teardown(struct gsi *gsi);
+
+/**
+ * gsi_channel_tre_max() - Channel maximum number of in-flight TREs
+ * @gsi: GSI pointer
+ * @channel_id: Channel whose limit is to be returned
+ *
+ * Return: The maximum number of TREs outstanding on the channel
+ */
+u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_trans_tre_max() - Maximum TREs in a single transaction
+ * @gsi: GSI pointer
+ * @channel_id: Channel whose limit is to be returned
+ *
+ * Return: The maximum TRE count per transaction on the channel
+ */
+u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_start() - Start an allocated GSI channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel to start
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int gsi_channel_start(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_stop() - Stop a started GSI channel
+ * @gsi: GSI pointer returned by gsi_setup()
+ * @channel_id: Channel to stop
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_reset() - Reset an allocated GSI channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel to be reset
+ * @db_enable: Whether doorbell engine should be enabled
+ *
+ * Reset a channel and reconfigure it. The @db_enable flag indicates
+ * whether the doorbell engine will be enabled following reconfiguration.
+ *
+ * GSI hardware relinquishes ownership of all pending receive buffer
+ * transactions and they will complete with their cancelled flag set.
+ */
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool db_enable);
+
+int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop);
+int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
+
+/**
+ * gsi_init() - Initialize the GSI subsystem
+ * @gsi:	Address of GSI structure embedded in an IPA structure
+ * @pdev:	IPA platform device
+ * @prefetch:	Whether the command channel should use TRE prefetching
+ * @count:	Number of entries in the @data configuration array
+ * @data:	Endpoint and channel configuration data
+ * @modem_alloc: Whether the AP allocates modem channels (hardware workaround)
+ *
+ * Return: 0 if successful, or a negative error code
+ *
+ * Early stage initialization of the GSI subsystem, performing tasks
+ * that can be done before the GSI hardware is ready to use.
+ */
+int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
+ u32 count, const struct ipa_gsi_endpoint_data *data,
+ bool modem_alloc);
+
+/**
+ * gsi_exit() - Exit the GSI subsystem
+ * @gsi: GSI address previously passed to a successful gsi_init() call
+ */
+void gsi_exit(struct gsi *gsi);
+
+#endif /* _GSI_H_ */
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
new file mode 100644
index 000000000000..b57d0198ebc1
--- /dev/null
+++ b/drivers/net/ipa/gsi_private.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _GSI_PRIVATE_H_
+#define _GSI_PRIVATE_H_
+
+/* === Only "gsi.c" and "gsi_trans.c" should include this file === */
+
+#include <linux/types.h>
+
+struct gsi_trans;
+struct gsi_ring;
+struct gsi_channel;
+
+#define GSI_RING_ELEMENT_SIZE 16 /* bytes */
+
+/* Return the entry that follows one provided in a transaction pool */
+void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element);
+
+/**
+ * gsi_trans_move_complete() - Mark a GSI transaction completed
+ * @trans: Transaction to commit
+ */
+void gsi_trans_move_complete(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_move_polled() - Mark a transaction polled
+ * @trans: Transaction to update
+ */
+void gsi_trans_move_polled(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_complete() - Complete a GSI transaction
+ * @trans: Transaction to complete
+ *
+ * Marks a transaction complete (including freeing it).
+ */
+void gsi_trans_complete(struct gsi_trans *trans);
+
+/**
+ * gsi_channel_trans_mapped() - Return a transaction mapped to a TRE index
+ * @channel: Channel associated with the transaction
+ * @index: Index of the TRE having a transaction
+ *
+ * Return: The GSI transaction pointer associated with the TRE index
+ */
+struct gsi_trans *gsi_channel_trans_mapped(struct gsi_channel *channel,
+ u32 index);
+
+/**
+ * gsi_channel_trans_complete() - Return a channel's next completed transaction
+ * @channel: Channel whose next transaction is to be returned
+ *
+ * Return: The next completed transaction, or NULL if nothing new
+ */
+struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel);
+
+/**
+ * gsi_channel_trans_cancel_pending() - Cancel pending transactions
+ * @channel: Channel whose pending transactions should be cancelled
+ *
+ * Cancel all pending transactions on a channel. These are transactions
+ * that have been committed but not yet completed. This is required when
+ * the channel gets reset. At that time all pending transactions will be
+ * marked as cancelled.
+ *
+ * NOTE: Transactions already complete at the time of this call are
+ * unaffected.
+ */
+void gsi_channel_trans_cancel_pending(struct gsi_channel *channel);
+
+/**
+ * gsi_channel_trans_init() - Initialize a channel's GSI transaction info
+ * @gsi: GSI pointer
+ * @channel_id: Channel number
+ *
+ * Return: 0 if successful, or -ENOMEM on allocation failure
+ *
+ * Creates and sets up information for managing transactions on a channel
+ */
+int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id);
+
+/**
+ * gsi_channel_trans_exit() - Inverse of gsi_channel_trans_init()
+ * @channel: Channel whose transaction information is to be cleaned up
+ */
+void gsi_channel_trans_exit(struct gsi_channel *channel);
+
+/**
+ * gsi_channel_doorbell() - Ring a channel's doorbell
+ * @channel: Channel whose doorbell should be rung
+ *
+ * Rings a channel's doorbell to inform the GSI hardware that new
+ * transactions (TREs, really) are available for it to process.
+ */
+void gsi_channel_doorbell(struct gsi_channel *channel);
+
+/**
+ * gsi_ring_virt() - Return virtual address for a ring entry
+ * @ring: Ring whose address is to be translated
+ * @index: Index (slot number) of entry
+ */
+void *gsi_ring_virt(struct gsi_ring *ring, u32 index);
+
+/**
+ * gsi_channel_tx_queued() - Report the number of bytes queued to hardware
+ * @channel: Channel whose bytes have been queued
+ *
+ * This arranges for the number of transactions and bytes queued to
+ * hardware to be reported. It passes this information up the network
+ * stack so it can be used to throttle transmissions.
+ */
+void gsi_channel_tx_queued(struct gsi_channel *channel);
+
+#endif /* _GSI_PRIVATE_H_ */
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
new file mode 100644
index 000000000000..7613b9cc7cf6
--- /dev/null
+++ b/drivers/net/ipa/gsi_reg.h
@@ -0,0 +1,417 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _GSI_REG_H_
+#define _GSI_REG_H_
+
+/* === Only "gsi.c" should include this file === */
+
+#include <linux/bits.h>
+
+/**
+ * DOC: GSI Registers
+ *
+ * GSI registers are located within the "gsi" address space defined by Device
+ * Tree. The offset of each register within that space is specified by
+ * symbols defined below. The GSI address space is mapped to virtual memory
+ * space in gsi_init(). All GSI registers are 32 bits wide.
+ *
+ * Each register type is duplicated for a number of instances of something.
+ * For example, each GSI channel has its own set of registers defining its
+ * configuration. The offset to a channel's set of registers is computed
+ * based on a "base" offset plus an additional "stride" amount computed
+ * from the channel's ID. For such registers, the offset is computed by a
+ * function-like macro that takes a parameter used in the computation.
+ *
+ * The offset of a register dependent on execution environment is computed
+ * by a macro that is supplied a parameter "ee". The "ee" value is a member
+ * of the gsi_ee_id enumerated type.
+ *
+ * The offset of a channel register is computed by a macro that is supplied a
+ * parameter "ch". The "ch" value is a channel id whose maximum value is 30
+ * (though the actual limit is hardware-dependent).
+ *
+ * The offset of an event register is computed by a macro that is supplied a
+ * parameter "ev". The "ev" value is an event id whose maximum value is 15
+ * (though the actual limit is hardware-dependent).
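+ *
+ * For example, the AP's context 0 register for channel 5 is at offset
+ * GSI_CH_C_CNTXT_0_OFFSET(5), which expands to
+ * 0x0001c000 + 0x4000 * GSI_EE_AP + 0x80 * 5, or 0x0001c280.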
+ */
+
+#define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
+ GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
+ (0x0000c018 + 0x1000 * (ee))
+
+#define GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET \
+ GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
+ (0x0000c01c + 0x1000 * (ee))
+
+#define GSI_INTER_EE_SRC_CH_IRQ_CLR_OFFSET \
+ GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(ee) \
+ (0x0000c028 + 0x1000 * (ee))
+
+#define GSI_INTER_EE_SRC_EV_CH_IRQ_CLR_OFFSET \
+ GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
+ (0x0000c02c + 0x1000 * (ee))
+
+#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
+ GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \
+ (0x0001c000 + 0x4000 * (ee) + 0x80 * (ch))
+#define CHTYPE_PROTOCOL_FMASK GENMASK(2, 0)
+#define CHTYPE_DIR_FMASK GENMASK(3, 3)
+#define EE_FMASK GENMASK(7, 4)
+#define CHID_FMASK GENMASK(12, 8)
+/* The next field is present for GSI v2.0 and above */
+#define CHTYPE_PROTOCOL_MSB_FMASK GENMASK(13, 13)
+#define ERINDEX_FMASK GENMASK(18, 14)
+#define CHSTATE_FMASK GENMASK(23, 20)
+#define ELEMENT_SIZE_FMASK GENMASK(31, 24)
+
+#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
+ GSI_EE_N_CH_C_CNTXT_1_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_CNTXT_1_OFFSET(ch, ee) \
+ (0x0001c004 + 0x4000 * (ee) + 0x80 * (ch))
+#define R_LENGTH_FMASK GENMASK(15, 0)
+
+#define GSI_CH_C_CNTXT_2_OFFSET(ch) \
+ GSI_EE_N_CH_C_CNTXT_2_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_CNTXT_2_OFFSET(ch, ee) \
+ (0x0001c008 + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_CNTXT_3_OFFSET(ch) \
+ GSI_EE_N_CH_C_CNTXT_3_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_CNTXT_3_OFFSET(ch, ee) \
+ (0x0001c00c + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_QOS_OFFSET(ch) \
+ GSI_EE_N_CH_C_QOS_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_QOS_OFFSET(ch, ee) \
+ (0x0001c05c + 0x4000 * (ee) + 0x80 * (ch))
+#define WRR_WEIGHT_FMASK GENMASK(3, 0)
+#define MAX_PREFETCH_FMASK GENMASK(8, 8)
+#define USE_DB_ENG_FMASK GENMASK(9, 9)
+/* The next field is present for GSI v2.0 and above */
+#define USE_ESCAPE_BUF_ONLY_FMASK GENMASK(10, 10)
+
+#define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
+ GSI_EE_N_CH_C_SCRATCH_0_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_SCRATCH_0_OFFSET(ch, ee) \
+ (0x0001c060 + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_SCRATCH_1_OFFSET(ch) \
+ GSI_EE_N_CH_C_SCRATCH_1_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_SCRATCH_1_OFFSET(ch, ee) \
+ (0x0001c064 + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_SCRATCH_2_OFFSET(ch) \
+ GSI_EE_N_CH_C_SCRATCH_2_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_SCRATCH_2_OFFSET(ch, ee) \
+ (0x0001c068 + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_CH_C_SCRATCH_3_OFFSET(ch) \
+ GSI_EE_N_CH_C_SCRATCH_3_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_SCRATCH_3_OFFSET(ch, ee) \
+ (0x0001c06c + 0x4000 * (ee) + 0x80 * (ch))
+
+#define GSI_EV_CH_E_CNTXT_0_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET(ev, ee) \
+ (0x0001d000 + 0x4000 * (ee) + 0x80 * (ev))
+#define EV_CHTYPE_FMASK GENMASK(3, 0)
+#define EV_EE_FMASK GENMASK(7, 4)
+#define EV_EVCHID_FMASK GENMASK(15, 8)
+#define EV_INTYPE_FMASK GENMASK(16, 16)
+#define EV_CHSTATE_FMASK GENMASK(23, 20)
+#define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24)
+
+#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET(ev, ee) \
+ (0x0001d004 + 0x4000 * (ee) + 0x80 * (ev))
+#define EV_R_LENGTH_FMASK GENMASK(15, 0)
+
+#define GSI_EV_CH_E_CNTXT_2_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET(ev, ee) \
+ (0x0001d008 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_3_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_3_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_3_OFFSET(ev, ee) \
+ (0x0001d00c + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_4_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_4_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_4_OFFSET(ev, ee) \
+ (0x0001d010 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_8_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_8_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_8_OFFSET(ev, ee) \
+ (0x0001d020 + 0x4000 * (ee) + 0x80 * (ev))
+#define MODT_FMASK GENMASK(15, 0)
+#define MODC_FMASK GENMASK(23, 16)
+#define MOD_CNT_FMASK GENMASK(31, 24)
+
+#define GSI_EV_CH_E_CNTXT_9_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_9_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_9_OFFSET(ev, ee) \
+ (0x0001d024 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_10_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_10_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_10_OFFSET(ev, ee) \
+ (0x0001d028 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_11_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_11_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_11_OFFSET(ev, ee) \
+ (0x0001d02c + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_12_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_12_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_12_OFFSET(ev, ee) \
+ (0x0001d030 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_CNTXT_13_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_CNTXT_13_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_CNTXT_13_OFFSET(ev, ee) \
+ (0x0001d034 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_SCRATCH_0_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_SCRATCH_0_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_SCRATCH_0_OFFSET(ev, ee) \
+ (0x0001d048 + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_EV_CH_E_SCRATCH_1_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_SCRATCH_1_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_SCRATCH_1_OFFSET(ev, ee) \
+ (0x0001d04c + 0x4000 * (ee) + 0x80 * (ev))
+
+#define GSI_CH_C_DOORBELL_0_OFFSET(ch) \
+ GSI_EE_N_CH_C_DOORBELL_0_OFFSET((ch), GSI_EE_AP)
+#define GSI_EE_N_CH_C_DOORBELL_0_OFFSET(ch, ee) \
+ (0x0001e000 + 0x4000 * (ee) + 0x08 * (ch))
+
+#define GSI_EV_CH_E_DOORBELL_0_OFFSET(ev) \
+ GSI_EE_N_EV_CH_E_DOORBELL_0_OFFSET((ev), GSI_EE_AP)
+#define GSI_EE_N_EV_CH_E_DOORBELL_0_OFFSET(ev, ee) \
+ (0x0001e100 + 0x4000 * (ee) + 0x08 * (ev))
+
+#define GSI_GSI_STATUS_OFFSET \
+ GSI_EE_N_GSI_STATUS_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_GSI_STATUS_OFFSET(ee) \
+ (0x0001f000 + 0x4000 * (ee))
+#define ENABLED_FMASK GENMASK(0, 0)
+
+#define GSI_CH_CMD_OFFSET \
+ GSI_EE_N_CH_CMD_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CH_CMD_OFFSET(ee) \
+ (0x0001f008 + 0x4000 * (ee))
+#define CH_CHID_FMASK GENMASK(7, 0)
+#define CH_OPCODE_FMASK GENMASK(31, 24)
+
+#define GSI_EV_CH_CMD_OFFSET \
+ GSI_EE_N_EV_CH_CMD_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_EV_CH_CMD_OFFSET(ee) \
+ (0x0001f010 + 0x4000 * (ee))
+#define EV_CHID_FMASK GENMASK(7, 0)
+#define EV_OPCODE_FMASK GENMASK(31, 24)
+
+#define GSI_GENERIC_CMD_OFFSET \
+ GSI_EE_N_GENERIC_CMD_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_GENERIC_CMD_OFFSET(ee) \
+ (0x0001f018 + 0x4000 * (ee))
+#define GENERIC_OPCODE_FMASK GENMASK(4, 0)
+#define GENERIC_CHID_FMASK GENMASK(9, 5)
+#define GENERIC_EE_FMASK GENMASK(13, 10)
+
+#define GSI_GSI_HW_PARAM_2_OFFSET \
+ GSI_EE_N_GSI_HW_PARAM_2_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_GSI_HW_PARAM_2_OFFSET(ee) \
+ (0x0001f040 + 0x4000 * (ee))
+#define IRAM_SIZE_FMASK GENMASK(2, 0)
+#define IRAM_SIZE_ONE_KB_FVAL 0
+#define IRAM_SIZE_TWO_KB_FVAL 1
+/* The next two values are available for GSI v2.0 and above */
+#define IRAM_SIZE_TWO_N_HALF_KB_FVAL 2
+#define IRAM_SIZE_THREE_KB_FVAL 3
+#define NUM_CH_PER_EE_FMASK GENMASK(7, 3)
+#define NUM_EV_PER_EE_FMASK GENMASK(12, 8)
+#define GSI_CH_PEND_TRANSLATE_FMASK GENMASK(13, 13)
+#define GSI_CH_FULL_LOGIC_FMASK GENMASK(14, 14)
+/* Fields below are present for GSI v2.0 and above */
+#define GSI_USE_SDMA_FMASK GENMASK(15, 15)
+#define GSI_SDMA_N_INT_FMASK GENMASK(18, 16)
+#define GSI_SDMA_MAX_BURST_FMASK GENMASK(26, 19)
+#define GSI_SDMA_N_IOVEC_FMASK GENMASK(29, 27)
+/* Fields below are present for GSI v2.2 and above */
+#define GSI_USE_RD_WR_ENG_FMASK GENMASK(30, 30)
+#define GSI_USE_INTER_EE_FMASK GENMASK(31, 31)
+
+#define GSI_CNTXT_TYPE_IRQ_OFFSET \
+ GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(ee) \
+ (0x0001f080 + 0x4000 * (ee))
+#define CH_CTRL_FMASK GENMASK(0, 0)
+#define EV_CTRL_FMASK GENMASK(1, 1)
+#define GLOB_EE_FMASK GENMASK(2, 2)
+#define IEOB_FMASK GENMASK(3, 3)
+#define INTER_EE_CH_CTRL_FMASK GENMASK(4, 4)
+#define INTER_EE_EV_CTRL_FMASK GENMASK(5, 5)
+#define GENERAL_FMASK GENMASK(6, 6)
+
+#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
+ GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
+ (0x0001f088 + 0x4000 * (ee))
+#define MSK_CH_CTRL_FMASK GENMASK(0, 0)
+#define MSK_EV_CTRL_FMASK GENMASK(1, 1)
+#define MSK_GLOB_EE_FMASK GENMASK(2, 2)
+#define MSK_IEOB_FMASK GENMASK(3, 3)
+#define MSK_INTER_EE_CH_CTRL_FMASK GENMASK(4, 4)
+#define MSK_INTER_EE_EV_CTRL_FMASK GENMASK(5, 5)
+#define MSK_GENERAL_FMASK GENMASK(6, 6)
+#define GSI_CNTXT_TYPE_IRQ_MSK_ALL GENMASK(6, 0)
+
+#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
+ GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(ee) \
+ (0x0001f090 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET \
+ GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_OFFSET(ee) \
+ (0x0001f094 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET \
+ GSI_EE_N_CNTXT_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_CH_IRQ_MSK_OFFSET(ee) \
+ (0x0001f098 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET \
+ GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
+ (0x0001f09c + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_CH_IRQ_CLR_OFFSET(ee) \
+ (0x0001f0a0 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
+ (0x0001f0a4 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_IEOB_IRQ_OFFSET \
+ GSI_EE_N_CNTXT_SRC_IEOB_IRQ_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_OFFSET(ee) \
+ (0x0001f0b0 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET \
+ GSI_EE_N_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET(ee) \
+ (0x0001f0b8 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET(ee) \
+ (0x0001f0c0 + 0x4000 * (ee))
+
+#define GSI_CNTXT_GLOB_IRQ_STTS_OFFSET \
+ GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(ee) \
+ (0x0001f100 + 0x4000 * (ee))
+#define ERROR_INT_FMASK GENMASK(0, 0)
+#define GP_INT1_FMASK GENMASK(1, 1)
+#define GP_INT2_FMASK GENMASK(2, 2)
+#define GP_INT3_FMASK GENMASK(3, 3)
+
+#define GSI_CNTXT_GLOB_IRQ_EN_OFFSET \
+ GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(ee) \
+ (0x0001f108 + 0x4000 * (ee))
+#define EN_ERROR_INT_FMASK GENMASK(0, 0)
+#define EN_GP_INT1_FMASK GENMASK(1, 1)
+#define EN_GP_INT2_FMASK GENMASK(2, 2)
+#define EN_GP_INT3_FMASK GENMASK(3, 3)
+#define GSI_CNTXT_GLOB_IRQ_ALL GENMASK(3, 0)
+
+#define GSI_CNTXT_GLOB_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(ee) \
+ (0x0001f110 + 0x4000 * (ee))
+#define CLR_ERROR_INT_FMASK GENMASK(0, 0)
+#define CLR_GP_INT1_FMASK GENMASK(1, 1)
+#define CLR_GP_INT2_FMASK GENMASK(2, 2)
+#define CLR_GP_INT3_FMASK GENMASK(3, 3)
+
+#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
+ GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(ee) \
+ (0x0001f118 + 0x4000 * (ee))
+#define BREAK_POINT_FMASK GENMASK(0, 0)
+#define BUS_ERROR_FMASK GENMASK(1, 1)
+#define CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
+#define MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
+
+#define GSI_CNTXT_GSI_IRQ_EN_OFFSET \
+ GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(ee) \
+ (0x0001f120 + 0x4000 * (ee))
+#define EN_BREAK_POINT_FMASK GENMASK(0, 0)
+#define EN_BUS_ERROR_FMASK GENMASK(1, 1)
+#define EN_CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
+#define EN_MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
+#define GSI_CNTXT_GSI_IRQ_ALL GENMASK(3, 0)
+
+#define GSI_CNTXT_GSI_IRQ_CLR_OFFSET \
+ GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(ee) \
+ (0x0001f128 + 0x4000 * (ee))
+#define CLR_BREAK_POINT_FMASK GENMASK(0, 0)
+#define CLR_BUS_ERROR_FMASK GENMASK(1, 1)
+#define CLR_CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
+#define CLR_MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
+
+#define GSI_CNTXT_INTSET_OFFSET \
+ GSI_EE_N_CNTXT_INTSET_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_INTSET_OFFSET(ee) \
+ (0x0001f180 + 0x4000 * (ee))
+#define INTYPE_FMASK GENMASK(0, 0)
+
+#define GSI_ERROR_LOG_OFFSET \
+ GSI_EE_N_ERROR_LOG_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_ERROR_LOG_OFFSET(ee) \
+ (0x0001f200 + 0x4000 * (ee))
+#define ERR_ARG3_FMASK GENMASK(3, 0)
+#define ERR_ARG2_FMASK GENMASK(7, 4)
+#define ERR_ARG1_FMASK GENMASK(11, 8)
+#define ERR_CODE_FMASK GENMASK(15, 12)
+#define ERR_VIRT_IDX_FMASK GENMASK(23, 19)
+#define ERR_TYPE_FMASK GENMASK(27, 24)
+#define ERR_EE_FMASK GENMASK(31, 28)
+
+#define GSI_ERROR_LOG_CLR_OFFSET \
+ GSI_EE_N_ERROR_LOG_CLR_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_ERROR_LOG_CLR_OFFSET(ee) \
+ (0x0001f210 + 0x4000 * (ee))
+
+#define GSI_CNTXT_SCRATCH_0_OFFSET \
+ GSI_EE_N_CNTXT_SCRATCH_0_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_SCRATCH_0_OFFSET(ee) \
+ (0x0001f400 + 0x4000 * (ee))
+#define INTER_EE_RESULT_FMASK GENMASK(2, 0)
+#define GENERIC_EE_RESULT_FMASK GENMASK(7, 5)
+#define GENERIC_EE_SUCCESS_FVAL 1
+#define GENERIC_EE_NO_RESOURCES_FVAL 7
+#define USB_MAX_PACKET_FMASK GENMASK(15, 15) /* 0: HS; 1: SS */
+#define MHI_BASE_CHANNEL_FMASK GENMASK(31, 24)
+
+#endif /* _GSI_REG_H_ */
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
new file mode 100644
index 000000000000..2fd21d75367d
--- /dev/null
+++ b/drivers/net/ipa/gsi_trans.c
@@ -0,0 +1,786 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/refcount.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-direction.h>
+
+#include "gsi.h"
+#include "gsi_private.h"
+#include "gsi_trans.h"
+#include "ipa_gsi.h"
+#include "ipa_data.h"
+#include "ipa_cmd.h"
+
+/**
+ * DOC: GSI Transactions
+ *
+ * A GSI transaction abstracts the behavior of a GSI channel by representing
+ * everything about a related group of IPA commands in a single structure.
+ * (A "command" in this sense is either a data transfer or an IPA immediate
+ * command.) Most details of interaction with the GSI hardware are managed
+ * by the GSI transaction core, allowing users to simply describe commands
+ * to be performed. When a transaction has completed a callback function
+ * (dependent on the type of endpoint associated with the channel) allows
+ * cleanup of resources associated with the transaction.
+ *
+ * To perform a command (or set of them), a user of the GSI transaction
+ * interface allocates a transaction, indicating the number of TREs required
+ * (one per command). If sufficient TREs are available, they are reserved
+ * for use in the transaction and the allocation succeeds. This way
+ * exhaustion of the available TREs in a channel ring is detected
+ * as early as possible. All resources required to complete a transaction
+ * are allocated at transaction allocation time.
+ *
+ * Commands performed as part of a transaction are represented in an array
+ * of Linux scatterlist structures. This array is allocated with the
+ * transaction, and its entries are initialized using standard scatterlist
+ * functions (such as sg_set_buf() or skb_to_sgvec()).
+ *
+ * Once a transaction's scatterlist structures have been initialized, the
+ * transaction is committed. The caller is responsible for mapping buffers
+ * for DMA if necessary, and this should be done *before* allocating
+ * the transaction. Between a successful allocation and commit of a
+ * transaction no errors should occur.
+ *
+ * Committing transfers ownership of the entire transaction to the GSI
+ * transaction core. The GSI transaction code formats the content of
+ * the scatterlist array into the channel ring buffer and informs the
+ * hardware that new TREs are available to process.
+ *
+ * The last TRE in each transaction is marked to interrupt the AP when the
+ * GSI hardware has completed it. Because transfers described by TREs are
+ * performed strictly in order, signaling the completion of just the last
+ * TRE in the transaction is sufficient to indicate the full transaction
+ * is complete.
+ *
+ * When a transaction is complete, the GSI code calls
+ * ipa_gsi_trans_complete() in the IPA layer, allowing it to perform any
+ * final cleanup required before the transaction is freed.
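+ *
+ * A minimal sketch of this flow, assuming the allocation and commit
+ * helpers declared in "gsi_trans.h" (names and signatures here are
+ * illustrative; error handling is omitted). The final "true" argument
+ * to the commit call requests that the channel doorbell be rung:
+ *
+ *	trans = gsi_channel_trans_alloc(gsi, channel_id, 2, DMA_TO_DEVICE);
+ *	sg_set_buf(&trans->sgl[0], buf1, len1);
+ *	sg_set_buf(&trans->sgl[1], buf2, len2);
+ *	gsi_trans_commit(trans, true);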
+ */
+
+/* Hardware values representing a transfer element type */
+enum gsi_tre_type {
+ GSI_RE_XFER = 0x2,
+ GSI_RE_IMMD_CMD = 0x3,
+};
+
+/* An entry in a channel ring */
+struct gsi_tre {
+ __le64 addr; /* DMA address */
+ __le16 len_opcode; /* length in bytes or enum IPA_CMD_* */
+ __le16 reserved;
+ __le32 flags; /* TRE_FLAGS_* */
+};
+
+/* gsi_tre->flags mask values (in CPU byte order) */
+#define TRE_FLAGS_CHAIN_FMASK GENMASK(0, 0)
+#define TRE_FLAGS_IEOB_FMASK GENMASK(8, 8)
+#define TRE_FLAGS_IEOT_FMASK GENMASK(9, 9)
+#define TRE_FLAGS_BEI_FMASK GENMASK(10, 10)
+#define TRE_FLAGS_TYPE_FMASK GENMASK(23, 16)
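+
+/* For example, in a two-TRE transfer the first TRE would have
+ * TRE_FLAGS_CHAIN_FMASK set to chain it to the next one, while the last
+ * would instead have TRE_FLAGS_IEOT_FMASK set so the hardware raises an
+ * interrupt when the transfer completes; both encode GSI_RE_XFER in
+ * TRE_FLAGS_TYPE_FMASK. (Illustrative only; the code that fills TREs
+ * appears later in this file.)
+ */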
+
+int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
+ u32 max_alloc)
+{
+ void *virt;
+
+#ifdef IPA_VALIDATE
+ if (!size || size % 8)
+ return -EINVAL;
+ if (count < max_alloc)
+ return -EINVAL;
+ if (!max_alloc)
+ return -EINVAL;
+#endif /* IPA_VALIDATE */
+
+ /* By allocating a few extra entries in our pool (one less
+ * than the maximum number that will be requested in a
+ * single allocation), we can always satisfy requests without
+ * ever worrying about straddling the end of the pool array.
+ * If there aren't enough entries starting at the free index,
+ * we just allocate free entries from the beginning of the pool.
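+ *
+ * For example, with count = 8 and max_alloc = 3 we allocate 10
+ * entries; a request for 3 entries arriving when the free index
+ * leaves fewer than 3 before the end is satisfied from index 0
+ * instead.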
+ */
+ virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ pool->base = virt;
+ /* If the allocator gave us any extra memory, use it */
+ pool->count = ksize(pool->base) / size;
+ pool->free = 0;
+ pool->max_alloc = max_alloc;
+ pool->size = size;
+ pool->addr = 0; /* Only used for DMA pools */
+
+ return 0;
+}
+
+void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
+{
+ kfree(pool->base);
+ memset(pool, 0, sizeof(*pool));
+}
+
+/* Home-grown DMA pool. This way we can preallocate and use the tre_count
+ * to guarantee allocations will succeed. Even though we specify max_alloc
+ * (and it can be more than one), we only allow allocation of a single
+ * element from a DMA pool.
+ */
+int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
+ size_t size, u32 count, u32 max_alloc)
+{
+ size_t total_size;
+ dma_addr_t addr;
+ void *virt;
+
+#ifdef IPA_VALIDATE
+ if (!size || size % 8)
+ return -EINVAL;
+ if (count < max_alloc)
+ return -EINVAL;
+ if (!max_alloc)
+ return -EINVAL;
+#endif /* IPA_VALIDATE */
+
+ /* Don't let allocations cross a power-of-two boundary */
+ size = __roundup_pow_of_two(size);
+ total_size = (count + max_alloc - 1) * size;
+
+	/* The underlying allocator rounds DMA allocations up to a
+	 * power-of-2 number of pages anyway. Request that full amount
+	 * ourselves, so any memory beyond the required space is used
+	 * for extra pool entries rather than wasted.
+	 */
+	total_size = PAGE_SIZE << get_order(total_size);
+
+ virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ pool->base = virt;
+ pool->count = total_size / size;
+ pool->free = 0;
+ pool->size = size;
+ pool->max_alloc = max_alloc;
+ pool->addr = addr;
+
+ return 0;
+}
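+
+/* Sizing example (illustrative numbers only): for size = 24, count = 8,
+ * and max_alloc = 3, the element size is first rounded up to 32 bytes
+ * so no element crosses a power-of-two boundary. The raw requirement
+ * is then (8 + 3 - 1) * 32 = 320 bytes, which rounds up to one 4 KB
+ * page, and the pool ends up with 4096 / 32 = 128 usable elements.
+ */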
+
+void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
+{
+ /* Free the pool's entire DMA allocation, not just one element */
+ size_t total_size = pool->count * pool->size;
+
+ dma_free_coherent(dev, total_size, pool->base, pool->addr);
+ memset(pool, 0, sizeof(*pool));
+}
+
+/* Return the byte offset of the next free entry in the pool */
+static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
+{
+ u32 offset;
+
+ /* assert(count > 0); */
+ /* assert(count <= pool->max_alloc); */
+
+ /* Allocate from beginning if wrap would occur */
+ if (count > pool->count - pool->free)
+ pool->free = 0;
+
+ offset = pool->free * pool->size;
+ pool->free += count;
+ memset(pool->base + offset, 0, count * pool->size);
+
+ return offset;
+}
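+
+/* Worked example (illustrative): a pool created with count = 8 and
+ * max_alloc = 3 holds at least 8 + 3 - 1 = 10 slots. If free = 8, a
+ * request for 3 slots would run past the end (only 2 remain), so free
+ * is reset and slots 0-2 are returned instead. The max_alloc - 1
+ * spare slots are what make this reset safe: at least "count" usable
+ * slots remain available no matter where the wrap occurs.
+ */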
+
+/* Allocate a contiguous block of zeroed entries from a pool */
+void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
+{
+ return pool->base + gsi_trans_pool_alloc_common(pool, count);
+}
+
+/* Allocate a single zeroed entry from a DMA pool */
+void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
+{
+ u32 offset = gsi_trans_pool_alloc_common(pool, 1);
+
+ *addr = pool->addr + offset;
+
+ return pool->base + offset;
+}
+
+/* Return the pool element that immediately follows the one given.
+ * This only works if elements are allocated one at a time.
+ */
+void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
+{
+ void *end = pool->base + pool->count * pool->size;
+
+ /* assert(element >= pool->base); */
+ /* assert(element < end); */
+ /* assert(pool->max_alloc == 1); */
+ element += pool->size;
+
+ return element < end ? element : pool->base;
+}
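+
+/* Traversal sketch (hypothetical caller): because the return value
+ * wraps from the last element back to the base of the pool, elements
+ * allocated one at a time can be walked as a ring:
+ *
+ *	elem = first;
+ *	do {
+ *		process(elem);
+ *		elem = gsi_trans_pool_next(pool, elem);
+ *	} while (elem != first);
+ *
+ * Here "first" and "process()" are illustrative only.
+ */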
+
+/* Map a given ring entry index to the transaction associated with it */
+static void gsi_channel_trans_map(struct gsi_channel *channel, u32 index,
+ struct gsi_trans *trans)
+{
+ /* Note: index *must* be used modulo the ring count here */
+ channel->trans_info.map[index % channel->tre_ring.count] = trans;
+}
+
+/* Return the transaction mapped to a given ring entry */
+struct gsi_trans *
+gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
+{
+ /* Note: index *must* be used modulo the ring count here */
+ return channel->trans_info.map[index % channel->tre_ring.count];
+}
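+
+/* Example (illustrative): with tre_ring.count = 256, absolute TRE
+ * index 300 maps to slot 300 % 256 = 44. The channel's ring index
+ * increases without bound as TREs are filled and completed, so the
+ * writer (commit) and reader (completion) must both apply the same
+ * modulo to agree on a slot.
+ */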
+
+/* Return the oldest completed transaction for a channel (or null) */
+struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
+{
+ return list_first_entry_or_null(&channel->trans_info.complete,
+ struct gsi_trans, links);
+}
+
+/* Move a transaction from the allocated list to the pending list */
+static void gsi_trans_move_pending(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ list_move_tail(&trans->links, &trans_info->pending);
+
+ spin_unlock_bh(&trans_info->spinlock);
+}
+
+/* Move a transaction and all of its predecessors from the pending list
+ * to the completed list.
+ */
+void gsi_trans_move_complete(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct list_head list;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ /* Move this transaction and all predecessors to completed list */
+ list_cut_position(&list, &trans_info->pending, &trans->links);
+ list_splice_tail(&list, &trans_info->complete);
+
+ spin_unlock_bh(&trans_info->spinlock);
+}
+
+/* Move a transaction from the completed list to the polled list */
+void gsi_trans_move_polled(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ list_move_tail(&trans->links, &trans_info->polled);
+
+ spin_unlock_bh(&trans_info->spinlock);
+}
+
+/* Reserve some number of TREs on a channel. Returns true if successful */
+static bool
+gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
+{
+ int avail = atomic_read(&trans_info->tre_avail);
+ int new;
+
+ do {
+ new = avail - (int)tre_count;
+ if (unlikely(new < 0))
+ return false;
+ } while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));
+
+ return true;
+}
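+
+/* The loop above is a lock-free conditional decrement: it succeeds
+ * only if tre_avail never goes negative. A sketch of the equivalent
+ * locked logic (illustrative only) would be:
+ *
+ *	spin_lock(&lock);
+ *	ok = avail >= tre_count;
+ *	if (ok)
+ *		avail -= tre_count;
+ *	spin_unlock(&lock);
+ *
+ * atomic_try_cmpxchg() re-reads the current value into "avail" on
+ * failure, so the loop simply retries with the updated count.
+ */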
+
+/* Release previously-reserved TRE entries to a channel */
+static void
+gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
+{
+ atomic_add(tre_count, &trans_info->tre_avail);
+}
+
+/* Allocate a GSI transaction on a channel */
+struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
+ u32 tre_count,
+ enum dma_data_direction direction)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct gsi_trans_info *trans_info;
+ struct gsi_trans *trans;
+
+ /* assert(tre_count <= gsi_channel_trans_tre_max(gsi, channel_id)); */
+
+ trans_info = &channel->trans_info;
+
+ /* We reserve the TREs now, but consume them at commit time.
+ * If there aren't enough available, we're done.
+ */
+ if (!gsi_trans_tre_reserve(trans_info, tre_count))
+ return NULL;
+
+ /* Allocate and initialize non-zero fields in the transaction */
+ trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
+ trans->gsi = gsi;
+ trans->channel_id = channel_id;
+ trans->tre_count = tre_count;
+ init_completion(&trans->completion);
+
+ /* Allocate the scatterlist and (if requested) info entries. */
+ trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
+ sg_init_marker(trans->sgl, tre_count);
+
+ trans->direction = direction;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ list_add_tail(&trans->links, &trans_info->alloc);
+
+ spin_unlock_bh(&trans_info->spinlock);
+
+ refcount_set(&trans->refcount, 1);
+
+ return trans;
+}
+
+/* Free a previously-allocated transaction (used only in case of error) */
+void gsi_trans_free(struct gsi_trans *trans)
+{
+ struct gsi_trans_info *trans_info;
+
+ if (!refcount_dec_and_test(&trans->refcount))
+ return;
+
+ trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
+
+ spin_lock_bh(&trans_info->spinlock);
+
+ list_del(&trans->links);
+
+ spin_unlock_bh(&trans_info->spinlock);
+
+ ipa_gsi_trans_release(trans);
+
+ /* Releasing the reserved TREs implicitly frees the sgl[] and
+ * (if present) info[] arrays, plus the transaction itself.
+ */
+ gsi_trans_tre_release(trans_info, trans->tre_count);
+}
+
+/* Add an immediate command to a transaction */
+void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
+ dma_addr_t addr, enum dma_data_direction direction,
+ enum ipa_cmd_opcode opcode)
+{
+ struct ipa_cmd_info *info;
+ u32 which = trans->used++;
+ struct scatterlist *sg;
+
+ /* assert(which < trans->tre_count); */
+
+ /* Set the page information for the buffer. We also need to fill in
+ * the DMA address for the buffer (something dma_map_sg() normally
+ * does).
+ */
+ sg = &trans->sgl[which];
+
+ sg_set_buf(sg, buf, size);
+ sg_dma_address(sg) = addr;
+
+ info = &trans->info[which];
+ info->opcode = opcode;
+ info->direction = direction;
+}
+
+/* Add a page transfer to a transaction. It will fill the only TRE. */
+int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
+ u32 offset)
+{
+ struct scatterlist *sg = &trans->sgl[0];
+ int ret;
+
+ /* assert(trans->tre_count == 1); */
+ /* assert(!trans->used); */
+
+ sg_set_page(sg, page, size, offset);
+ ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
+ if (!ret)
+ return -ENOMEM;
+
+ trans->used++; /* Transaction now owns the (DMA mapped) page */
+
+ return 0;
+}
+
+/* Add an SKB transfer to a transaction. No other TREs will be used. */
+int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
+{
+ struct scatterlist *sg = &trans->sgl[0];
+ u32 used;
+ int ret;
+
+ /* assert(trans->tre_count == 1); */
+ /* assert(!trans->used); */
+
+ /* skb->len will not be 0 (checked early) */
+ ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (ret < 0)
+ return ret;
+ used = ret;
+
+ ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);
+ if (!ret)
+ return -ENOMEM;
+
+ trans->used += used; /* Transaction now owns the (DMA mapped) skb */
+
+ return 0;
+}
+
+/* Compute the length/opcode value to use for a TRE */
+static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
+{
+ return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
+ : cpu_to_le16((u16)opcode);
+}
+
+/* Compute the flags value to use for a given TRE */
+static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
+{
+ enum gsi_tre_type tre_type;
+ u32 tre_flags;
+
+ tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
+ tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);
+
+ /* Last TRE contains interrupt flags */
+ if (last_tre) {
+ /* All transactions end in a transfer completion interrupt */
+ tre_flags |= TRE_FLAGS_IEOT_FMASK;
+ /* Don't interrupt when outbound commands are acknowledged */
+ if (bei)
+ tre_flags |= TRE_FLAGS_BEI_FMASK;
+ } else { /* All others indicate there's more to come */
+ tre_flags |= TRE_FLAGS_CHAIN_FMASK;
+ }
+
+ return cpu_to_le32(tre_flags);
+}
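+
+/* Encoding example (illustrative): the last TRE of an outbound data
+ * transfer (opcode IPA_CMD_NONE, bei = true) gets flags
+ *	u32_encode_bits(GSI_RE_XFER, TRE_FLAGS_TYPE_FMASK) |
+ *		TRE_FLAGS_IEOT_FMASK | TRE_FLAGS_BEI_FMASK
+ *	= 0x00020000 | 0x00000200 | 0x00000400 = 0x00020600
+ * (in CPU byte order, before the cpu_to_le32() conversion).
+ */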
+
+static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
+ u32 len, bool last_tre, bool bei,
+ enum ipa_cmd_opcode opcode)
+{
+ struct gsi_tre tre;
+
+ tre.addr = cpu_to_le64(addr);
+ tre.len_opcode = gsi_tre_len_opcode(opcode, len);
+ tre.reserved = 0;
+ tre.flags = gsi_tre_flags(last_tre, bei, opcode);
+
+ /* ARM64 can write 16 bytes as a unit with a single instruction.
+ * Doing the assignment this way is an attempt to make that happen.
+ */
+ *dest_tre = tre;
+}
+
+/**
+ * __gsi_trans_commit() - Common GSI transaction commit code
+ * @trans: Transaction to commit
+ * @ring_db: Whether to tell the hardware about these queued transfers
+ *
+ * Formats channel ring TRE entries based on the content of the scatterlist.
+ * Maps a transaction pointer to the last ring entry used for the transaction,
+ * so it can be recovered when it completes. Moves the transaction to the
+ * pending list. Finally, updates the channel ring pointer and optionally
+ * rings the doorbell.
+ */
+static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_ring *ring = &channel->tre_ring;
+ enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
+ bool bei = channel->toward_ipa;
+ struct ipa_cmd_info *info;
+ struct gsi_tre *dest_tre;
+ struct scatterlist *sg;
+ u32 byte_count = 0;
+ u32 avail;
+ u32 i;
+
+ /* assert(trans->used > 0); */
+
+ /* Consume the entries. If we cross the end of the ring while
+ * filling them we'll switch to the beginning to finish.
+ * If there is no info array we're doing a simple data
+ * transfer request, whose opcode is IPA_CMD_NONE.
+ */
+ info = trans->info ? &trans->info[0] : NULL;
+ avail = ring->count - ring->index % ring->count;
+ dest_tre = gsi_ring_virt(ring, ring->index);
+ for_each_sg(trans->sgl, sg, trans->used, i) {
+ bool last_tre = i == trans->used - 1;
+ dma_addr_t addr = sg_dma_address(sg);
+ u32 len = sg_dma_len(sg);
+
+ byte_count += len;
+ if (!avail--)
+ dest_tre = gsi_ring_virt(ring, 0);
+ if (info)
+ opcode = info++->opcode;
+
+ gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
+ dest_tre++;
+ }
+ ring->index += trans->used;
+
+ if (channel->toward_ipa) {
+ /* We record TX bytes when they are sent */
+ trans->len = byte_count;
+ trans->trans_count = channel->trans_count;
+ trans->byte_count = channel->byte_count;
+ channel->trans_count++;
+ channel->byte_count += byte_count;
+ }
+
+ /* Associate the last TRE with the transaction */
+ gsi_channel_trans_map(channel, ring->index - 1, trans);
+
+ gsi_trans_move_pending(trans);
+
+ /* Ring doorbell if requested, or if all TREs are allocated */
+ if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
+ /* Report what we're handing off to hardware for TX channels */
+ if (channel->toward_ipa)
+ gsi_channel_tx_queued(channel);
+ gsi_channel_doorbell(channel);
+ }
+}
+
+/* Commit a GSI transaction */
+void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
+{
+ if (trans->used)
+ __gsi_trans_commit(trans, ring_db);
+ else
+ gsi_trans_free(trans);
+}
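+
+/* Typical lifecycle (illustrative sketch; error handling is trimmed
+ * and the skb and channel_id values are hypothetical):
+ *
+ *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_TO_DEVICE);
+ *	if (!trans)
+ *		return -EBUSY;
+ *	ret = gsi_trans_skb_add(trans, skb);
+ *	if (ret)
+ *		gsi_trans_free(trans);
+ *	else
+ *		gsi_trans_commit(trans, true);
+ *
+ * Note that gsi_trans_free() is used only before a successful commit;
+ * once committed, a transaction is freed by the completion path.
+ */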
+
+/* Commit a GSI transaction and wait for it to complete */
+void gsi_trans_commit_wait(struct gsi_trans *trans)
+{
+ if (!trans->used)
+ goto out_trans_free;
+
+ refcount_inc(&trans->refcount);
+
+ __gsi_trans_commit(trans, true);
+
+ wait_for_completion(&trans->completion);
+
+out_trans_free:
+ gsi_trans_free(trans);
+}
+
+/* Commit a GSI transaction and wait for it to complete, with timeout */
+int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
+ unsigned long timeout)
+{
+ unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
+ unsigned long remaining = 1; /* In case of empty transaction */
+
+ if (!trans->used)
+ goto out_trans_free;
+
+ refcount_inc(&trans->refcount);
+
+ __gsi_trans_commit(trans, true);
+
+ remaining = wait_for_completion_timeout(&trans->completion,
+ timeout_jiffies);
+out_trans_free:
+ gsi_trans_free(trans);
+
+ return remaining ? 0 : -ETIMEDOUT;
+}
+
+/* Process the completion of a transaction; called while polling */
+void gsi_trans_complete(struct gsi_trans *trans)
+{
+ /* If the entire SGL was mapped when added, unmap it now */
+ if (trans->direction != DMA_NONE)
+ dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used,
+ trans->direction);
+
+ ipa_gsi_trans_complete(trans);
+
+ complete(&trans->completion);
+
+ gsi_trans_free(trans);
+}
+
+/* Cancel a channel's pending transactions */
+void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct gsi_trans *trans;
+ bool cancelled;
+
+ /* channel->gsi->mutex is held by caller */
+ spin_lock_bh(&trans_info->spinlock);
+
+ cancelled = !list_empty(&trans_info->pending);
+ list_for_each_entry(trans, &trans_info->pending, links)
+ trans->cancelled = true;
+
+ list_splice_tail_init(&trans_info->pending, &trans_info->complete);
+
+ spin_unlock_bh(&trans_info->spinlock);
+
+ /* Schedule NAPI polling to complete the cancelled transactions */
+ if (cancelled)
+ napi_schedule(&channel->napi);
+}
+
+/* Issue a command to read a single byte from a channel */
+int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct gsi_ring *ring = &channel->tre_ring;
+ struct gsi_trans_info *trans_info;
+ struct gsi_tre *dest_tre;
+
+ trans_info = &channel->trans_info;
+
+ /* First reserve the TRE, if possible */
+ if (!gsi_trans_tre_reserve(trans_info, 1))
+ return -EBUSY;
+
+ /* Now fill the reserved TRE and tell the hardware */
+
+ dest_tre = gsi_ring_virt(ring, ring->index);
+ gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);
+
+ ring->index++;
+ gsi_channel_doorbell(channel);
+
+ return 0;
+}
+
+/* Mark a gsi_trans_read_byte() request done */
+void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+
+ gsi_trans_tre_release(&channel->trans_info, 1);
+}
+
+/* Initialize a channel's GSI transaction info */
+int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct gsi_trans_info *trans_info;
+ u32 tre_max;
+ int ret;
+
+ /* Ensure the size of a channel element is what's expected */
+ BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);
+
+ /* The map array is used to determine what transaction is associated
+ * with a TRE that the hardware reports has completed. We need one
+ * map entry per TRE.
+ */
+ trans_info = &channel->trans_info;
+ trans_info->map = kcalloc(channel->tre_count, sizeof(*trans_info->map),
+ GFP_KERNEL);
+ if (!trans_info->map)
+ return -ENOMEM;
+
+ /* We can't use more TREs than there are available in the ring.
+ * This limits the number of transactions that can be outstanding.
+ * Worst case is one TRE per transaction (but we actually limit
+ * it to something a little less than that). We allocate resources
+ * for transactions (including transaction structures) based on
+ * this maximum number.
+ */
+ tre_max = gsi_channel_tre_max(channel->gsi, channel_id);
+
+ /* Transactions are allocated one at a time. */
+ ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
+ tre_max, 1);
+ if (ret)
+ goto err_kfree;
+
+ /* A transaction uses a scatterlist array to represent the data
+ * transfers implemented by the transaction. Each scatterlist
+ * element is used to fill a single TRE when the transaction is
+ * committed. So we need as many scatterlist elements as the
+ * maximum number of TREs that can be outstanding.
+ *
+ * All TREs in a transaction must fit within the channel's TLV FIFO.
+ * A transaction on a channel can allocate as many TREs as that but
+ * no more.
+ */
+ ret = gsi_trans_pool_init(&trans_info->sg_pool,
+ sizeof(struct scatterlist),
+ tre_max, channel->tlv_count);
+ if (ret)
+ goto err_trans_pool_exit;
+
+ /* Finally, the tre_avail field is what ultimately limits the number
+ * of outstanding transactions and their resources. A transaction
+ * allocation succeeds only if the TREs available are sufficient for
+ * what the transaction might need. Transaction resource pools are
+ * sized based on the maximum number of outstanding TREs, so there
+ * will always be resources available if there are TREs available.
+ */
+ atomic_set(&trans_info->tre_avail, tre_max);
+
+ spin_lock_init(&trans_info->spinlock);
+ INIT_LIST_HEAD(&trans_info->alloc);
+ INIT_LIST_HEAD(&trans_info->pending);
+ INIT_LIST_HEAD(&trans_info->complete);
+ INIT_LIST_HEAD(&trans_info->polled);
+
+ return 0;
+
+err_trans_pool_exit:
+ gsi_trans_pool_exit(&trans_info->pool);
+err_kfree:
+ kfree(trans_info->map);
+
+ dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
+ ret, channel_id);
+
+ return ret;
+}
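+
+/* Sizing example (hypothetical numbers): if a channel ring holds 256
+ * TREs and gsi_channel_tre_max() reports, say, 250 usable ones, then
+ * in the worst case (one TRE each) 250 transactions can be outstanding
+ * at once. The transaction pool is therefore sized for 250 entries,
+ * and the scatterlist pool for 250 entries with at most tlv_count of
+ * them consumed by any single transaction.
+ */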
+
+/* Inverse of gsi_channel_trans_init() */
+void gsi_channel_trans_exit(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+
+ gsi_trans_pool_exit(&trans_info->sg_pool);
+ gsi_trans_pool_exit(&trans_info->pool);
+ kfree(trans_info->map);
+}
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
new file mode 100644
index 000000000000..1477fc15b30a
--- /dev/null
+++ b/drivers/net/ipa/gsi_trans.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _GSI_TRANS_H_
+#define _GSI_TRANS_H_
+
+#include <linux/types.h>
+#include <linux/refcount.h>
+#include <linux/completion.h>
+#include <linux/dma-direction.h>
+
+#include "ipa_cmd.h"
+
+struct scatterlist;
+struct device;
+struct sk_buff;
+
+struct gsi;
+struct gsi_trans;
+struct gsi_trans_pool;
+
+/**
+ * struct gsi_trans - a GSI transaction
+ *
+ * Most fields in this structure are for internal use by the transaction
+ * core code:
+ * @links: Links for channel transaction lists by state
+ * @gsi: GSI pointer
+ * @channel_id: Channel number transaction is associated with
+ * @cancelled: If set by the core code, transaction was cancelled
+ * @tre_count: Number of TREs reserved for this transaction
+ * @used: Number of TREs *used* (could be less than tre_count)
+ * @len: Total # of transfer bytes represented in sgl[] (set by core)
+ * @data: Preserved but not touched by the core transaction code
+ * @sgl: An array of scatter/gather entries managed by core code
+ * @info: Array of command information structures (command channel)
+ * @direction: DMA transfer direction (DMA_NONE for commands)
+ * @refcount: Reference count used for destruction
+ * @completion: Completed when the transaction completes
+ * @byte_count: TX channel byte count recorded when transaction committed
+ * @trans_count: Channel transaction count when committed (for BQL accounting)
+ *
+ * The sizes used for some fields in this structure were chosen to ensure
+ * the full structure size is no larger than 128 bytes.
+ */
+struct gsi_trans {
+ struct list_head links; /* gsi_channel lists */
+
+ struct gsi *gsi;
+ u8 channel_id;
+
+ bool cancelled; /* true if transaction was cancelled */
+
+ u8 tre_count; /* # TREs requested */
+ u8 used; /* # entries used in sgl[] */
+ u32 len; /* total # bytes across sgl[] */
+
+ void *data;
+ struct scatterlist *sgl;
+ struct ipa_cmd_info *info; /* array of entries, or null */
+ enum dma_data_direction direction;
+
+ refcount_t refcount;
+ struct completion completion;
+
+ u64 byte_count; /* channel byte_count when committed */
+ u64 trans_count; /* channel trans_count when committed */
+};
+
+/**
+ * gsi_trans_pool_init() - Initialize a pool of structures for transactions
+ * @pool: Pool pointer
+ * @size: Size of elements in the pool
+ * @count: Minimum number of elements in the pool
+ * @max_alloc: Maximum number of elements allocated at a time from pool
+ *
+ * @Return: 0 if successful, or a negative error code
+ */
+int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
+ u32 max_alloc);
+
+/**
+ * gsi_trans_pool_alloc() - Allocate one or more elements from a pool
+ * @pool: Pool pointer
+ * @count: Number of elements to allocate from the pool
+ *
+ * @Return: Virtual address of element(s) allocated from the pool
+ */
+void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count);
+
+/**
+ * gsi_trans_pool_exit() - Inverse of gsi_trans_pool_init()
+ * @pool: Pool pointer
+ */
+void gsi_trans_pool_exit(struct gsi_trans_pool *pool);
+
+/**
+ * gsi_trans_pool_init_dma() - Initialize a pool of DMA-able structures
+ * @dev: Device used for DMA
+ * @pool: Pool pointer
+ * @size: Size of elements in the pool
+ * @count: Minimum number of elements in the pool
+ * @max_alloc: Maximum number of elements allocated at a time from pool
+ *
+ * @Return: 0 if successful, or a negative error code
+ *
+ * Structures in this pool reside in DMA-coherent memory.
+ */
+int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
+ size_t size, u32 count, u32 max_alloc);
+
+/**
+ * gsi_trans_pool_alloc_dma() - Allocate an element from a DMA pool
+ * @pool: DMA pool pointer
+ * @addr: DMA address "handle" associated with the allocation
+ *
+ * @Return: Virtual address of element allocated from the pool
+ *
+ * Only one element at a time may be allocated from a DMA pool.
+ */
+void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr);
+
+/**
+ * gsi_trans_pool_exit_dma() - Inverse of gsi_trans_pool_init_dma()
+ * @dev: Device used for DMA
+ * @pool: Pool pointer
+ */
+void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool);
+
+/**
+ * gsi_channel_trans_alloc() - Allocate a GSI transaction on a channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel the transaction is associated with
+ * @tre_count: Number of elements in the transaction
+ * @direction: DMA direction for entire SGL (or DMA_NONE)
+ *
+ * @Return: A GSI transaction structure, or a null pointer if all
+ * available transactions are in use
+ */
+struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
+ u32 tre_count,
+ enum dma_data_direction direction);
+
+/**
+ * gsi_trans_free() - Free a previously-allocated GSI transaction
+ * @trans: Transaction to be freed
+ */
+void gsi_trans_free(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_cmd_add() - Add an immediate command to a transaction
+ * @trans: Transaction
+ * @buf: Buffer pointer for command payload
+ * @size: Number of bytes in buffer
+ * @addr: DMA address for payload
+ * @direction: Direction of DMA transfer (or DMA_NONE if none required)
+ * @opcode: IPA immediate command opcode
+ */
+void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
+ dma_addr_t addr, enum dma_data_direction direction,
+ enum ipa_cmd_opcode opcode);
+
+/**
+ * gsi_trans_page_add() - Add a page transfer to a transaction
+ * @trans: Transaction
+ * @page: Page pointer
+ * @size: Number of bytes (starting at offset) to transfer
+ * @offset: Offset within page for start of transfer
+ *
+ * @Return: 0 if successful, or -ENOMEM if DMA mapping fails
+ */
+int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
+ u32 offset);
+
+/**
+ * gsi_trans_skb_add() - Add a socket transfer to a transaction
+ * @trans: Transaction
+ * @skb: Socket buffer for transfer (outbound)
+ *
+ * @Return: 0 if successful, -EMSGSIZE if the socket data won't fit in
+ * the transaction, or -ENOMEM if DMA mapping fails
+ */
+int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb);
+
+/**
+ * gsi_trans_commit() - Commit a GSI transaction
+ * @trans: Transaction to commit
+ * @ring_db: Whether to tell the hardware about these queued transfers
+ */
+void gsi_trans_commit(struct gsi_trans *trans, bool ring_db);
+
+/**
+ * gsi_trans_commit_wait() - Commit a GSI transaction and wait for it
+ * to complete
+ * @trans: Transaction to commit
+ */
+void gsi_trans_commit_wait(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_commit_wait_timeout() - Commit a GSI transaction and wait for
+ * it to complete, with timeout
+ * @trans: Transaction to commit
+ * @timeout: Timeout period (in milliseconds)
+ *
+ * @Return: 0 if successful, or -ETIMEDOUT if the transaction timed out
+ */
+int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
+ unsigned long timeout);
+
+/**
+ * gsi_trans_read_byte() - Issue a single byte read TRE on a channel
+ * @gsi: GSI pointer
+ * @channel_id: Channel on which to read a byte
+ * @addr: DMA address into which to transfer the one byte
+ *
+ * This is not a transaction operation at all. It's defined here because
+ * it needs to be done in coordination with other transaction activity.
+ */
+int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr);
+
+/**
+ * gsi_trans_read_byte_done() - Clean up after a single byte read TRE
+ * @gsi: GSI pointer
+ * @channel_id: Channel on which byte was read
+ *
+ * This function needs to be called to signal that the work related
+ * to reading a byte initiated by gsi_trans_read_byte() is complete.
+ */
+void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id);
+
+#endif /* _GSI_TRANS_H_ */
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
new file mode 100644
index 000000000000..23fb29889e5a
--- /dev/null
+++ b/drivers/net/ipa/ipa.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_H_
+#define _IPA_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/pm_wakeup.h>
+
+#include "ipa_version.h"
+#include "gsi.h"
+#include "ipa_mem.h"
+#include "ipa_qmi.h"
+#include "ipa_endpoint.h"
+#include "ipa_interrupt.h"
+
+struct clk;
+struct icc_path;
+struct net_device;
+struct platform_device;
+
+struct ipa_clock;
+struct ipa_smp2p;
+struct ipa_interrupt;
+
+/**
+ * struct ipa - IPA information
+ * @gsi: Embedded GSI structure
+ * @version: IPA hardware version
+ * @pdev: Platform device
+ * @modem_rproc: Remoteproc handle for modem subsystem
+ * @smp2p: SMP2P information
+ * @clock: IPA clocking information
+ * @suspend_ref: Whether a clock reference preventing suspend is held
+ * @table_addr: DMA address of filter/route table content
+ * @table_virt: Virtual address of filter/route table content
+ * @interrupt: IPA Interrupt information
+ * @uc_loaded: true after microcontroller has reported it's ready
+ * @reg_addr: DMA address used for IPA register access
+ * @reg_virt: Virtual address used for IPA register access
+ * @mem_addr: DMA address of IPA-local memory space
+ * @mem_virt: Virtual address of IPA-local memory space
+ * @mem_offset: Offset from @mem_virt used for access to IPA memory
+ * @mem_size: Total size (bytes) of memory at @mem_virt
+ * @mem: Array of IPA-local memory region descriptors
+ * @zero_addr: DMA address of preallocated zero-filled memory
+ * @zero_virt: Virtual address of preallocated zero-filled memory
+ * @zero_size: Size (bytes) of preallocated zero-filled memory
+ * @wakeup_source: Wakeup source information
+ * @available: Bit mask indicating endpoints hardware supports
+ * @filter_map: Bit mask indicating endpoints that support filtering
+ * @initialized: Bit mask indicating endpoints initialized
+ * @set_up: Bit mask indicating endpoints set up
+ * @enabled: Bit mask indicating endpoints enabled
+ * @endpoint: Array of endpoint information
+ * @channel_map: Mapping of GSI channel to IPA endpoint
+ * @name_map: Mapping of IPA endpoint name to IPA endpoint
+ * @setup_complete: Flag indicating whether setup stage has completed
+ * @modem_state: State of modem (stopped, running)
+ * @modem_netdev: Network device structure used for modem
+ * @qmi: QMI information
+ */
+struct ipa {
+ struct gsi gsi;
+ enum ipa_version version;
+ struct platform_device *pdev;
+ struct rproc *modem_rproc;
+ struct ipa_smp2p *smp2p;
+ struct ipa_clock *clock;
+ atomic_t suspend_ref;
+
+ dma_addr_t table_addr;
+ __le64 *table_virt;
+
+ struct ipa_interrupt *interrupt;
+ bool uc_loaded;
+
+ dma_addr_t reg_addr;
+ void __iomem *reg_virt;
+
+ dma_addr_t mem_addr;
+ void *mem_virt;
+ u32 mem_offset;
+ u32 mem_size;
+ const struct ipa_mem *mem;
+
+ dma_addr_t zero_addr;
+ void *zero_virt;
+ size_t zero_size;
+
+ struct wakeup_source *wakeup_source;
+
+ /* Bit masks indicating endpoint state */
+ u32 available; /* supported by hardware */
+ u32 filter_map;
+ u32 initialized;
+ u32 set_up;
+ u32 enabled;
+
+ struct ipa_endpoint endpoint[IPA_ENDPOINT_MAX];
+ struct ipa_endpoint *channel_map[GSI_CHANNEL_COUNT_MAX];
+ struct ipa_endpoint *name_map[IPA_ENDPOINT_COUNT];
+
+ bool setup_complete;
+
+ atomic_t modem_state; /* enum ipa_modem_state */
+ struct net_device *modem_netdev;
+ struct ipa_qmi qmi;
+};
+
+/**
+ * ipa_setup() - Perform IPA setup
+ * @ipa: IPA pointer
+ *
+ * IPA initialization is broken into stages: init; config; and setup.
+ * (These have inverses exit, deconfig, and teardown.)
+ *
+ * Activities performed at the init stage can be done without requiring
+ * any access to IPA hardware. Activities performed at the config stage
+ * require the IPA clock to be running, because they involve access
+ * to IPA registers. The setup stage is performed only after the GSI
+ * hardware is ready (more on this below). The setup stage allows
+ * the AP to perform more complex initialization by issuing "immediate
+ * commands" using a special interface to the IPA.
+ *
+ * This function, @ipa_setup(), starts the setup stage.
+ *
+ * In order for the GSI hardware to be functional it needs firmware to be
+ * loaded (in addition to some other low-level initialization). This early
+ * GSI initialization can be done either by Trust Zone on the AP or by the
+ * modem.
+ *
+ * If it's done by Trust Zone, the AP loads the GSI firmware and supplies
+ * it to Trust Zone to verify and install. When this completes, if
+ * verification was successful, the GSI layer is ready and ipa_setup()
+ * implements the setup phase of initialization.
+ *
+ * If the modem performs early GSI initialization, the AP needs to know
+ * when this has occurred. An SMP2P interrupt is used for this purpose,
+ * and receipt of that interrupt triggers the call to ipa_setup().
+ */
+int ipa_setup(struct ipa *ipa);
+
+#endif /* _IPA_H_ */
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
new file mode 100644
index 000000000000..374491ea11cf
--- /dev/null
+++ b/drivers/net/ipa/ipa_clock.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect.h>
+
+#include "ipa.h"
+#include "ipa_clock.h"
+#include "ipa_modem.h"
+
+/**
+ * DOC: IPA Clocking
+ *
+ * The "IPA Clock" manages both the IPA core clock and the interconnects
+ * (buses) the IPA depends on as a single logical entity. A reference count
+ * is incremented by "get" operations and decremented by "put" operations.
+ * Transitions of that count from 0 to 1 result in the clock and interconnects
+ * being enabled, and transitions of the count from 1 to 0 cause them to be
+ * disabled. We currently operate the core clock at a fixed clock rate, and
+ * all buses at a fixed average and peak bandwidth. As more advanced IPA
+ * features are enabled, we can make better use of clock and bus scaling.
+ *
+ * An IPA clock reference must be held for any access to IPA hardware.
+ */
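+
+/* Usage sketch (illustrative): code that touches IPA registers
+ * brackets the access with a reference:
+ *
+ *	ipa_clock_get(ipa);
+ *	... access IPA hardware ...
+ *	ipa_clock_put(ipa);
+ *
+ * Contexts that must not block can instead call
+ * ipa_clock_get_additional(), which succeeds only if the clock is
+ * already running, and defer their work otherwise.
+ */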
+
+#define IPA_CORE_CLOCK_RATE (75UL * 1000 * 1000) /* Hz */
+
+/* Interconnect path bandwidths (each times 1000 bytes per second) */
+#define IPA_MEMORY_AVG (80 * 1000) /* 80 MBps */
+#define IPA_MEMORY_PEAK (600 * 1000)
+
+#define IPA_IMEM_AVG (80 * 1000)
+#define IPA_IMEM_PEAK (350 * 1000)
+
+#define IPA_CONFIG_AVG (40 * 1000)
+#define IPA_CONFIG_PEAK (40 * 1000)
+
+/**
+ * struct ipa_clock - IPA clocking information
+ * @count: Clocking reference count
+ * @mutex: Protects clock enable/disable
+ * @core: IPA core clock
+ * @memory_path: Memory interconnect
+ * @imem_path: Internal memory interconnect
+ * @config_path: Configuration space interconnect
+ */
+struct ipa_clock {
+ atomic_t count;
+ struct mutex mutex; /* protects clock enable/disable */
+ struct clk *core;
+ struct icc_path *memory_path;
+ struct icc_path *imem_path;
+ struct icc_path *config_path;
+};
+
+static struct icc_path *
+ipa_interconnect_init_one(struct device *dev, const char *name)
+{
+ struct icc_path *path;
+
+ path = of_icc_get(dev, name);
+ if (IS_ERR(path))
+ dev_err(dev, "error %ld getting memory interconnect\n",
+ PTR_ERR(path));
+
+ return path;
+}
+
+/* Initialize interconnects required for IPA operation */
+static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev)
+{
+ struct icc_path *path;
+
+ path = ipa_interconnect_init_one(dev, "memory");
+ if (IS_ERR(path))
+ goto err_return;
+ clock->memory_path = path;
+
+ path = ipa_interconnect_init_one(dev, "imem");
+ if (IS_ERR(path))
+ goto err_memory_path_put;
+ clock->imem_path = path;
+
+ path = ipa_interconnect_init_one(dev, "config");
+ if (IS_ERR(path))
+ goto err_imem_path_put;
+ clock->config_path = path;
+
+ return 0;
+
+err_imem_path_put:
+ icc_put(clock->imem_path);
+err_memory_path_put:
+ icc_put(clock->memory_path);
+err_return:
+ return PTR_ERR(path);
+}
+
+/* Inverse of ipa_interconnect_init() */
+static void ipa_interconnect_exit(struct ipa_clock *clock)
+{
+ icc_put(clock->config_path);
+ icc_put(clock->imem_path);
+ icc_put(clock->memory_path);
+}
+
+/* Currently we only use one bandwidth level, so just "enable" interconnects */
+static int ipa_interconnect_enable(struct ipa *ipa)
+{
+ struct ipa_clock *clock = ipa->clock;
+ int ret;
+
+ ret = icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK);
+ if (ret)
+ return ret;
+
+ ret = icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK);
+ if (ret)
+ goto err_memory_path_disable;
+
+ ret = icc_set_bw(clock->config_path, IPA_CONFIG_AVG, IPA_CONFIG_PEAK);
+ if (ret)
+ goto err_imem_path_disable;
+
+ return 0;
+
+err_imem_path_disable:
+ (void)icc_set_bw(clock->imem_path, 0, 0);
+err_memory_path_disable:
+ (void)icc_set_bw(clock->memory_path, 0, 0);
+
+ return ret;
+}
+
+/* To disable an interconnect, we just set its bandwidth to 0 */
+static int ipa_interconnect_disable(struct ipa *ipa)
+{
+ struct ipa_clock *clock = ipa->clock;
+ int ret;
+
+ ret = icc_set_bw(clock->memory_path, 0, 0);
+ if (ret)
+ return ret;
+
+ ret = icc_set_bw(clock->imem_path, 0, 0);
+ if (ret)
+ goto err_memory_path_reenable;
+
+ ret = icc_set_bw(clock->config_path, 0, 0);
+ if (ret)
+ goto err_imem_path_reenable;
+
+ return 0;
+
+err_imem_path_reenable:
+ (void)icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK);
+err_memory_path_reenable:
+ (void)icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK);
+
+ return ret;
+}
+
+/* Turn on IPA clocks, including interconnects */
+static int ipa_clock_enable(struct ipa *ipa)
+{
+ int ret;
+
+ ret = ipa_interconnect_enable(ipa);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(ipa->clock->core);
+ if (ret)
+ ipa_interconnect_disable(ipa);
+
+ return ret;
+}
+
+/* Inverse of ipa_clock_enable() */
+static void ipa_clock_disable(struct ipa *ipa)
+{
+ clk_disable_unprepare(ipa->clock->core);
+ (void)ipa_interconnect_disable(ipa);
+}
+
+/* Get an IPA clock reference, but only if the reference count is
+ * already non-zero. Returns true if the additional reference was
+ * added successfully, or false otherwise.
+ */
+bool ipa_clock_get_additional(struct ipa *ipa)
+{
+ return !!atomic_inc_not_zero(&ipa->clock->count);
+}
+
+/* Get an IPA clock reference. If the reference count is non-zero, it is
+ * incremented and return is immediate. Otherwise it is checked again
+ * under protection of the mutex, and if appropriate the clock (and
+ * interconnects) are enabled and suspended endpoints (if any) are
+ * resumed before returning.
+ *
+ * Incrementing the reference count is intentionally deferred until
+ * after the clock is running and endpoints are resumed.
+ */
+void ipa_clock_get(struct ipa *ipa)
+{
+ struct ipa_clock *clock = ipa->clock;
+ int ret;
+
+ /* If the clock is running, just bump the reference count */
+ if (ipa_clock_get_additional(ipa))
+ return;
+
+ /* Otherwise get the mutex and check again */
+ mutex_lock(&clock->mutex);
+
+ /* A reference might have been added before we got the mutex. */
+ if (ipa_clock_get_additional(ipa))
+ goto out_mutex_unlock;
+
+ ret = ipa_clock_enable(ipa);
+ if (ret) {
+ dev_err(&ipa->pdev->dev, "error %d enabling IPA clock\n", ret);
+ goto out_mutex_unlock;
+ }
+
+ ipa_endpoint_resume(ipa);
+
+ atomic_inc(&clock->count);
+
+out_mutex_unlock:
+ mutex_unlock(&clock->mutex);
+}
+
+/* Attempt to remove an IPA clock reference. If this represents the last
+ * reference, suspend endpoints and disable the clock (and interconnects)
+ * under protection of a mutex.
+ */
+void ipa_clock_put(struct ipa *ipa)
+{
+ struct ipa_clock *clock = ipa->clock;
+
+ /* If this is not the last reference there's nothing more to do */
+ if (!atomic_dec_and_mutex_lock(&clock->count, &clock->mutex))
+ return;
+
+ ipa_endpoint_suspend(ipa);
+
+ ipa_clock_disable(ipa);
+
+ mutex_unlock(&clock->mutex);
+}
+
+/* Initialize IPA clocking */
+struct ipa_clock *ipa_clock_init(struct device *dev)
+{
+ struct ipa_clock *clock;
+ struct clk *clk;
+ int ret;
+
+ clk = clk_get(dev, "core");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "error %ld getting core clock\n", PTR_ERR(clk));
+ return ERR_CAST(clk);
+ }
+
+ ret = clk_set_rate(clk, IPA_CORE_CLOCK_RATE);
+ if (ret) {
+ dev_err(dev, "error %d setting core clock rate to %lu\n",
+ ret, IPA_CORE_CLOCK_RATE);
+ goto err_clk_put;
+ }
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ ret = -ENOMEM;
+ goto err_clk_put;
+ }
+ clock->core = clk;
+
+ ret = ipa_interconnect_init(clock, dev);
+ if (ret)
+ goto err_kfree;
+
+ mutex_init(&clock->mutex);
+ atomic_set(&clock->count, 0);
+
+ return clock;
+
+err_kfree:
+ kfree(clock);
+err_clk_put:
+ clk_put(clk);
+
+ return ERR_PTR(ret);
+}
+
+/* Inverse of ipa_clock_init() */
+void ipa_clock_exit(struct ipa_clock *clock)
+{
+ struct clk *clk = clock->core;
+
+ WARN_ON(atomic_read(&clock->count) != 0);
+ mutex_destroy(&clock->mutex);
+ ipa_interconnect_exit(clock);
+ kfree(clock);
+ clk_put(clk);
+}
diff --git a/drivers/net/ipa/ipa_clock.h b/drivers/net/ipa/ipa_clock.h
new file mode 100644
index 000000000000..bc52b35e6bb2
--- /dev/null
+++ b/drivers/net/ipa/ipa_clock.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_CLOCK_H_
+#define _IPA_CLOCK_H_
+
+struct device;
+
+struct ipa;
+
+/**
+ * ipa_clock_init() - Initialize IPA clocking
+ * @dev: IPA device
+ *
+ * @Return: A pointer to an ipa_clock structure, or a pointer-coded error
+ */
+struct ipa_clock *ipa_clock_init(struct device *dev);
+
+/**
+ * ipa_clock_exit() - Inverse of ipa_clock_init()
+ * @clock: IPA clock pointer
+ */
+void ipa_clock_exit(struct ipa_clock *clock);
+
+/**
+ * ipa_clock_get() - Get an IPA clock reference
+ * @ipa: IPA pointer
+ *
+ * This call blocks if this is the first reference.
+ */
+void ipa_clock_get(struct ipa *ipa);
+
+/**
+ * ipa_clock_get_additional() - Get an IPA clock reference if not first
+ * @ipa: IPA pointer
+ *
+ * This returns immediately, and takes a reference only if the clock
+ * is already running (it never takes the first reference).
+ */
+bool ipa_clock_get_additional(struct ipa *ipa);
+
+/**
+ * ipa_clock_put() - Drop an IPA clock reference
+ * @ipa: IPA pointer
+ *
+ * This drops a clock reference. If the last reference is being dropped,
+ * the clock is stopped and RX endpoints are suspended. This call will
+ * not block unless the last reference is dropped.
+ */
+void ipa_clock_put(struct ipa *ipa);
+
+#endif /* _IPA_CLOCK_H_ */
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
new file mode 100644
index 000000000000..d226b858742d
--- /dev/null
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+#include <linux/dma-direction.h>
+
+#include "gsi.h"
+#include "gsi_trans.h"
+#include "ipa.h"
+#include "ipa_endpoint.h"
+#include "ipa_table.h"
+#include "ipa_cmd.h"
+#include "ipa_mem.h"
+
+/**
+ * DOC: IPA Immediate Commands
+ *
+ * The AP command TX endpoint is used to issue immediate commands to the IPA.
+ * An immediate command is generally used to request the IPA do something
+ * other than data transfer to another endpoint.
+ *
+ * Immediate commands are represented by GSI transactions just like other
+ * transfer requests, represented by a single GSI TRE. Each immediate
+ * command has a well-defined format, having a payload of a known length.
+ * This allows the transfer element's length field to be used to hold an
+ * immediate command's opcode. The payload for a command resides in DRAM
+ * and is described by a single scatterlist entry in its transaction.
+ * Commands do not require a transaction completion callback. To commit
+ * an immediate command transaction, either gsi_trans_commit_wait() or
+ * gsi_trans_commit_wait_timeout() is used.
+ */
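+
+/* Illustrative sequence (sketch only; the command channel ID and the
+ * offset/size/addr values are hypothetical):
+ *
+ *	trans = gsi_channel_trans_alloc(gsi, cmd_channel_id, 1, DMA_NONE);
+ *	if (!trans)
+ *		return -EBUSY;
+ *	ipa_cmd_hdr_init_local_add(trans, offset, size, addr);
+ *	gsi_trans_commit_wait(trans);
+ */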
+
+/* Some commands can wait until indicated pipeline stages are clear */
+enum pipeline_clear_options {
+ pipeline_clear_hps = 0,
+ pipeline_clear_src_grp = 1,
+ pipeline_clear_full = 2,
+};
+
+/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */
+
+struct ipa_cmd_hw_ip_fltrt_init {
+ __le64 hash_rules_addr;
+ __le64 flags;
+ __le64 nhash_rules_addr;
+};
+
+/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
+#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK GENMASK_ULL(11, 0)
+#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK GENMASK_ULL(27, 12)
+#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK GENMASK_ULL(39, 28)
+#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK GENMASK_ULL(55, 40)
+
+/* IPA_CMD_HDR_INIT_LOCAL */
+
+struct ipa_cmd_hw_hdr_init_local {
+ __le64 hdr_table_addr;
+ __le32 flags;
+ __le32 reserved;
+};
+
+/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
+#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK GENMASK(11, 0)
+#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK GENMASK(27, 12)
+
+/* IPA_CMD_REGISTER_WRITE */
+
+/* For IPA v4.0+, this opcode gets modified with pipeline clear options */
+
+#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK GENMASK(8, 8)
+#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK GENMASK(10, 9)
+
+struct ipa_cmd_register_write {
+ __le16 flags; /* Unused/reserved for IPA v3.5.1 */
+ __le16 offset;
+ __le32 value;
+ __le32 value_mask;
+ __le32 clear_options; /* Unused/reserved for IPA v4.0+ */
+};
+
+/* Field masks for ipa_cmd_register_write structure fields */
+/* The next field is present for IPA v4.0 and above */
+#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK GENMASK(14, 11)
+/* The next field is present for IPA v3.5.1 only */
+#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK GENMASK(15, 15)
+
+/* The next field and its values are present for IPA v3.5.1 only */
+#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK GENMASK(1, 0)
+
+/* IPA_CMD_IP_PACKET_INIT */
+
+struct ipa_cmd_ip_packet_init {
+ u8 dest_endpoint;
+ u8 reserved[7];
+};
+
+/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
+#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK GENMASK(4, 0)
+
+/* IPA_CMD_DMA_TASK_32B_ADDR */
+
+/* This opcode gets modified with a DMA operation count */
+
+#define DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK GENMASK(15, 8)
+
+struct ipa_cmd_hw_dma_task_32b_addr {
+ __le16 flags;
+ __le16 size;
+ __le32 addr;
+ __le16 packet_size;
+ u8 reserved[6];
+};
+
+/* Field masks for ipa_cmd_hw_dma_task_32b_addr flags field */
+#define DMA_TASK_32B_ADDR_FLAGS_SW_RSVD_FMASK GENMASK(10, 0)
+#define DMA_TASK_32B_ADDR_FLAGS_CMPLT_FMASK GENMASK(11, 11)
+#define DMA_TASK_32B_ADDR_FLAGS_EOF_FMASK GENMASK(12, 12)
+#define DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK GENMASK(13, 13)
+#define DMA_TASK_32B_ADDR_FLAGS_LOCK_FMASK GENMASK(14, 14)
+#define DMA_TASK_32B_ADDR_FLAGS_UNLOCK_FMASK GENMASK(15, 15)
+
+/* IPA_CMD_DMA_SHARED_MEM */
+
+/* For IPA v4.0+, this opcode gets modified with pipeline clear options */
+
+#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK GENMASK(8, 8)
+#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK GENMASK(10, 9)
+
+struct ipa_cmd_hw_dma_mem_mem {
+ __le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
+ __le16 size;
+ __le16 local_addr;
+ __le16 flags;
+ __le64 system_addr;
+};
+
+/* Flag allowing atomic clear of target region after reading data (v4.0+) */
+#define DMA_SHARED_MEM_CLEAR_AFTER_READ GENMASK(15, 15)
+
+/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
+#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK GENMASK(0, 0)
+/* The next two fields are present for IPA v3.5.1 only. */
+#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK GENMASK(1, 1)
+#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK GENMASK(3, 2)
+
+/* IPA_CMD_IP_PACKET_TAG_STATUS */
+
+struct ipa_cmd_ip_packet_tag_status {
+ __le64 tag;
+};
+
+#define IP_PACKET_TAG_STATUS_TAG_FMASK GENMASK_ULL(63, 16)
+
+/* Immediate command payload */
+union ipa_cmd_payload {
+ struct ipa_cmd_hw_ip_fltrt_init table_init;
+ struct ipa_cmd_hw_hdr_init_local hdr_init_local;
+ struct ipa_cmd_register_write register_write;
+ struct ipa_cmd_ip_packet_init ip_packet_init;
+ struct ipa_cmd_hw_dma_task_32b_addr dma_task_32b_addr;
+ struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
+ struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
+};
+
+static void ipa_cmd_validate_build(void)
+{
+ /* The sizes of the filter and route tables need to fit into fields
+ * in the ipa_cmd_hw_ip_fltrt_init structure. Although hashed tables
+ * might not be used, non-hashed and hashed tables have the same
+ * maximum size. IPv4 and IPv6 filter tables have the same number
+ * of entries, as do IPv4 and IPv6 route tables.
+ */
+#define TABLE_SIZE (TABLE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE)
+#define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
+ BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
+ BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
+#undef TABLE_COUNT_MAX
+#undef TABLE_SIZE
+}
+
+#ifdef IPA_VALIDATE
+
+/* Validate a memory region holding a table */
+bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
+ bool route, bool ipv6, bool hashed)
+{
+ struct device *dev = &ipa->pdev->dev;
+ u32 offset_max;
+
+ offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
+ : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
+ if (mem->offset > offset_max ||
+ ipa->mem_offset > offset_max - mem->offset) {
+ dev_err(dev, "IPv%c %s%s table region offset too large "
+ "(0x%04x + 0x%04x > 0x%04x)\n",
+ ipv6 ? '6' : '4', hashed ? "hashed " : "",
+ route ? "route" : "filter",
+ ipa->mem_offset, mem->offset, offset_max);
+ return false;
+ }
+
+ if (mem->offset > ipa->mem_size ||
+ mem->size > ipa->mem_size - mem->offset) {
+ dev_err(dev, "IPv%c %s%s table region out of range "
+ "(0x%04x + 0x%04x > 0x%04x)\n",
+ ipv6 ? '6' : '4', hashed ? "hashed " : "",
+ route ? "route" : "filter",
+ mem->offset, mem->size, ipa->mem_size);
+ return false;
+ }
+
+ return true;
+}
+
+/* Validate the memory region that holds headers */
+static bool ipa_cmd_header_valid(struct ipa *ipa)
+{
+ const struct ipa_mem *mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
+ struct device *dev = &ipa->pdev->dev;
+ u32 offset_max;
+ u32 size_max;
+ u32 size;
+
+ offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
+ if (mem->offset > offset_max ||
+ ipa->mem_offset > offset_max - mem->offset) {
+ dev_err(dev, "header table region offset too large "
+ "(0x%04x + 0x%04x > 0x%04x)\n",
+ ipa->mem_offset, mem->offset, offset_max);
+ return false;
+ }
+
+ size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
+ size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
+ size += ipa->mem[IPA_MEM_AP_HEADER].size;
+ if (mem->offset > ipa->mem_size || size > ipa->mem_size - mem->offset) {
+ dev_err(dev, "header table region out of range "
+ "(0x%04x + 0x%04x > 0x%04x)\n",
+ mem->offset, size, ipa->mem_size);
+ return false;
+ }
+
+ return true;
+}
+
+/* Indicate whether an offset can be used with a register_write command */
+static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
+ const char *name, u32 offset)
+{
+ struct ipa_cmd_register_write *payload;
+ struct device *dev = &ipa->pdev->dev;
+ u32 offset_max;
+ u32 bit_count;
+
+ /* The maximum offset in a register_write immediate command depends
+ * on the version of IPA. IPA v3.5.1 supports a 16 bit offset, but
+ * newer versions allow some additional high-order bits.
+ */
+ bit_count = BITS_PER_BYTE * sizeof(payload->offset);
+ if (ipa->version != IPA_VERSION_3_5_1)
+ bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
+ BUILD_BUG_ON(bit_count > 32);
+ offset_max = ~0U >> (32 - bit_count);
+
+ if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
+ dev_err(dev, "%s offset too large 0x%04x + 0x%04x > 0x%04x)\n",
+ ipa->mem_offset + offset, offset_max);
+ return false;
+ }
+
+ return true;
+}
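+
+/* Worked example (illustrative): payload->offset is 16 bits wide, so
+ * for IPA v3.5.1 offset_max is 0xffff. Later versions add the 4 bits
+ * of REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK, giving bit_count = 20 and
+ * offset_max = 0xfffff.
+ */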
+
+/* Check whether offsets passed to register_write are valid */
+static bool ipa_cmd_register_write_valid(struct ipa *ipa)
+{
+ const char *name;
+ u32 offset;
+
+ offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ name = "filter/route hash flush";
+ if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
+ return false;
+
+ offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT);
+ name = "maximal endpoint status";
+ if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
+ return false;
+
+ return true;
+}
+
+bool ipa_cmd_data_valid(struct ipa *ipa)
+{
+ if (!ipa_cmd_header_valid(ipa))
+ return false;
+
+ if (!ipa_cmd_register_write_valid(ipa))
+ return false;
+
+ return true;
+}
+
+#endif /* IPA_VALIDATE */
+
+int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct device *dev = channel->gsi->dev;
+ int ret;
+
+ /* This is as good a place as any to validate build constants */
+ ipa_cmd_validate_build();
+
+ /* Even though command payloads are allocated one at a time,
+ * a single transaction can require up to tlv_count of them,
+ * so we treat them as if that many can be allocated at once.
+ */
+ ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
+ sizeof(union ipa_cmd_payload),
+ tre_max, channel->tlv_count);
+ if (ret)
+ return ret;
+
+ /* Each TRE needs a command info structure */
+ ret = gsi_trans_pool_init(&trans_info->info_pool,
+ sizeof(struct ipa_cmd_info),
+ tre_max, channel->tlv_count);
+ if (ret)
+ gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
+
+ return ret;
+}
+
+void ipa_cmd_pool_exit(struct gsi_channel *channel)
+{
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ struct device *dev = channel->gsi->dev;
+
+ gsi_trans_pool_exit(&trans_info->info_pool);
+ gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
+}
+
+static union ipa_cmd_payload *
+ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
+{
+ struct gsi_trans_info *trans_info;
+ struct ipa_endpoint *endpoint;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;
+
+ return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
+}
+
+/* If hash_size is 0, hash_offset and hash_addr are ignored. */
+void ipa_cmd_table_init_add(struct gsi_trans *trans,
+ enum ipa_cmd_opcode opcode, u16 size, u32 offset,
+ dma_addr_t addr, u16 hash_size, u32 hash_offset,
+ dma_addr_t hash_addr)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ struct ipa_cmd_hw_ip_fltrt_init *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+ u64 val;
+
+ /* Record the non-hash table offset and size */
+ offset += ipa->mem_offset;
+ val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
+ val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
+
+ /* The hash table offset and address are zero if its size is 0 */
+ if (hash_size) {
+ /* Record the hash table offset and size */
+ hash_offset += ipa->mem_offset;
+ val |= u64_encode_bits(hash_offset,
+ IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
+ val |= u64_encode_bits(hash_size,
+ IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
+ }
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->table_init;
+
+ /* Fill in all offsets and sizes and the non-hash table address */
+ if (hash_size)
+ payload->hash_rules_addr = cpu_to_le64(hash_addr);
+ payload->flags = cpu_to_le64(val);
+ payload->nhash_rules_addr = cpu_to_le64(addr);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+/* Initialize header space in IPA-local memory */
+void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
+ dma_addr_t addr)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ struct ipa_cmd_hw_hdr_init_local *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+ u32 flags;
+
+ offset += ipa->mem_offset;
+
+ /* With this command we tell the IPA where in its local memory the
+ * header tables reside. The content of the buffer provided is
+ * also written via DMA into that space. The IPA hardware owns
+ * the table, but the AP must initialize it.
+ */
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->hdr_init_local;
+
+ payload->hdr_table_addr = cpu_to_le64(addr);
+ flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
+ flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
+ payload->flags = cpu_to_le32(flags);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
+ u32 mask, bool clear_full)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ struct ipa_cmd_register_write *payload;
+ union ipa_cmd_payload *cmd_payload;
+ u32 opcode = IPA_CMD_REGISTER_WRITE;
+ dma_addr_t payload_addr;
+ u32 clear_option;
+ u32 options;
+ u16 flags;
+
+ /* pipeline_clear_src_grp is not used */
+ clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;
+
+ if (ipa->version != IPA_VERSION_3_5_1) {
+ u16 offset_high;
+ u32 val;
+
+ /* Opcode encodes pipeline clear options */
+ /* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
+ val = u16_encode_bits(clear_option,
+ REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
+ opcode |= val;
+
+ /* Extract the high 4 bits from the offset */
+ offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
+ offset &= (1 << 16) - 1;
+
+ /* Extract the top 4 bits and encode it into the flags field */
+ flags = u16_encode_bits(offset_high,
+ REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
+ options = 0; /* reserved */
+
+ } else {
+ flags = 0; /* SKIP_CLEAR flag is always 0 */
+ options = u16_encode_bits(clear_option,
+ REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
+ }
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->register_write;
+
+ payload->flags = cpu_to_le16(flags);
+ payload->offset = cpu_to_le16((u16)offset);
+ payload->value = cpu_to_le32(value);
+ payload->value_mask = cpu_to_le32(mask);
+ payload->clear_options = cpu_to_le32(options);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ DMA_NONE, opcode);
+}
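+
+/* A worked example (illustrative values, not taken from the driver):
+ * for IPA versions other than v3.5.1 a register offset may be up to
+ * 20 bits wide, but the payload's offset field holds only 16.  For a
+ * hypothetical offset of 0x12345, u32_get_bits(0x12345, GENMASK(19, 16))
+ * yields 0x1, which is encoded into the flags field, while the low
+ * 16 bits (0x2345) are what get written to payload->offset.
+ */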
+
+/* Skip IP packet processing on the next data transfer on a TX channel */
+static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ struct ipa_cmd_ip_packet_init *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+
+ /* assert(endpoint_id <
+ field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK)); */
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->ip_packet_init;
+
+ payload->dest_endpoint = u8_encode_bits(endpoint_id,
+ IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+/* Use a 32-bit DMA command to zero a block of memory */
+void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
+ dma_addr_t addr, bool toward_ipa)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_DMA_TASK_32B_ADDR;
+ struct ipa_cmd_hw_dma_task_32b_addr *payload;
+ union ipa_cmd_payload *cmd_payload;
+ enum dma_data_direction direction;
+ dma_addr_t payload_addr;
+ u16 flags;
+
+ /* assert(addr <= U32_MAX); */
+ addr &= GENMASK_ULL(31, 0);
+
+ /* The opcode encodes the number of DMA operations in the high byte */
+ opcode |= u16_encode_bits(1, DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK);
+
+ direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ /* complete: 0 = don't interrupt; eof: 0 = don't assert eot */
+ flags = DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK;
+ /* lock: 0 = don't lock endpoint; unlock: 0 = don't unlock */
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->dma_task_32b_addr;
+
+ payload->flags = cpu_to_le16(flags);
+ payload->size = cpu_to_le16(size);
+ payload->addr = cpu_to_le32((u32)addr);
+ payload->packet_size = cpu_to_le16(size);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+/* Use a DMA command to read or write a block of IPA-resident memory */
+void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
+ dma_addr_t addr, bool toward_ipa)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
+ struct ipa_cmd_hw_dma_mem_mem *payload;
+ union ipa_cmd_payload *cmd_payload;
+ enum dma_data_direction direction;
+ dma_addr_t payload_addr;
+ u16 flags;
+
+ /* size and offset must fit in 16 bit fields */
+ /* assert(size > 0 && size <= U16_MAX); */
+ /* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */
+
+ offset += ipa->mem_offset;
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->dma_shared_mem;
+
+ /* payload->clear_after_read was reserved prior to IPA v4.0. It's
+ * never needed for current code, so it's 0 regardless of version.
+ */
+ payload->size = cpu_to_le16(size);
+ payload->local_addr = cpu_to_le16(offset);
+ /* payload->flags:
+ * direction: 0 = write to IPA, 1 = read from IPA
+ * Starting at v4.0 these are reserved; either way, all zero:
+ * pipeline clear: 0 = wait for pipeline clear (don't skip)
+ * clear_options: 0 = pipeline_clear_hps
+ * Instead, for v4.0+ these are encoded in the opcode. But again
+ * since both values are 0 we won't bother OR'ing them in.
+ */
+ flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
+ payload->flags = cpu_to_le16(flags);
+ payload->system_addr = cpu_to_le64(addr);
+
+ direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
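+
+/* Usage sketch (hypothetical values, for illustration only): reading
+ * a table region out of IPA-local memory into a previously mapped
+ * DMA buffer could look like:
+ *
+ *	ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size,
+ *				   addr, false);
+ *
+ * Passing true for toward_ipa would instead write the buffer's
+ * contents into the IPA-resident region.
+ */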
+
+static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ struct ipa_cmd_ip_packet_tag_status *payload;
+ union ipa_cmd_payload *cmd_payload;
+ dma_addr_t payload_addr;
+
+ /* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */
+
+ cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+ payload = &cmd_payload->ip_packet_tag_status;
+
+ payload->tag = u64_encode_bits(tag, IP_PACKET_TAG_STATUS_TAG_FMASK);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+/* Issue a small command TX data transfer */
+static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ enum dma_data_direction direction = DMA_TO_DEVICE;
+ enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
+ union ipa_cmd_payload *payload;
+ dma_addr_t payload_addr;
+
+ /* assert(size <= sizeof(*payload)); */
+
+ /* Just transfer a zero-filled payload structure */
+ payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
+
+ gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
+ direction, opcode);
+}
+
+void ipa_cmd_tag_process_add(struct gsi_trans *trans)
+{
+ ipa_cmd_register_write_add(trans, 0, 0, 0, true);
+#if 1
+ /* Reference these functions to avoid "defined but not used" warnings */
+ (void)ipa_cmd_ip_packet_init_add;
+ (void)ipa_cmd_ip_tag_status_add;
+ (void)ipa_cmd_transfer_add;
+#else
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ struct gsi_endpoint *endpoint;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
+
+ ipa_cmd_ip_tag_status_add(trans, 0xcba987654321);
+
+ ipa_cmd_transfer_add(trans, 4);
+#endif
+}
+
+/* Returns the number of commands required for the tag process */
+u32 ipa_cmd_tag_process_count(void)
+{
+ return 4;
+}
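+
+/* Note: the value 4 corresponds to the four commands a complete tag
+ * process would add: a register write, an IP packet init, an IP tag
+ * status, and a small transfer (see ipa_cmd_tag_process_add() above).
+ */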
+
+static struct ipa_cmd_info *
+ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
+{
+ struct gsi_channel *channel;
+
+ channel = &endpoint->ipa->gsi.channel[endpoint->channel_id];
+
+ return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count);
+}
+
+/* Allocate a transaction for the command TX endpoint */
+struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
+{
+ struct ipa_endpoint *endpoint;
+ struct gsi_trans *trans;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+
+ trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
+ tre_count, DMA_NONE);
+ if (trans)
+ trans->info = ipa_cmd_info_alloc(endpoint, tre_count);
+
+ return trans;
+}
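+
+/* A typical calling sequence (a sketch mirroring the exception-reset
+ * path in ipa_endpoint.c, not a new API): size a transaction for the
+ * commands to be issued, add them, then commit and wait:
+ *
+ *	count = ipa_cmd_tag_process_count();
+ *	trans = ipa_cmd_trans_alloc(ipa, count);
+ *	if (trans) {
+ *		ipa_cmd_tag_process_add(trans);
+ *		gsi_trans_commit_wait(trans);
+ *	}
+ */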
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
new file mode 100644
index 000000000000..4917525b3a47
--- /dev/null
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_CMD_H_
+#define _IPA_CMD_H_
+
+#include <linux/types.h>
+#include <linux/dma-direction.h>
+
+struct sk_buff;
+struct scatterlist;
+
+struct ipa;
+struct ipa_mem;
+struct gsi_trans;
+struct gsi_channel;
+
+/**
+ * enum ipa_cmd_opcode: IPA immediate commands
+ *
+ * All immediate commands are issued using the AP command TX endpoint.
+ * The numeric values here are the opcodes for IPA v3.5.1 hardware.
+ *
+ * IPA_CMD_NONE is a special (invalid) value that's used to indicate
+ * a request is *not* an immediate command.
+ */
+enum ipa_cmd_opcode {
+ IPA_CMD_NONE = 0,
+ IPA_CMD_IP_V4_FILTER_INIT = 3,
+ IPA_CMD_IP_V6_FILTER_INIT = 4,
+ IPA_CMD_IP_V4_ROUTING_INIT = 7,
+ IPA_CMD_IP_V6_ROUTING_INIT = 8,
+ IPA_CMD_HDR_INIT_LOCAL = 9,
+ IPA_CMD_REGISTER_WRITE = 12,
+ IPA_CMD_IP_PACKET_INIT = 16,
+ IPA_CMD_DMA_TASK_32B_ADDR = 17,
+ IPA_CMD_DMA_SHARED_MEM = 19,
+ IPA_CMD_IP_PACKET_TAG_STATUS = 20,
+};
+
+/**
+ * struct ipa_cmd_info - information needed for an IPA immediate command
+ *
+ * @opcode: The command opcode.
+ * @direction: Direction of data transfer for DMA commands
+ */
+struct ipa_cmd_info {
+ enum ipa_cmd_opcode opcode;
+ enum dma_data_direction direction;
+};
+
+#ifdef IPA_VALIDATE
+
+/**
+ * ipa_cmd_table_valid() - Validate a memory region holding a table
+ * @ipa: - IPA pointer
+ * @mem: - IPA memory region descriptor
+ * @route: - Whether the region holds a route or filter table
+ * @ipv6: - Whether the table is for IPv6 or IPv4
+ * @hashed: - Whether the table is hashed or non-hashed
+ *
+ * @Return: true if region is valid, false otherwise
+ */
+bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
+ bool route, bool ipv6, bool hashed);
+
+/**
+ * ipa_cmd_data_valid() - Validate command-related configuration
+ * @ipa: - IPA pointer
+ *
+ * @Return: true if assumptions required for command are valid
+ */
+bool ipa_cmd_data_valid(struct ipa *ipa);
+
+#else /* !IPA_VALIDATE */
+
+static inline bool ipa_cmd_table_valid(struct ipa *ipa,
+ const struct ipa_mem *mem, bool route,
+ bool ipv6, bool hashed)
+{
+ return true;
+}
+
+static inline bool ipa_cmd_data_valid(struct ipa *ipa)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
+
+/**
+ * ipa_cmd_pool_init() - initialize command channel pools
+ * @channel: AP->IPA command TX GSI channel pointer
+ * @tre_count: Number of pool elements to allocate
+ *
+ * @Return: 0 if successful, or a negative error code
+ */
+int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_count);
+
+/**
+ * ipa_cmd_pool_exit() - Inverse of ipa_cmd_pool_init()
+ * @channel: AP->IPA command TX GSI channel pointer
+ */
+void ipa_cmd_pool_exit(struct gsi_channel *channel);
+
+/**
+ * ipa_cmd_table_init_add() - Add table init command to a transaction
+ * @trans: GSI transaction
+ * @opcode: IPA immediate command opcode
+ * @size: Size of non-hashed table memory
+ * @offset: Offset in IPA shared memory of non-hashed table memory
+ * @addr: DMA address of non-hashed table data to write
+ * @hash_size: Size of hashed table memory
+ * @hash_offset: Offset in IPA shared memory of hashed table memory
+ * @hash_addr: DMA address of hashed table data to write
+ *
+ * If hash_size is 0, hash_offset and hash_addr are ignored.
+ */
+void ipa_cmd_table_init_add(struct gsi_trans *trans, enum ipa_cmd_opcode opcode,
+ u16 size, u32 offset, dma_addr_t addr,
+ u16 hash_size, u32 hash_offset,
+ dma_addr_t hash_addr);
+
+/**
+ * ipa_cmd_hdr_init_local_add() - Add a header init command to a transaction
+ * @trans: GSI transaction
+ * @offset: Offset of header memory in IPA local space
+ * @size: Size of header memory
+ * @addr: DMA address of buffer to be written from
+ *
+ * Defines and fills the location in IPA memory to use for headers.
+ */
+void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
+ dma_addr_t addr);
+
+/**
+ * ipa_cmd_register_write_add() - Add a register write command to a transaction
+ * @trans: GSI transaction
+ * @offset: Offset of register to be written
+ * @value: Value to be written
+ * @mask: Mask of bits in register to update with bits from value
+ * @clear_full: Pipeline clear option; true means full pipeline clear
+ */
+void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
+ u32 mask, bool clear_full);
+
+/**
+ * ipa_cmd_dma_task_32b_addr_add() - Add a 32-bit DMA command to a transaction
+ * @trans: GSI transaction
+ * @size: Number of bytes of memory to be transferred
+ * @addr: DMA address of buffer to be read into or written from
+ * @toward_ipa: true means write to IPA memory; false means read
+ */
+void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
+ dma_addr_t addr, bool toward_ipa);
+
+/**
+ * ipa_cmd_dma_shared_mem_add() - Add a DMA memory command to a transaction
+ * @trans: GSI transaction
+ * @offset: Offset of IPA memory to be read or written
+ * @size: Number of bytes of memory to be transferred
+ * @addr: DMA address of buffer to be read into or written from
+ * @toward_ipa: true means write to IPA memory; false means read
+ */
+void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset,
+ u16 size, dma_addr_t addr, bool toward_ipa);
+
+/**
+ * ipa_cmd_tag_process_add() - Add IPA tag process commands to a transaction
+ * @trans: GSI transaction
+ */
+void ipa_cmd_tag_process_add(struct gsi_trans *trans);
+
+/**
+ * ipa_cmd_tag_process_count() - Number of commands in a tag process
+ *
+ * @Return: The number of elements to allocate in a transaction
+ * to hold tag process commands
+ */
+u32 ipa_cmd_tag_process_count(void);
+
+/**
+ * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
+ * @ipa: IPA pointer
+ * @tre_count: Number of elements in the transaction
+ *
+ * @Return: A GSI transaction structure, or a null pointer if all
+ * available transactions are in use
+ */
+struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count);
+
+#endif /* _IPA_CMD_H_ */
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c
new file mode 100644
index 000000000000..042b5fc3c135
--- /dev/null
+++ b/drivers/net/ipa/ipa_data-sc7180.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2019-2020 Linaro Ltd. */
+
+#include <linux/log2.h>
+
+#include "gsi.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/* Endpoint configuration for the SC7180 SoC. */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 1,
+ .endpoint_id = 6,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_DMA_ONLY,
+ .config = {
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 2,
+ .endpoint_id = 8,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 6,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_INVALID,
+ .config = {
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .pad_align = ilog2(sizeof(u32)),
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 0,
+ .endpoint_id = 1,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .seq_type =
+ IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP,
+ .config = {
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 3,
+ .endpoint_id = 9,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 6,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_INVALID,
+ .config = {
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_COMMAND_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 1,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ },
+ [IPA_ENDPOINT_MODEM_LAN_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 3,
+ .endpoint_id = 13,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 4,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 10,
+ .toward_ipa = false,
+ },
+};
+
+/* For the SC7180, resource groups are allocated this way:
+ * group 0: UL_DL
+ */
+static const struct ipa_resource_src ipa_resource_src[] = {
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
+ .limits[0] = {
+ .min = 3,
+ .max = 63,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ .limits[0] = {
+ .min = 3,
+ .max = 3,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ .limits[0] = {
+ .min = 10,
+ .max = 10,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ .limits[0] = {
+ .min = 1,
+ .max = 1,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+ .limits[0] = {
+ .min = 5,
+ .max = 5,
+ },
+ },
+};
+
+static const struct ipa_resource_dst ipa_resource_dst[] = {
+ {
+ .type = IPA_RESOURCE_TYPE_DST_DATA_SECTORS,
+ .limits[0] = {
+ .min = 3,
+ .max = 3,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+ .limits[0] = {
+ .min = 1,
+ .max = 63,
+ },
+ },
+};
+
+/* Resource configuration for the SC7180 SoC. */
+static const struct ipa_resource_data ipa_resource_data = {
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region configuration for the SC7180 SoC. */
+static const struct ipa_mem ipa_mem_data[] = {
+ [IPA_MEM_UC_SHARED] = {
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_INFO] = {
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_FILTER_HASHED] = {
+ .offset = 0x0288,
+ .size = 0,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_FILTER] = {
+ .offset = 0x0290,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER_HASHED] = {
+ .offset = 0x0310,
+ .size = 0,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER] = {
+ .offset = 0x0318,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE_HASHED] = {
+ .offset = 0x0398,
+ .size = 0,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE] = {
+ .offset = 0x03a0,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE_HASHED] = {
+ .offset = 0x0420,
+ .size = 0,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE] = {
+ .offset = 0x0428,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_MODEM_HEADER] = {
+ .offset = 0x04a8,
+ .size = 0x0140,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_HEADER] = {
+ .offset = 0x05e8,
+ .size = 0x0000,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM_PROC_CTX] = {
+ .offset = 0x05f0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_PROC_CTX] = {
+ .offset = 0x07f0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_PDN_CONFIG] = {
+ .offset = 0x09f8,
+ .size = 0x0050,
+ .canary_count = 2,
+ },
+ [IPA_MEM_STATS_QUOTA] = {
+ .offset = 0x0a50,
+ .size = 0x0060,
+ .canary_count = 2,
+ },
+ [IPA_MEM_STATS_TETHERING] = {
+ .offset = 0x0ab0,
+ .size = 0x0140,
+ .canary_count = 0,
+ },
+ [IPA_MEM_STATS_DROP] = {
+ .offset = 0x0bf0,
+ .size = 0,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM] = {
+ .offset = 0x0bf0,
+ .size = 0x140c,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_EVENT_RING] = {
+ .offset = 0x2000,
+ .size = 0,
+ .canary_count = 1,
+ },
+};
+
+/* Configuration data for the SC7180 SoC. */
+const struct ipa_data ipa_data_sc7180 = {
+ .version = IPA_VERSION_4_2,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_count = ARRAY_SIZE(ipa_mem_data),
+ .mem_data = ipa_mem_data,
+};
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
new file mode 100644
index 000000000000..0d9c36e1e806
--- /dev/null
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/log2.h>
+
+#include "gsi.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/* Endpoint configuration for the SDM845 SoC. */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 4,
+ .endpoint_id = 5,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_DMA_ONLY,
+ .config = {
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 5,
+ .endpoint_id = 9,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_INVALID,
+ .config = {
+ .checksum = true,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .pad_align = ilog2(sizeof(u32)),
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 3,
+ .endpoint_id = 2,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 16,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .seq_type =
+ IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ .config = {
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ .delay = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 6,
+ .endpoint_id = 10,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 8,
+ },
+ .endpoint = {
+ .seq_type = IPA_SEQ_INVALID,
+ .config = {
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_COMMAND_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 1,
+ .endpoint_id = 4,
+ .toward_ipa = true,
+ },
+ [IPA_ENDPOINT_MODEM_LAN_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 3,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_LAN_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 3,
+ .endpoint_id = 13,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 4,
+ .endpoint_id = 6,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 12,
+ .toward_ipa = false,
+ },
+};
+
+/* For the SDM845, resource groups are allocated this way:
+ * group 0: LWA_DL
+ * group 1: UL_DL
+ */
+static const struct ipa_resource_src ipa_resource_src[] = {
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
+ .limits[0] = {
+ .min = 1,
+ .max = 63,
+ },
+ .limits[1] = {
+ .min = 1,
+ .max = 63,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ .limits[0] = {
+ .min = 10,
+ .max = 10,
+ },
+ .limits[1] = {
+ .min = 10,
+ .max = 10,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ .limits[0] = {
+ .min = 12,
+ .max = 12,
+ },
+ .limits[1] = {
+ .min = 14,
+ .max = 14,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ .limits[0] = {
+ .min = 0,
+ .max = 63,
+ },
+ .limits[1] = {
+ .min = 0,
+ .max = 63,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+ .limits[0] = {
+ .min = 14,
+ .max = 14,
+ },
+ .limits[1] = {
+ .min = 20,
+ .max = 20,
+ },
+ },
+};
+
+static const struct ipa_resource_dst ipa_resource_dst[] = {
+ {
+ .type = IPA_RESOURCE_TYPE_DST_DATA_SECTORS,
+ .limits[0] = {
+ .min = 4,
+ .max = 4,
+ },
+ .limits[1] = {
+ .min = 4,
+ .max = 4,
+ },
+ },
+ {
+ .type = IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+ .limits[0] = {
+ .min = 2,
+ .max = 63,
+ },
+ .limits[1] = {
+ .min = 1,
+ .max = 63,
+ },
+ },
+};
+
+/* Resource configuration for the SDM845 SoC. */
+static const struct ipa_resource_data ipa_resource_data = {
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region configuration for the SDM845 SoC. */
+static const struct ipa_mem ipa_mem_data[] = {
+ [IPA_MEM_UC_SHARED] = {
+ .offset = 0x0000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_INFO] = {
+ .offset = 0x0080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_V4_FILTER_HASHED] = {
+ .offset = 0x0288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_FILTER] = {
+ .offset = 0x0308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER_HASHED] = {
+ .offset = 0x0388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_FILTER] = {
+ .offset = 0x0408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE_HASHED] = {
+ .offset = 0x0488,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V4_ROUTE] = {
+ .offset = 0x0508,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE_HASHED] = {
+ .offset = 0x0588,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_V6_ROUTE] = {
+ .offset = 0x0608,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ [IPA_MEM_MODEM_HEADER] = {
+ .offset = 0x0688,
+ .size = 0x0140,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_HEADER] = {
+ .offset = 0x07c8,
+ .size = 0x0000,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM_PROC_CTX] = {
+ .offset = 0x07d0,
+ .size = 0x0200,
+ .canary_count = 2,
+ },
+ [IPA_MEM_AP_PROC_CTX] = {
+ .offset = 0x09d0,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ [IPA_MEM_MODEM] = {
+ .offset = 0x0bd8,
+ .size = 0x1024,
+ .canary_count = 0,
+ },
+ [IPA_MEM_UC_EVENT_RING] = {
+ .offset = 0x1c00,
+ .size = 0x0400,
+ .canary_count = 1,
+ },
+};
+
+/* Configuration data for the SDM845 SoC. */
+const struct ipa_data ipa_data_sdm845 = {
+ .version = IPA_VERSION_3_5_1,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_count = ARRAY_SIZE(ipa_mem_data),
+ .mem_data = ipa_mem_data,
+};
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
new file mode 100644
index 000000000000..7110de2de817
--- /dev/null
+++ b/drivers/net/ipa/ipa_data.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_DATA_H_
+#define _IPA_DATA_H_
+
+#include <linux/types.h>
+
+#include "ipa_version.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+
+/**
+ * DOC: IPA/GSI Configuration Data
+ *
+ * Boot-time configuration data is used to define the configuration of the
+ * IPA and GSI resources to use for a given platform. This data is supplied
+ * via the Device Tree match table, associated with a particular compatible
+ * string. The data defines information about resources, endpoints, and
+ * channels.
+ *
+ * Resources are data structures used internally by the IPA hardware. The
+ * configuration data defines the number (or limits of the number) of various
+ * types of these resources.
+ *
+ * Endpoint configuration data defines properties of both IPA endpoints and
+ * GSI channels. A channel is a GSI construct, and represents a single
+ * communication path between the IPA and a particular execution environment
+ * (EE), such as the AP or Modem. Each EE has a set of channels associated
+ * with it, and each channel has an ID unique for that EE. For the most part
+ * the only GSI channels of concern to this driver belong to the AP.
+ *
+ * An endpoint is an IPA construct representing a single channel anywhere
+ * in the system. An IPA endpoint ID maps directly to an (EE, channel_id)
+ * pair. Generally, this driver is concerned only with endpoints associated
+ * with the AP, though this will change when support for routing (etc.) is
+ * added. IPA endpoint and GSI channel configuration data are defined
+ * together, establishing the endpoint_id->(EE, channel_id) mapping.
+ *
+ * Endpoint configuration data consists of three parts: properties that
+ * are common to IPA and GSI (EE ID, channel ID, endpoint ID, and direction);
+ * properties associated with the GSI channel; and properties associated with
+ * the IPA endpoint.
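+ *
+ * As an illustration (drawn from the SDM845 data in this series, not
+ * normative): the AP command TX endpoint is execution environment
+ * GSI_EE_AP, channel ID 4, and IPA endpoint ID 5, so endpoint 5 maps
+ * to the (AP, 4) pair, and no other endpoint may use that pair.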
+ */
+
+/* The maximum value returned by ipa_resource_group_count() */
+#define IPA_RESOURCE_GROUP_COUNT 4
+
+/**
+ * struct gsi_channel_data - GSI channel configuration data
+ * @tre_count: number of TREs in the channel ring
+ * @event_count: number of slots in the associated event ring
+ * @tlv_count: number of entries in channel's TLV FIFO
+ *
+ * A GSI channel is a unidirectional means of transferring data to or
+ * from (and through) the IPA. A GSI channel has a ring buffer made
+ * up of "transfer elements" (TREs) that specify individual data transfers
+ * or IPA immediate commands. TREs are filled by the AP, and control
+ * is passed to IPA hardware by writing the last written element
+ * into a doorbell register.
+ *
+ * When data transfer commands have completed the GSI generates an
+ * event (a structure of data) and optionally signals the AP with
+ * an interrupt. Event structures are implemented by another ring
+ * buffer, directed toward the AP from the IPA.
+ *
+ * The input to a GSI channel is a FIFO of type/length/value (TLV)
+ * elements, and the size of this FIFO limits the number of TREs
+ * that can be included in a single transaction.
+ */
+struct gsi_channel_data {
+ u16 tre_count;
+ u16 event_count;
+ u8 tlv_count;
+};
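+
+/* As a concrete reference (values from the SDM845 configuration in
+ * this series): the AP command channel uses a 512-element TRE ring,
+ * a 256-slot event ring, and a 20-entry TLV FIFO, so a single
+ * transaction on that channel can carry at most 20 TREs.
+ */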
+
+/**
+ * struct ipa_endpoint_tx_data - configuration data for TX endpoints
+ * @status_endpoint: endpoint to which status elements are sent
+ * @delay: whether endpoint starts in delay mode
+ *
+ * Delay mode prevents a TX endpoint from transmitting anything, even if
+ * commands have been presented to the hardware. Once the endpoint exits
+ * delay mode, queued transfer commands are sent.
+ *
+ * The @status_endpoint is only valid if the endpoint's @status_enable
+ * flag is set.
+ */
+struct ipa_endpoint_tx_data {
+ enum ipa_endpoint_name status_endpoint;
+ bool delay;
+};
+
+/**
+ * struct ipa_endpoint_rx_data - configuration data for RX endpoints
+ * @pad_align: power-of-2 boundary to which packet payload is aligned
+ * @aggr_close_eof: whether aggregation closes on end-of-frame
+ *
+ * With each packet it transfers, the IPA hardware can perform certain
+ * transformations of its packet data. One of these is adding pad bytes
+ * to the end of the packet data so the result ends on a power-of-2 boundary.
+ *
+ * It is also able to aggregate multiple packets into a single receive buffer.
+ * Aggregation is "open" while a buffer is being filled, and "closes" when
+ * certain criteria are met. One of those criteria is the sender indicating
+ * a "frame" consisting of several transfers has ended.
+ */
+struct ipa_endpoint_rx_data {
+ u32 pad_align;
+ bool aggr_close_eof;
+};
+
+/**
+ * struct ipa_endpoint_config_data - IPA endpoint hardware configuration
+ * @checksum: whether checksum offload is enabled
+ * @qmap: whether endpoint uses QMAP protocol
+ * @aggregation: whether endpoint supports aggregation
+ * @status_enable: whether endpoint uses status elements
+ * @dma_mode: whether endpoint operates in DMA mode
+ * @dma_endpoint: peer endpoint, if operating in DMA mode
+ * @tx: TX-specific endpoint information (see above)
+ * @rx: RX-specific endpoint information (see above)
+ */
+struct ipa_endpoint_config_data {
+ bool checksum;
+ bool qmap;
+ bool aggregation;
+ bool status_enable;
+ bool dma_mode;
+ enum ipa_endpoint_name dma_endpoint;
+ union {
+ struct ipa_endpoint_tx_data tx;
+ struct ipa_endpoint_rx_data rx;
+ };
+};
+
+/**
+ * struct ipa_endpoint_data - IPA endpoint configuration data
+ * @filter_support: whether endpoint supports filtering
+ * @seq_type: hardware sequencer type used for endpoint
+ * @config: hardware configuration (see above)
+ *
+ * Not all endpoints support the IPA filtering capability. A filter table
+ * defines the filters to apply for those endpoints that support it. The
+ * AP is responsible for initializing this table, and it must include entries
+ * for non-AP endpoints. For this reason we define *all* endpoints used
+ * in the system, and indicate whether they support filtering.
+ *
+ * The remaining endpoint configuration data applies only to AP endpoints.
+ * The IPA hardware is implemented by sequencers, and the AP must program
+ * the type(s) of these sequencers at initialization time. The remaining
+ * endpoint configuration data is defined above.
+ */
+struct ipa_endpoint_data {
+ bool filter_support;
+ /* The next two are specified only for AP endpoints */
+ enum ipa_seq_type seq_type;
+ struct ipa_endpoint_config_data config;
+};
+
+/**
+ * struct ipa_gsi_endpoint_data - GSI channel/IPA endpoint data
+ * @ee_id: GSI execution environment ID
+ * @channel_id: GSI channel ID
+ * @endpoint_id: IPA endpoint ID
+ * @toward_ipa: direction of data transfer
+ * @channel: GSI channel configuration data (see above)
+ * @endpoint: IPA endpoint configuration data (see above)
+ */
+struct ipa_gsi_endpoint_data {
+ u8 ee_id; /* enum gsi_ee_id */
+ u8 channel_id;
+ u8 endpoint_id;
+ bool toward_ipa;
+
+ struct gsi_channel_data channel;
+ struct ipa_endpoint_data endpoint;
+};
+
+/** enum ipa_resource_type_src - source resource types */
+enum ipa_resource_type_src {
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+};
+
+/** enum ipa_resource_type_dst - destination resource types */
+enum ipa_resource_type_dst {
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+};
+
+/**
+ * struct ipa_resource_limits - minimum and maximum resource counts
+ * @min: minimum number of resources of a given type
+ * @max: maximum number of resources of a given type
+ */
+struct ipa_resource_limits {
+ u32 min;
+ u32 max;
+};
+
+/**
+ * struct ipa_resource_src - source endpoint group resource usage
+ * @type: source group resource type
+ * @limits: array of limits to use for each resource group
+ */
+struct ipa_resource_src {
+ enum ipa_resource_type_src type;
+ struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_COUNT];
+};
+
+/**
+ * struct ipa_resource_dst - destination endpoint group resource usage
+ * @type: destination group resource type
+ * @limits: array of limits to use for each resource group
+ */
+struct ipa_resource_dst {
+ enum ipa_resource_type_dst type;
+ struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_COUNT];
+};
+
+/**
+ * struct ipa_resource_data - IPA resource configuration data
+ * @resource_src_count: number of entries in the resource_src array
+ * @resource_src: source endpoint group resources
+ * @resource_dst_count: number of entries in the resource_dst array
+ * @resource_dst: destination endpoint group resources
+ *
+ * In order to manage quality of service between endpoints, certain resources
+ * required for operation are allocated to groups of endpoints. Generally
+ * this information is invisible to the AP, but the AP is responsible for
+ * programming it at initialization time, so we specify it here.
+ */
+struct ipa_resource_data {
+ u32 resource_src_count;
+ const struct ipa_resource_src *resource_src;
+ u32 resource_dst_count;
+ const struct ipa_resource_dst *resource_dst;
+};
+
+/**
+ * struct ipa_mem_data - IPA-local memory region description
+ * @offset: offset in IPA memory space to base of the region
+ * @size: size of the region in bytes
+ * @canary_count: number of 32-bit "canary" values that precede region
+ */
+struct ipa_mem_data {
+ u32 offset;
+ u16 size;
+ u16 canary_count;
+};
+
+/**
+ * struct ipa_data - combined IPA/GSI configuration data
+ * @version: IPA hardware version
+ * @endpoint_count: number of entries in endpoint_data array
+ * @endpoint_data: IPA endpoint/GSI channel data
+ * @resource_data: IPA resource configuration data
+ * @mem_count: number of entries in mem_data array
+ * @mem_data: IPA-local shared memory region data
+ */
+struct ipa_data {
+ enum ipa_version version;
+ u32 endpoint_count; /* # entries in endpoint_data[] */
+ const struct ipa_gsi_endpoint_data *endpoint_data;
+ const struct ipa_resource_data *resource_data;
+ u32 mem_count; /* # entries in mem_data[] */
+ const struct ipa_mem *mem_data;
+};
+
+extern const struct ipa_data ipa_data_sdm845;
+extern const struct ipa_data ipa_data_sc7180;
+
+#endif /* _IPA_DATA_H_ */
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
new file mode 100644
index 000000000000..6de03be28784
--- /dev/null
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -0,0 +1,1706 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+#include <linux/if_rmnet.h>
+#include <linux/dma-direction.h>
+
+#include "gsi.h"
+#include "gsi_trans.h"
+#include "ipa.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_cmd.h"
+#include "ipa_mem.h"
+#include "ipa_modem.h"
+#include "ipa_table.h"
+#include "ipa_gsi.h"
+
+#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
+
+#define IPA_REPLENISH_BATCH 16
+
+/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
+#define IPA_RX_BUFFER_SIZE 8192 /* PAGE_SIZE > 4096 wastes a LOT */
+
+/* The amount of RX buffer space consumed by standard skb overhead */
+#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
+
+#define IPA_ENDPOINT_STOP_RX_RETRIES 10
+#define IPA_ENDPOINT_STOP_RX_SIZE 1 /* bytes */
+
+#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
+#define IPA_AGGR_TIME_LIMIT_DEFAULT 1000 /* microseconds */
+
+#define ENDPOINT_STOP_DMA_TIMEOUT 15 /* milliseconds */
+
+/** enum ipa_status_opcode - status element opcode hardware values */
+enum ipa_status_opcode {
+ IPA_STATUS_OPCODE_PACKET = 0x01,
+ IPA_STATUS_OPCODE_NEW_FRAG_RULE = 0x02,
+ IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04,
+ IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08,
+ IPA_STATUS_OPCODE_LOG = 0x10,
+ IPA_STATUS_OPCODE_DCMP = 0x20,
+ IPA_STATUS_OPCODE_PACKET_2ND_PASS = 0x40,
+};
+
+/** enum ipa_status_exception - status element exception type */
+enum ipa_status_exception {
+ /* 0 means no exception */
+ IPA_STATUS_EXCEPTION_DEAGGR = 0x01,
+ IPA_STATUS_EXCEPTION_IPTYPE = 0x04,
+ IPA_STATUS_EXCEPTION_PACKET_LENGTH = 0x08,
+ IPA_STATUS_EXCEPTION_FRAG_RULE_MISS = 0x10,
+ IPA_STATUS_EXCEPTION_SW_FILT = 0x20,
+ /* The meaning of the next value depends on the IP version */
+ IPA_STATUS_EXCEPTION_NAT = 0x40, /* IPv4 */
+ IPA_STATUS_EXCEPTION_IPV6CT = IPA_STATUS_EXCEPTION_NAT,
+};
+
+/* Status element provided by hardware */
+struct ipa_status {
+ u8 opcode; /* enum ipa_status_opcode */
+ u8 exception; /* enum ipa_status_exception */
+ __le16 mask;
+ __le16 pkt_len;
+ u8 endp_src_idx;
+ u8 endp_dst_idx;
+ __le32 metadata;
+ __le32 flags1;
+ __le64 flags2;
+ __le32 flags3;
+ __le32 flags4;
+};
+
+/* Field masks for struct ipa_status structure fields */
+
+#define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
+
+#define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
+
+#define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK GENMASK(0, 0)
+#define IPA_STATUS_FLAGS1_FLT_HASH_FMASK GENMASK(1, 1)
+#define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK GENMASK(2, 2)
+#define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK GENMASK(3, 3)
+#define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK GENMASK(13, 4)
+#define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK GENMASK(14, 14)
+#define IPA_STATUS_FLAGS1_RT_HASH_FMASK GENMASK(15, 15)
+#define IPA_STATUS_FLAGS1_UCP_FMASK GENMASK(16, 16)
+#define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK GENMASK(21, 17)
+#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
+
+#define IPA_STATUS_FLAGS2_NAT_HIT_FMASK GENMASK_ULL(0, 0)
+#define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK GENMASK_ULL(13, 1)
+#define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK GENMASK_ULL(15, 14)
+#define IPA_STATUS_FLAGS2_TAG_INFO_FMASK GENMASK_ULL(63, 16)
+
+#define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK GENMASK(7, 0)
+#define IPA_STATUS_FLAGS3_TOD_CTR_FMASK GENMASK(31, 8)
+
+#define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK GENMASK(0, 0)
+#define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK GENMASK(10, 1)
+#define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK GENMASK(11, 11)
+#define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK GENMASK(15, 12)
+#define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK GENMASK(31, 16)
+
+#ifdef IPA_VALIDATE
+
+static void ipa_endpoint_validate_build(void)
+{
+ /* The aggregation byte limit defines the point at which an
+ * aggregation window will close. It is programmed into the
+ * IPA hardware as a number of KB. We don't use "hard byte
+ * limit" aggregation, which means that we need to supply
+ * enough space in a receive buffer to hold a complete MTU
+ * plus normal skb overhead *after* that aggregation byte
+ * limit has been crossed.
+ *
+ * This check just ensures we don't define a receive buffer
+ * size that would exceed what we can represent in the field
+ * that is used to program its size.
+ */
+ BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
+ field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
+ IPA_MTU + IPA_RX_BUFFER_OVERHEAD);
+
+ /* I honestly don't know where this requirement comes from. But
+ * it holds, and if we someday need to loosen the constraint we
+ * can try to track it down.
+ */
+ BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
+}
+
+static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *all_data,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ const struct ipa_gsi_endpoint_data *other_data;
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_endpoint_name other_name;
+
+ if (ipa_gsi_endpoint_data_empty(data))
+ return true;
+
+ if (!data->toward_ipa) {
+ if (data->endpoint.filter_support) {
+ dev_err(dev, "filtering not supported for "
+ "RX endpoint %u\n",
+ data->endpoint_id);
+ return false;
+ }
+
+ return true; /* Nothing more to check for RX */
+ }
+
+ if (data->endpoint.config.status_enable) {
+ other_name = data->endpoint.config.tx.status_endpoint;
+ if (other_name >= count) {
+ dev_err(dev, "status endpoint name %u out of range "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+
+ /* Status endpoint must be defined... */
+ other_data = &all_data[other_name];
+ if (ipa_gsi_endpoint_data_empty(other_data)) {
+ dev_err(dev, "DMA endpoint name %u undefined "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+
+ /* ...and has to be an RX endpoint... */
+ if (other_data->toward_ipa) {
+ dev_err(dev,
+ "status endpoint for endpoint %u not RX\n",
+ data->endpoint_id);
+ return false;
+ }
+
+ /* ...and if it's to be an AP endpoint... */
+ if (other_data->ee_id == GSI_EE_AP) {
+ /* ...make sure it has status enabled. */
+ if (!other_data->endpoint.config.status_enable) {
+ dev_err(dev,
+ "status not enabled for endpoint %u\n",
+ other_data->endpoint_id);
+ return false;
+ }
+ }
+ }
+
+ if (data->endpoint.config.dma_mode) {
+ other_name = data->endpoint.config.dma_endpoint;
+ if (other_name >= count) {
+ dev_err(dev, "DMA endpoint name %u out of range "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+
+ other_data = &all_data[other_name];
+ if (ipa_gsi_endpoint_data_empty(other_data)) {
+ dev_err(dev, "DMA endpoint name %u undefined "
+ "for endpoint %u\n",
+ other_name, data->endpoint_id);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ const struct ipa_gsi_endpoint_data *dp = data;
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_endpoint_name name;
+
+ ipa_endpoint_validate_build();
+
+ if (count > IPA_ENDPOINT_COUNT) {
+ dev_err(dev, "too many endpoints specified (%u > %u)\n",
+ count, IPA_ENDPOINT_COUNT);
+ return false;
+ }
+
+ /* Make sure needed endpoints have defined data */
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
+ dev_err(dev, "command TX endpoint not defined\n");
+ return false;
+ }
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
+ dev_err(dev, "LAN RX endpoint not defined\n");
+ return false;
+ }
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
+ dev_err(dev, "AP->modem TX endpoint not defined\n");
+ return false;
+ }
+ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
+ dev_err(dev, "AP<-modem RX endpoint not defined\n");
+ return false;
+ }
+
+ for (name = 0; name < count; name++, dp++)
+ if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
+ return false;
+
+ return true;
+}
+
+#else /* !IPA_VALIDATE */
+
+static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
+
+/* Allocate a transaction to use on a non-command endpoint */
+static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
+ u32 tre_count)
+{
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ u32 channel_id = endpoint->channel_id;
+ enum dma_data_direction direction;
+
+ direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
+}
+
+/* suspend_delay represents suspend for RX, delay for TX endpoints.
+ * Note that suspend is not supported starting with IPA v4.0.
+ */
+static int
+ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
+{
+ u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ u32 mask;
+ u32 val;
+
+ /* assert(ipa->version == IPA_VERSION_3_5_1); */
+ mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
+
+ val = ioread32(ipa->reg_virt + offset);
+ if (suspend_delay == !!(val & mask))
+ return -EALREADY; /* Already set to desired state */
+
+ val ^= mask;
+ iowrite32(val, ipa->reg_virt + offset);
+
+ return 0;
+}
+
+/* Enable or disable delay or suspend mode on all modem endpoints */
+void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
+{
+ bool support_suspend;
+ u32 endpoint_id;
+
+ /* DELAY mode doesn't work right on IPA v4.2 */
+ if (ipa->version == IPA_VERSION_4_2)
+ return;
+
+ /* Only IPA v3.5.1 supports SUSPEND mode on RX endpoints */
+ support_suspend = ipa->version == IPA_VERSION_3_5_1;
+
+ for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
+ struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
+
+ if (endpoint->ee_id != GSI_EE_MODEM)
+ continue;
+
+ /* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
+ if (endpoint->toward_ipa || support_suspend)
+ (void)ipa_endpoint_init_ctrl(endpoint, enable);
+ }
+}
+
+/* Reset all modem endpoints to use the default exception endpoint */
+int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
+{
+ u32 initialized = ipa->initialized;
+ struct gsi_trans *trans;
+ u32 count;
+
+ /* We need one command per modem TX endpoint. We can get an upper
+ * bound on that by assuming all initialized endpoints are modem->IPA.
+ * That won't happen, and we could be more precise, but this is fine
+ * for now. We need to end the transaction with a "tag process."
+ */
+ count = hweight32(initialized) + ipa_cmd_tag_process_count();
+ trans = ipa_cmd_trans_alloc(ipa, count);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction to reset modem exception endpoints\n");
+ return -EBUSY;
+ }
+
+ while (initialized) {
+ u32 endpoint_id = __ffs(initialized);
+ struct ipa_endpoint *endpoint;
+ u32 offset;
+
+ initialized ^= BIT(endpoint_id);
+
+ /* We only reset modem TX endpoints */
+ endpoint = &ipa->endpoint[endpoint_id];
+ if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
+ continue;
+
+ offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
+
+ /* Value written is 0, and all bits are updated. That
+ * means status is disabled on the endpoint, and as a
+ * result all other fields in the register are ignored.
+ */
+ ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
+ }
+
+ ipa_cmd_tag_process_add(trans);
+
+ /* XXX This should have a 1 second timeout */
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
+ u32 val = 0;
+
+ /* FRAG_OFFLOAD_EN is 0 */
+ if (endpoint->data->checksum) {
+ if (endpoint->toward_ipa) {
+ u32 checksum_offset;
+
+ val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
+ CS_OFFLOAD_EN_FMASK);
+ /* Checksum header offset is in 4-byte units */
+ checksum_offset = sizeof(struct rmnet_map_header);
+ checksum_offset /= sizeof(u32);
+ val |= u32_encode_bits(checksum_offset,
+ CS_METADATA_HDR_OFFSET_FMASK);
+ } else {
+ val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
+ CS_OFFLOAD_EN_FMASK);
+ }
+ } else {
+ val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
+ CS_OFFLOAD_EN_FMASK);
+ }
+ /* CS_GEN_QMB_MASTER_SEL is 0 */
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
+ u32 val = 0;
+
+ if (endpoint->data->qmap) {
+ size_t header_size = sizeof(struct rmnet_map_header);
+
+ if (endpoint->toward_ipa && endpoint->data->checksum)
+ header_size += sizeof(struct rmnet_map_ul_csum_header);
+
+ val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
+ /* metadata is the 4 byte rmnet_map header itself */
+ val |= HDR_OFST_METADATA_VALID_FMASK;
+ val |= u32_encode_bits(0, HDR_OFST_METADATA_FMASK);
+ /* HDR_ADDITIONAL_CONST_LEN is 0; (IPA->AP only) */
+ if (!endpoint->toward_ipa) {
+ u32 size_offset = offsetof(struct rmnet_map_header,
+ pkt_len);
+
+ val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
+ val |= u32_encode_bits(size_offset,
+ HDR_OFST_PKT_SIZE_FMASK);
+ }
+ /* HDR_A5_MUX is 0 */
+ /* HDR_LEN_INC_DEAGG_HDR is 0 */
+ /* HDR_METADATA_REG_VALID is 0; (AP->IPA only) */
+ }
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
+ u32 pad_align = endpoint->data->rx.pad_align;
+ u32 val = 0;
+
+ val |= HDR_ENDIANNESS_FMASK; /* big endian */
+ val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
+ /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
+ /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
+ /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
+ if (!endpoint->toward_ipa)
+ val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+/*
+ * Generate a metadata mask value that will select only the mux_id
+ * field in an rmnet_map header structure. The mux_id is at offset
+ * 1 byte from the beginning of the structure, but the metadata
+ * value is treated as a 4-byte unit. So this mask must be computed
+ * with endianness in mind. Note that ipa_endpoint_init_hdr_metadata_mask()
+ * will convert this value to the proper byte order.
+ *
+ * Marked __always_inline because this is really computing a
+ * constant value.
+ */
+static __always_inline __be32 ipa_rmnet_mux_id_metadata_mask(void)
+{
+ size_t mux_id_offset = offsetof(struct rmnet_map_header, mux_id);
+ u32 mux_id_mask = 0;
+ u8 *bytes;
+
+ bytes = (u8 *)&mux_id_mask;
+ bytes[mux_id_offset] = 0xff; /* mux_id is 1 byte */
+
+ return cpu_to_be32(mux_id_mask);
+}
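+
+/* Worked example: mux_id sits at byte offset 1, so on a little-endian
+ * AP bytes[1] = 0xff produces mux_id_mask == 0x0000ff00, and
+ * cpu_to_be32() swaps it so the 0xff still occupies byte offset 1 in
+ * memory.  On a big-endian AP the native value is 0x00ff0000 and the
+ * conversion is a no-op, giving the same byte layout either way.
+ */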
+
+static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ u32 val = 0;
+ u32 offset;
+
+ offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
+
+ if (!endpoint->toward_ipa && endpoint->data->qmap)
+ val = ipa_rmnet_mux_id_metadata_mask();
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
+ u32 val;
+
+ if (endpoint->toward_ipa && endpoint->data->dma_mode) {
+ enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
+ u32 dma_endpoint_id;
+
+ dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
+
+ val = u32_encode_bits(IPA_DMA, MODE_FMASK);
+ val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
+ } else {
+ val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
+ }
+ /* Other bitfields unspecified (and 0) */
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+/* Compute the aggregation size value to use for a given buffer size */
+static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
+{
+ /* We don't use "hard byte limit" aggregation, so we define the
+ * aggregation limit such that our buffer has enough space *after*
+ * that limit to receive a full MTU of data, plus overhead.
+ */
+ rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+
+ return rx_buffer_size / SZ_1K;
+}
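+
+/* For example (hypothetical numbers): if IPA_MTU plus
+ * IPA_RX_BUFFER_OVERHEAD came to 2048 bytes, an 8192-byte receive
+ * buffer would be programmed with an aggregation byte limit of 6
+ * (i.e. 6 KB), leaving room for a full MTU after the limit is hit.
+ */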
+
+static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
+ u32 val = 0;
+
+ if (endpoint->data->aggregation) {
+ if (!endpoint->toward_ipa) {
+ u32 aggr_size = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
+ u32 limit;
+
+ val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
+ val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
+ val |= u32_encode_bits(aggr_size,
+ AGGR_BYTE_LIMIT_FMASK);
+ limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
+ val |= u32_encode_bits(limit / IPA_AGGR_GRANULARITY,
+ AGGR_TIME_LIMIT_FMASK);
+ val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK);
+ if (endpoint->data->rx.aggr_close_eof)
+ val |= AGGR_SW_EOF_ACTIVE_FMASK;
+ /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
+ } else {
+ val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
+ AGGR_EN_FMASK);
+ val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
+ /* other fields ignored */
+ }
+ /* AGGR_FORCE_CLOSE is 0 */
+ } else {
+ val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
+ /* other fields ignored */
+ }
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+/* A return value of 0 indicates an error */
+static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
+{
+ u32 scale;
+ u32 base;
+ u32 val;
+
+ if (!microseconds)
+ return 0; /* invalid delay */
+
+ /* Timer is represented in units of clock ticks. */
+ if (ipa->version < IPA_VERSION_4_2)
+ return microseconds; /* XXX Needs to be computed */
+
+ /* IPA v4.2 represents the tick count as base * scale */
+ scale = 1; /* XXX Needs to be computed */
+ if (scale > field_max(SCALE_FMASK))
+ return 0; /* scale too big */
+
+ base = DIV_ROUND_CLOSEST(microseconds, scale);
+ if (base > field_max(BASE_VALUE_FMASK))
+ return 0; /* microseconds too big */
+
+ val = u32_encode_bits(scale, SCALE_FMASK);
+ val |= u32_encode_bits(base, BASE_VALUE_FMASK);
+
+ return val;
+}
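+
+/* Example (illustrative): with the scale fixed at 1 as above, a
+ * 100 microsecond period on IPA v4.2 encodes as base = 100, and the
+ * returned register value is just the two fields OR'd together.
+ */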
+
+static int ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
+ u32 microseconds)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ u32 offset;
+ u32 val;
+
+ /* XXX We'll fix this when the register definition is clear */
+ if (microseconds) {
+ struct device *dev = &ipa->pdev->dev;
+
+ dev_err(dev, "endpoint %u non-zero HOLB period (ignoring)\n",
+ endpoint_id);
+ microseconds = 0;
+ }
+
+ if (microseconds) {
+ val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
+ if (!val)
+ return -EINVAL;
+ } else {
+ val = 0; /* timeout is immediate */
+ }
+ offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
+ iowrite32(val, ipa->reg_virt + offset);
+
+ return 0;
+}
+
+static void
+ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ u32 offset;
+ u32 val;
+
+ val = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK);
+ offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
+{
+ u32 i;
+
+ for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
+ struct ipa_endpoint *endpoint = &ipa->endpoint[i];
+
+ if (endpoint->ee_id != GSI_EE_MODEM)
+ continue;
+
+ (void)ipa_endpoint_init_hol_block_timer(endpoint, 0);
+ ipa_endpoint_init_hol_block_enable(endpoint, true);
+ }
+}
+
+static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
+ u32 val = 0;
+
+ /* DEAGGR_HDR_LEN is 0 */
+ /* PACKET_OFFSET_VALID is 0 */
+ /* PACKET_OFFSET_LOCATION is ignored (not valid) */
+ /* MAX_PACKET_LEN is 0 (not enforced) */
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
+ u32 seq_type = endpoint->seq_type;
+ u32 val = 0;
+
+ val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
+ val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
+ /* HPS_REP_SEQ_TYPE is 0 */
+ /* DPS_REP_SEQ_TYPE is 0 */
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+/**
+ * ipa_endpoint_skb_tx() - Transmit a socket buffer
+ * @endpoint: Endpoint pointer
+ * @skb: Socket buffer to send
+ *
+ * Returns: 0 if successful, or a negative error code
+ */
+int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
+{
+ struct gsi_trans *trans;
+ u32 nr_frags;
+ int ret;
+
+ /* Make sure source endpoint's TLV FIFO has enough entries to
+ * hold the linear portion of the skb and all its fragments.
+ * If not, see if we can linearize it before giving up.
+ */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (1 + nr_frags > endpoint->trans_tre_max) {
+ if (skb_linearize(skb))
+ return -E2BIG;
+ nr_frags = 0;
+ }
+
+ trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
+ if (!trans)
+ return -EBUSY;
+
+ ret = gsi_trans_skb_add(trans, skb);
+ if (ret)
+ goto err_trans_free;
+ trans->data = skb; /* transaction owns skb now */
+
+ gsi_trans_commit(trans, !netdev_xmit_more());
+
+ return 0;
+
+err_trans_free:
+ gsi_trans_free(trans);
+
+ return -ENOMEM;
+}
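+
+/* Illustrative only: a minimal sketch of how a netdev transmit handler
+ * might call ipa_endpoint_skb_tx(). The endpoint lookup helper named
+ * here is hypothetical; the real caller lives in the modem netdev code.
+ *
+ *	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
+ *					      struct net_device *netdev)
+ *	{
+ *		struct ipa_endpoint *endpoint = example_tx_endpoint(netdev);
+ *
+ *		if (ipa_endpoint_skb_tx(endpoint, skb))
+ *			dev_kfree_skb_any(skb);
+ *
+ *		return NETDEV_TX_OK;
+ *	}
+ *
+ * On error the skb is simply dropped; the function still reports
+ * NETDEV_TX_OK so the stack does not requeue it.
+ */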
+
+static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ u32 val = 0;
+ u32 offset;
+
+ offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
+
+ if (endpoint->data->status_enable) {
+ val |= STATUS_EN_FMASK;
+ if (endpoint->toward_ipa) {
+ enum ipa_endpoint_name name;
+ u32 status_endpoint_id;
+
+ name = endpoint->data->tx.status_endpoint;
+ status_endpoint_id = ipa->name_map[name]->endpoint_id;
+
+ val |= u32_encode_bits(status_endpoint_id,
+ STATUS_ENDP_FMASK);
+ }
+ /* STATUS_LOCATION is 0 (status element precedes packet) */
+ /* The next field is present for IPA v4.0 and above */
+ /* STATUS_PKT_SUPPRESS_FMASK is 0 */
+ }
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
+{
+ struct gsi_trans *trans;
+ bool doorbell = false;
+ struct page *page;
+ u32 offset;
+ u32 len;
+ int ret;
+
+ page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
+ if (!page)
+ return -ENOMEM;
+
+ trans = ipa_endpoint_trans_alloc(endpoint, 1);
+ if (!trans)
+ goto err_free_pages;
+
+ /* Offset the buffer to make space for skb headroom */
+ offset = NET_SKB_PAD;
+ len = IPA_RX_BUFFER_SIZE - offset;
+
+ ret = gsi_trans_page_add(trans, page, len, offset);
+ if (ret)
+ goto err_trans_free;
+ trans->data = page; /* transaction owns page now */
+
+ if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
+ doorbell = true;
+ endpoint->replenish_ready = 0;
+ }
+
+ gsi_trans_commit(trans, doorbell);
+
+ return 0;
+
+err_trans_free:
+ gsi_trans_free(trans);
+err_free_pages:
+ __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+
+ return -ENOMEM;
+}
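+
+/* Illustrative only: the receive buffer handed to the hardware above is
+ * laid out so an skb can later be built around it without copying:
+ *
+ *	|<- NET_SKB_PAD ->|<- len = IPA_RX_BUFFER_SIZE - NET_SKB_PAD ->|
+ *	  (skb headroom)            (hardware-filled data)
+ *
+ * gsi_trans_page_add() is given the post-headroom offset and length,
+ * so the hardware writes only into the data region.
+ */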
+
+/**
+ * ipa_endpoint_replenish() - Replenish the Rx packet cache.
+ * @endpoint:	Endpoint to be replenished
+ * @count:	Number of buffers to add back to the replenish backlog
+ *
+ * Allocate RX packet wrapper structures with maximal socket buffers
+ * for an endpoint. These are supplied to the hardware, which fills
+ * them with incoming data.
+ */
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
+{
+ struct gsi *gsi;
+ u32 backlog;
+
+ if (!endpoint->replenish_enabled) {
+ if (count)
+ atomic_add(count, &endpoint->replenish_saved);
+ return;
+ }
+
+ while (atomic_dec_not_zero(&endpoint->replenish_backlog))
+ if (ipa_endpoint_replenish_one(endpoint))
+ goto try_again_later;
+ if (count)
+ atomic_add(count, &endpoint->replenish_backlog);
+
+ return;
+
+try_again_later:
+ /* The last one didn't succeed, so fix the backlog */
+ backlog = atomic_inc_return(&endpoint->replenish_backlog);
+
+ if (count)
+ atomic_add(count, &endpoint->replenish_backlog);
+
+ /* Whenever a receive buffer transaction completes we'll try to
+ * replenish again. It's unlikely, but if we fail to supply even
+ * one buffer, nothing will trigger another replenish attempt.
+ * Receive buffer transactions use one TRE, so schedule work to
+ * try replenishing again if our backlog is *all* available TREs.
+ */
+ gsi = &endpoint->ipa->gsi;
+ if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
+ schedule_delayed_work(&endpoint->replenish_work,
+ msecs_to_jiffies(1));
+}
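+
+/* Illustrative only: replenish_backlog counts buffers the hardware is
+ * owed, and replenish_saved parks that count while replenishing is
+ * disabled; their sum is preserved across enable/disable. Each
+ * completed RX transaction passes count == 1 to credit one buffer
+ * back, and the loop above converts as much backlog as it can into
+ * freshly posted receive buffers.
+ */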
+
+static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
+{
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ u32 max_backlog;
+ u32 saved;
+
+ endpoint->replenish_enabled = true;
+ while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
+ atomic_add(saved, &endpoint->replenish_backlog);
+
+ /* Start replenishing if hardware currently has no buffers */
+ max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
+ if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
+ ipa_endpoint_replenish(endpoint, 0);
+}
+
+static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
+{
+ u32 backlog;
+
+ endpoint->replenish_enabled = false;
+ while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
+ atomic_add(backlog, &endpoint->replenish_saved);
+}
+
+static void ipa_endpoint_replenish_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct ipa_endpoint *endpoint;
+
+ endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
+
+ ipa_endpoint_replenish(endpoint, 0);
+}
+
+static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
+ void *data, u32 len, u32 extra)
+{
+ struct sk_buff *skb;
+
+ skb = __dev_alloc_skb(len, GFP_ATOMIC);
+ if (skb) {
+ skb_put(skb, len);
+ memcpy(skb->data, data, len);
+ skb->truesize += extra;
+ }
+
+ /* Now receive it, or drop it if there's no netdev */
+ if (endpoint->netdev)
+ ipa_modem_skb_rx(endpoint->netdev, skb);
+ else if (skb)
+ dev_kfree_skb_any(skb);
+}
+
+static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
+ struct page *page, u32 len)
+{
+ struct sk_buff *skb;
+
+ /* Nothing to do if there's no netdev */
+ if (!endpoint->netdev)
+ return false;
+
+ /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
+ skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
+ if (skb) {
+ /* Reserve the headroom and account for the data */
+ skb_reserve(skb, NET_SKB_PAD);
+ skb_put(skb, len);
+ }
+
+ /* Receive the buffer (or record drop if unable to build it) */
+ ipa_modem_skb_rx(endpoint->netdev, skb);
+
+ return skb != NULL;
+}
+
+/* The format of a packet status element is the same for several status
+ * types (opcodes). The NEW_FRAG_RULE, LOG, DCMP (decompression) types
+ * aren't currently supported.
+ */
+static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
+{
+ switch (opcode) {
+ case IPA_STATUS_OPCODE_PACKET:
+ case IPA_STATUS_OPCODE_DROPPED_PACKET:
+ case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
+ case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
+ const struct ipa_status *status)
+{
+ u32 endpoint_id;
+
+ if (!ipa_status_format_packet(status->opcode))
+ return true;
+ if (!status->pkt_len)
+ return true;
+ endpoint_id = u32_get_bits(status->endp_dst_idx,
+ IPA_STATUS_DST_IDX_FMASK);
+ if (endpoint_id != endpoint->endpoint_id)
+ return true;
+
+ return false; /* Don't skip this packet, process it */
+}
+
+/* Return whether the status indicates the packet should be dropped */
+static bool ipa_status_drop_packet(const struct ipa_status *status)
+{
+ u32 val;
+
+ /* Deaggregation exceptions we drop; others we consume */
+ if (status->exception)
+ return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
+
+ /* Drop the packet if it fails to match a routing rule; otherwise no */
+ val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
+
+ return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
+}
+
+static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
+ struct page *page, u32 total_len)
+{
+ void *data = page_address(page) + NET_SKB_PAD;
+ u32 unused = IPA_RX_BUFFER_SIZE - total_len;
+ u32 resid = total_len;
+
+ while (resid) {
+ const struct ipa_status *status = data;
+ u32 align;
+ u32 len;
+
+ if (resid < sizeof(*status)) {
+ dev_err(&endpoint->ipa->pdev->dev,
+ "short message (%u bytes < %zu byte status)\n",
+ resid, sizeof(*status));
+ break;
+ }
+
+ /* Skip over status packets that lack packet data */
+ if (ipa_endpoint_status_skip(endpoint, status)) {
+ data += sizeof(*status);
+ resid -= sizeof(*status);
+ continue;
+ }
+
+ /* Compute the amount of buffer space consumed by the
+ * packet, including the status element. If the hardware
+ * is configured to pad packet data to an aligned boundary,
+		 * account for that. And if checksum offload is enabled,
+ * a trailer containing computed checksum information will
+ * be appended.
+ */
+ align = endpoint->data->rx.pad_align ? : 1;
+ len = le16_to_cpu(status->pkt_len);
+ len = sizeof(*status) + ALIGN(len, align);
+ if (endpoint->data->checksum)
+ len += sizeof(struct rmnet_map_dl_csum_trailer);
+
+ /* Charge the new packet with a proportional fraction of
+ * the unused space in the original receive buffer.
+ * XXX Charge a proportion of the *whole* receive buffer?
+ */
+ if (!ipa_status_drop_packet(status)) {
+ u32 extra = unused * len / total_len;
+ void *data2 = data + sizeof(*status);
+ u32 len2 = le16_to_cpu(status->pkt_len);
+
+ /* Client receives only packet data (no status) */
+ ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
+ }
+
+ /* Consume status and the full packet it describes */
+ data += len;
+ resid -= len;
+ }
+}
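+
+/* Illustrative only: a worked example of the length computation above.
+ * Assuming a 16-byte status element, pkt_len = 1501, rx.pad_align = 4,
+ * and checksum offload enabled:
+ *	len = 16 + ALIGN(1501, 4) + sizeof(struct rmnet_map_dl_csum_trailer)
+ *	    = 16 + 1504 + the trailer size
+ * The client skb is built from only the 1501 payload bytes that follow
+ * the status element.
+ */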
+
+/* Complete a TX transaction (a command, or one from ipa_endpoint_skb_tx()) */
+static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+}
+
+/* Complete transaction initiated in ipa_endpoint_replenish_one() */
+static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+ struct page *page;
+
+ ipa_endpoint_replenish(endpoint, 1);
+
+ if (trans->cancelled)
+ return;
+
+ /* Parse or build a socket buffer using the actual received length */
+ page = trans->data;
+ if (endpoint->data->status_enable)
+ ipa_endpoint_status_parse(endpoint, page, trans->len);
+ else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
+ trans->data = NULL; /* Pages have been consumed */
+}
+
+void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+ if (endpoint->toward_ipa)
+ ipa_endpoint_tx_complete(endpoint, trans);
+ else
+ ipa_endpoint_rx_complete(endpoint, trans);
+}
+
+void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
+{
+ if (endpoint->toward_ipa) {
+ struct ipa *ipa = endpoint->ipa;
+
+ /* Nothing to do for command transactions */
+ if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
+ struct sk_buff *skb = trans->data;
+
+ if (skb)
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ struct page *page = trans->data;
+
+ if (page)
+ __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+ }
+}
+
+void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
+{
+ u32 val;
+
+ /* ROUTE_DIS is 0 */
+ val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
+ val |= ROUTE_DEF_HDR_TABLE_FMASK;
+ val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
+ val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
+ val |= ROUTE_DEF_RETAIN_HDR_FMASK;
+
+ iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
+}
+
+void ipa_endpoint_default_route_clear(struct ipa *ipa)
+{
+ ipa_endpoint_default_route_set(ipa, 0);
+}
+
+static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
+{
+ u32 mask = BIT(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ u32 offset;
+ u32 val;
+
+ /* assert(mask & ipa->available); */
+ offset = ipa_reg_state_aggr_active_offset(ipa->version);
+ val = ioread32(ipa->reg_virt + offset);
+
+ return !!(val & mask);
+}
+
+static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
+{
+ u32 mask = BIT(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+
+ /* assert(mask & ipa->available); */
+ iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
+}
+
+/**
+ * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
+ * @endpoint: Endpoint to be reset
+ *
+ * If aggregation is active on an RX endpoint when a reset is performed
+ * on its underlying GSI channel, a special sequence of actions must be
+ * taken to ensure the IPA pipeline is properly cleared.
+ *
+ * Return:	0 if successful, or a negative error code
+ */
+static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ struct ipa *ipa = endpoint->ipa;
+ bool endpoint_suspended = false;
+ struct gsi *gsi = &ipa->gsi;
+ dma_addr_t addr;
+ bool db_enable;
+ u32 retries;
+ u32 len = 1;
+ void *virt;
+ int ret;
+
+ virt = kzalloc(len, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+ ret = -ENOMEM;
+ goto out_kfree;
+ }
+
+ /* Force close aggregation before issuing the reset */
+ ipa_endpoint_force_close(endpoint);
+
+ /* Reset and reconfigure the channel with the doorbell engine
+ * disabled. Then poll until we know aggregation is no longer
+ * active. We'll re-enable the doorbell (if appropriate) when
+ * we reset again below.
+ */
+ gsi_channel_reset(gsi, endpoint->channel_id, false);
+
+ /* Make sure the channel isn't suspended */
+ if (endpoint->ipa->version == IPA_VERSION_3_5_1)
+ if (!ipa_endpoint_init_ctrl(endpoint, false))
+ endpoint_suspended = true;
+
+ /* Start channel and do a 1 byte read */
+ ret = gsi_channel_start(gsi, endpoint->channel_id);
+ if (ret)
+ goto out_suspend_again;
+
+ ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
+ if (ret)
+ goto err_endpoint_stop;
+
+ /* Wait for aggregation to be closed on the channel */
+ retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
+ do {
+ if (!ipa_endpoint_aggr_active(endpoint))
+ break;
+ msleep(1);
+ } while (retries--);
+
+ /* Check one last time */
+ if (ipa_endpoint_aggr_active(endpoint))
+ dev_err(dev, "endpoint %u still active during reset\n",
+ endpoint->endpoint_id);
+
+ gsi_trans_read_byte_done(gsi, endpoint->channel_id);
+
+ ret = ipa_endpoint_stop(endpoint);
+ if (ret)
+ goto out_suspend_again;
+
+	/* Finally, reset and reconfigure the channel again (re-enabling
+	 * the doorbell engine if appropriate). Sleep for 1 millisecond to
+ * complete the channel reset sequence. Finish by suspending the
+ * channel again (if necessary).
+ */
+ db_enable = ipa->version == IPA_VERSION_3_5_1;
+ gsi_channel_reset(gsi, endpoint->channel_id, db_enable);
+
+ msleep(1);
+
+ goto out_suspend_again;
+
+err_endpoint_stop:
+ ipa_endpoint_stop(endpoint);
+out_suspend_again:
+ if (endpoint_suspended)
+ (void)ipa_endpoint_init_ctrl(endpoint, true);
+ dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
+out_kfree:
+ kfree(virt);
+
+ return ret;
+}
+
+static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
+{
+ u32 channel_id = endpoint->channel_id;
+ struct ipa *ipa = endpoint->ipa;
+ bool db_enable;
+ bool special;
+ int ret = 0;
+
+ /* On IPA v3.5.1, if an RX endpoint is reset while aggregation
+ * is active, we need to handle things specially to recover.
+ * All other cases just need to reset the underlying GSI channel.
+ *
+ * IPA v3.5.1 enables the doorbell engine. Newer versions do not.
+ */
+ db_enable = ipa->version == IPA_VERSION_3_5_1;
+ special = !endpoint->toward_ipa && endpoint->data->aggregation;
+ if (special && ipa_endpoint_aggr_active(endpoint))
+ ret = ipa_endpoint_reset_rx_aggr(endpoint);
+ else
+ gsi_channel_reset(&ipa->gsi, channel_id, db_enable);
+
+ if (ret)
+ dev_err(&ipa->pdev->dev,
+ "error %d resetting channel %u for endpoint %u\n",
+ ret, endpoint->channel_id, endpoint->endpoint_id);
+}
+
+static int ipa_endpoint_stop_rx_dma(struct ipa *ipa)
+{
+ u16 size = IPA_ENDPOINT_STOP_RX_SIZE;
+ struct gsi_trans *trans;
+ dma_addr_t addr;
+ int ret;
+
+ trans = ipa_cmd_trans_alloc(ipa, 1);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction for RX endpoint STOP workaround\n");
+ return -EBUSY;
+ }
+
+ /* Read into the highest part of the zero memory area */
+ addr = ipa->zero_addr + ipa->zero_size - size;
+
+ ipa_cmd_dma_task_32b_addr_add(trans, size, addr, false);
+
+ ret = gsi_trans_commit_wait_timeout(trans, ENDPOINT_STOP_DMA_TIMEOUT);
+ if (ret)
+ gsi_trans_free(trans);
+
+ return ret;
+}
+
+/**
+ * ipa_endpoint_stop() - Stop a GSI channel in IPA
+ * @endpoint:	Endpoint whose GSI channel should be stopped
+ *
+ * This function implements the sequence to stop a GSI channel
+ * in IPA. It returns when the channel is in the STOP state.
+ *
+ * Return: 0 on success, negative otherwise
+ */
+int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
+{
+ u32 retries = endpoint->toward_ipa ? 0 : IPA_ENDPOINT_STOP_RX_RETRIES;
+ int ret;
+
+ do {
+ struct ipa *ipa = endpoint->ipa;
+ struct gsi *gsi = &ipa->gsi;
+
+ ret = gsi_channel_stop(gsi, endpoint->channel_id);
+ if (ret != -EAGAIN)
+ break;
+
+ if (endpoint->toward_ipa)
+ continue;
+
+ /* For IPA v3.5.1, send a DMA read task and check again */
+ if (ipa->version == IPA_VERSION_3_5_1) {
+ ret = ipa_endpoint_stop_rx_dma(ipa);
+ if (ret)
+ break;
+ }
+
+ msleep(1);
+ } while (retries--);
+
+ return retries ? ret : -EIO;
+}
+
+static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ int ret;
+
+ if (endpoint->toward_ipa) {
+ bool delay_mode = endpoint->data->tx.delay;
+
+ ret = ipa_endpoint_init_ctrl(endpoint, delay_mode);
+ /* Endpoint is expected to not be in delay mode */
+ if (!ret != delay_mode) {
+ dev_warn(dev,
+ "TX endpoint %u was %sin delay mode\n",
+ endpoint->endpoint_id,
+ delay_mode ? "already " : "");
+ }
+ ipa_endpoint_init_hdr_ext(endpoint);
+ ipa_endpoint_init_aggr(endpoint);
+ ipa_endpoint_init_deaggr(endpoint);
+ ipa_endpoint_init_seq(endpoint);
+ } else {
+ if (endpoint->ipa->version == IPA_VERSION_3_5_1) {
+ if (!ipa_endpoint_init_ctrl(endpoint, false))
+ dev_warn(dev,
+ "RX endpoint %u was suspended\n",
+ endpoint->endpoint_id);
+ }
+ ipa_endpoint_init_hdr_ext(endpoint);
+ ipa_endpoint_init_aggr(endpoint);
+ }
+ ipa_endpoint_init_cfg(endpoint);
+ ipa_endpoint_init_hdr(endpoint);
+ ipa_endpoint_init_hdr_metadata_mask(endpoint);
+ ipa_endpoint_init_mode(endpoint);
+ ipa_endpoint_status(endpoint);
+}
+
+int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
+{
+ struct ipa *ipa = endpoint->ipa;
+ struct gsi *gsi = &ipa->gsi;
+ int ret;
+
+ ret = gsi_channel_start(gsi, endpoint->channel_id);
+ if (ret) {
+ dev_err(&ipa->pdev->dev,
+ "error %d starting %cX channel %u for endpoint %u\n",
+ ret, endpoint->toward_ipa ? 'T' : 'R',
+ endpoint->channel_id, endpoint->endpoint_id);
+ return ret;
+ }
+
+ if (!endpoint->toward_ipa) {
+ ipa_interrupt_suspend_enable(ipa->interrupt,
+ endpoint->endpoint_id);
+ ipa_endpoint_replenish_enable(endpoint);
+ }
+
+ ipa->enabled |= BIT(endpoint->endpoint_id);
+
+ return 0;
+}
+
+void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
+{
+ u32 mask = BIT(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ int ret;
+
+ if (!(endpoint->ipa->enabled & mask))
+ return;
+
+ endpoint->ipa->enabled ^= mask;
+
+ if (!endpoint->toward_ipa) {
+ ipa_endpoint_replenish_disable(endpoint);
+ ipa_interrupt_suspend_disable(ipa->interrupt,
+ endpoint->endpoint_id);
+ }
+
+ /* Note that if stop fails, the channel's state is not well-defined */
+ ret = ipa_endpoint_stop(endpoint);
+ if (ret)
+ dev_err(&ipa->pdev->dev,
+ "error %d attempting to stop endpoint %u\n", ret,
+ endpoint->endpoint_id);
+}
+
+/**
+ * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
+ * @endpoint:	Endpoint on which to emulate a suspend
+ *
+ * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
+ * with an open aggregation frame. This is to work around a hardware
+ * issue in IPA version 3.5.1 where the suspend interrupt will not be
+ * generated when it should be.
+ */
+static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
+{
+ struct ipa *ipa = endpoint->ipa;
+
+ /* assert(ipa->version == IPA_VERSION_3_5_1); */
+
+ if (!endpoint->data->aggregation)
+ return;
+
+ /* Nothing to do if the endpoint doesn't have aggregation open */
+ if (!ipa_endpoint_aggr_active(endpoint))
+ return;
+
+ /* Force close aggregation */
+ ipa_endpoint_force_close(endpoint);
+
+ ipa_interrupt_simulate_suspend(ipa->interrupt);
+}
+
+void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ bool stop_channel;
+ int ret;
+
+ if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
+ return;
+
+ if (!endpoint->toward_ipa)
+ ipa_endpoint_replenish_disable(endpoint);
+
+ /* IPA v3.5.1 doesn't use channel stop for suspend */
+ stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
+ if (!endpoint->toward_ipa && !stop_channel) {
+ /* Due to a hardware bug, a client suspended with an open
+ * aggregation frame will not generate a SUSPEND IPA
+ * interrupt. We work around this by force-closing the
+ * aggregation frame, then simulating the arrival of such
+ * an interrupt.
+ */
+ WARN_ON(ipa_endpoint_init_ctrl(endpoint, true));
+ ipa_endpoint_suspend_aggr(endpoint);
+ }
+
+ ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
+ if (ret)
+ dev_err(dev, "error %d suspending channel %u\n", ret,
+ endpoint->channel_id);
+}
+
+void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
+{
+ struct device *dev = &endpoint->ipa->pdev->dev;
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ bool start_channel;
+ int ret;
+
+ if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
+ return;
+
+ /* IPA v3.5.1 doesn't use channel start for resume */
+ start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
+ if (!endpoint->toward_ipa && !start_channel)
+ WARN_ON(ipa_endpoint_init_ctrl(endpoint, false));
+
+ ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
+ if (ret)
+ dev_err(dev, "error %d resuming channel %u\n", ret,
+ endpoint->channel_id);
+ else if (!endpoint->toward_ipa)
+ ipa_endpoint_replenish_enable(endpoint);
+}
+
+void ipa_endpoint_suspend(struct ipa *ipa)
+{
+ if (ipa->modem_netdev)
+ ipa_modem_suspend(ipa->modem_netdev);
+
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
+}
+
+void ipa_endpoint_resume(struct ipa *ipa)
+{
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
+
+ if (ipa->modem_netdev)
+ ipa_modem_resume(ipa->modem_netdev);
+}
+
+static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
+{
+ struct gsi *gsi = &endpoint->ipa->gsi;
+ u32 channel_id = endpoint->channel_id;
+
+ /* Only AP endpoints get set up */
+ if (endpoint->ee_id != GSI_EE_AP)
+ return;
+
+ endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
+ if (!endpoint->toward_ipa) {
+ /* RX transactions require a single TRE, so the maximum
+ * backlog is the same as the maximum outstanding TREs.
+ */
+ endpoint->replenish_enabled = false;
+ atomic_set(&endpoint->replenish_saved,
+ gsi_channel_tre_max(gsi, endpoint->channel_id));
+ atomic_set(&endpoint->replenish_backlog, 0);
+ INIT_DELAYED_WORK(&endpoint->replenish_work,
+ ipa_endpoint_replenish_work);
+ }
+
+ ipa_endpoint_program(endpoint);
+
+ endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
+}
+
+static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
+{
+ endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
+
+ if (!endpoint->toward_ipa)
+ cancel_delayed_work_sync(&endpoint->replenish_work);
+
+ ipa_endpoint_reset(endpoint);
+}
+
+void ipa_endpoint_setup(struct ipa *ipa)
+{
+ u32 initialized = ipa->initialized;
+
+ ipa->set_up = 0;
+ while (initialized) {
+ u32 endpoint_id = __ffs(initialized);
+
+ initialized ^= BIT(endpoint_id);
+
+ ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
+ }
+}
+
+void ipa_endpoint_teardown(struct ipa *ipa)
+{
+ u32 set_up = ipa->set_up;
+
+ while (set_up) {
+ u32 endpoint_id = __fls(set_up);
+
+ set_up ^= BIT(endpoint_id);
+
+ ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
+ }
+ ipa->set_up = 0;
+}
+
+int ipa_endpoint_config(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ u32 initialized;
+ u32 rx_base;
+ u32 rx_mask;
+ u32 tx_mask;
+ int ret = 0;
+ u32 max;
+ u32 val;
+
+ /* Find out about the endpoints supplied by the hardware, and ensure
+ * the highest one doesn't exceed the number we support.
+ */
+ val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
+
+ /* Our RX is an IPA producer */
+ rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
+ max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
+ if (max > IPA_ENDPOINT_MAX) {
+ dev_err(dev, "too many endpoints (%u > %u)\n",
+ max, IPA_ENDPOINT_MAX);
+ return -EINVAL;
+ }
+ rx_mask = GENMASK(max - 1, rx_base);
+
+ /* Our TX is an IPA consumer */
+ max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
+ tx_mask = GENMASK(max - 1, 0);
+
+ ipa->available = rx_mask | tx_mask;
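+
+	/* Illustrative only: with hypothetical FLAVOR_0 values of
+	 * rx_base = 13, 3 producer pipes and 10 consumer pipes, this
+	 * yields rx_mask = GENMASK(15, 13) = 0xe000 and
+	 * tx_mask = GENMASK(9, 0) = 0x03ff, so available = 0xe3ff.
+	 */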
+
+ /* Check for initialized endpoints not supported by the hardware */
+ if (ipa->initialized & ~ipa->available) {
+ dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
+ ipa->initialized & ~ipa->available);
+ ret = -EINVAL; /* Report other errors too */
+ }
+
+ initialized = ipa->initialized;
+ while (initialized) {
+ u32 endpoint_id = __ffs(initialized);
+ struct ipa_endpoint *endpoint;
+
+ initialized ^= BIT(endpoint_id);
+
+ /* Make sure it's pointing in the right direction */
+ endpoint = &ipa->endpoint[endpoint_id];
+ if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
+ dev_err(dev, "endpoint id %u wrong direction\n",
+ endpoint_id);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+void ipa_endpoint_deconfig(struct ipa *ipa)
+{
+ ipa->available = 0; /* Nothing more to do */
+}
+
+static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ struct ipa_endpoint *endpoint;
+
+ endpoint = &ipa->endpoint[data->endpoint_id];
+
+ if (data->ee_id == GSI_EE_AP)
+ ipa->channel_map[data->channel_id] = endpoint;
+ ipa->name_map[name] = endpoint;
+
+ endpoint->ipa = ipa;
+ endpoint->ee_id = data->ee_id;
+ endpoint->seq_type = data->endpoint.seq_type;
+ endpoint->channel_id = data->channel_id;
+ endpoint->endpoint_id = data->endpoint_id;
+ endpoint->toward_ipa = data->toward_ipa;
+ endpoint->data = &data->endpoint.config;
+
+ ipa->initialized |= BIT(endpoint->endpoint_id);
+}
+
+void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
+{
+ endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
+
+ memset(endpoint, 0, sizeof(*endpoint));
+}
+
+void ipa_endpoint_exit(struct ipa *ipa)
+{
+ u32 initialized = ipa->initialized;
+
+ while (initialized) {
+ u32 endpoint_id = __fls(initialized);
+
+ initialized ^= BIT(endpoint_id);
+
+ ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
+ }
+ memset(ipa->name_map, 0, sizeof(ipa->name_map));
+ memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
+}
+
+/* Returns a bitmask of endpoints that support filtering, or 0 on error */
+u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
+{
+ enum ipa_endpoint_name name;
+ u32 filter_map;
+
+ if (!ipa_endpoint_data_valid(ipa, count, data))
+ return 0; /* Error */
+
+ ipa->initialized = 0;
+
+ filter_map = 0;
+ for (name = 0; name < count; name++, data++) {
+ if (ipa_gsi_endpoint_data_empty(data))
+ continue; /* Skip over empty slots */
+
+ ipa_endpoint_init_one(ipa, name, data);
+
+ if (data->endpoint.filter_support)
+ filter_map |= BIT(data->endpoint_id);
+ }
+
+ if (!ipa_filter_map_valid(ipa, filter_map))
+ goto err_endpoint_exit;
+
+ return filter_map; /* Non-zero bitmask */
+
+err_endpoint_exit:
+ ipa_endpoint_exit(ipa);
+
+ return 0; /* Error */
+}
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
new file mode 100644
index 000000000000..4b336a1f759d
--- /dev/null
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_ENDPOINT_H_
+#define _IPA_ENDPOINT_H_
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/if_ether.h>
+
+#include "gsi.h"
+#include "ipa_reg.h"
+
+struct net_device;
+struct sk_buff;
+
+struct ipa;
+struct ipa_gsi_endpoint_data;
+
+/* Non-zero granularity of counter used to implement aggregation timeout */
+#define IPA_AGGR_GRANULARITY 500 /* microseconds */
+
+#define IPA_MTU ETH_DATA_LEN
+
+enum ipa_endpoint_name {
+ IPA_ENDPOINT_AP_MODEM_TX = 0,
+ IPA_ENDPOINT_MODEM_LAN_TX,
+ IPA_ENDPOINT_MODEM_COMMAND_TX,
+ IPA_ENDPOINT_AP_COMMAND_TX,
+ IPA_ENDPOINT_MODEM_AP_TX,
+ IPA_ENDPOINT_AP_LAN_RX,
+ IPA_ENDPOINT_AP_MODEM_RX,
+ IPA_ENDPOINT_MODEM_AP_RX,
+ IPA_ENDPOINT_MODEM_LAN_RX,
+ IPA_ENDPOINT_COUNT, /* Number of names (not an index) */
+};
+
+#define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
+
+/**
+ * struct ipa_endpoint - IPA endpoint information
+ * @ipa: IPA pointer
+ * @channel_id: Endpoint's GSI channel
+ * @evt_ring_id: Endpoint's GSI channel event ring
+ */
+struct ipa_endpoint {
+ struct ipa *ipa;
+ enum ipa_seq_type seq_type;
+ enum gsi_ee_id ee_id;
+ u32 channel_id;
+ u32 endpoint_id;
+ bool toward_ipa;
+ const struct ipa_endpoint_config_data *data;
+
+ u32 trans_tre_max; /* maximum descriptors per transaction */
+ u32 evt_ring_id;
+
+ /* Net device this endpoint is associated with, if any */
+ struct net_device *netdev;
+
+ /* Receive buffer replenishing for RX endpoints */
+ bool replenish_enabled;
+ u32 replenish_ready;
+ atomic_t replenish_saved;
+ atomic_t replenish_backlog;
+ struct delayed_work replenish_work; /* global wq */
+};
+
+void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa);
+
+void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable);
+
+int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa);
+
+int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb);
+
+int ipa_endpoint_stop(struct ipa_endpoint *endpoint);
+
+void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint);
+
+int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint);
+void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint);
+
+void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint);
+void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint);
+
+void ipa_endpoint_suspend(struct ipa *ipa);
+void ipa_endpoint_resume(struct ipa *ipa);
+
+void ipa_endpoint_setup(struct ipa *ipa);
+void ipa_endpoint_teardown(struct ipa *ipa);
+
+int ipa_endpoint_config(struct ipa *ipa);
+void ipa_endpoint_deconfig(struct ipa *ipa);
+
+void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id);
+void ipa_endpoint_default_route_clear(struct ipa *ipa);
+
+u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
+ const struct ipa_gsi_endpoint_data *data);
+void ipa_endpoint_exit(struct ipa *ipa);
+
+void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
+				 struct gsi_trans *trans);
+void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
+				 struct gsi_trans *trans);
+
+#endif /* _IPA_ENDPOINT_H_ */
diff --git a/drivers/net/ipa/ipa_gsi.c b/drivers/net/ipa/ipa_gsi.c
new file mode 100644
index 000000000000..dc4a5c2196ae
--- /dev/null
+++ b/drivers/net/ipa/ipa_gsi.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+
+#include "gsi_trans.h"
+#include "ipa.h"
+#include "ipa_endpoint.h"
+#include "ipa_data.h"
+
+void ipa_gsi_trans_complete(struct gsi_trans *trans)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+
+ ipa_endpoint_trans_complete(ipa->channel_map[trans->channel_id], trans);
+}
+
+void ipa_gsi_trans_release(struct gsi_trans *trans)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+
+ ipa_endpoint_trans_release(ipa->channel_map[trans->channel_id], trans);
+}
+
+void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count)
+{
+ struct ipa *ipa = container_of(gsi, struct ipa, gsi);
+ struct ipa_endpoint *endpoint;
+
+ endpoint = ipa->channel_map[channel_id];
+ if (endpoint->netdev)
+ netdev_sent_queue(endpoint->netdev, byte_count);
+}
+
+void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count)
+{
+ struct ipa *ipa = container_of(gsi, struct ipa, gsi);
+ struct ipa_endpoint *endpoint;
+
+ endpoint = ipa->channel_map[channel_id];
+ if (endpoint->netdev)
+ netdev_completed_queue(endpoint->netdev, count, byte_count);
+}
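+
+/* Design note: the queued/completed pair above feeds the kernel's Byte
+ * Queue Limits (BQL) machinery via netdev_sent_queue() and
+ * netdev_completed_queue(), which lets the stack bound the number of
+ * bytes in flight on the TX channel.
+ */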
+
+/* Indicate whether an endpoint config data entry is "empty" */
+bool ipa_gsi_endpoint_data_empty(const struct ipa_gsi_endpoint_data *data)
+{
+ return data->ee_id == GSI_EE_AP && !data->channel.tlv_count;
+}
diff --git a/drivers/net/ipa/ipa_gsi.h b/drivers/net/ipa/ipa_gsi.h
new file mode 100644
index 000000000000..3cf18600c68e
--- /dev/null
+++ b/drivers/net/ipa/ipa_gsi.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_GSI_H_
+#define _IPA_GSI_H_
+
+#include <linux/types.h>
+
+struct gsi_trans;
+
+/**
+ * ipa_gsi_trans_complete() - GSI transaction completion callback
+ * @trans: Transaction that has completed
+ *
+ * This is called from the GSI layer to notify the IPA layer that a
+ * transaction has completed.
+ */
+void ipa_gsi_trans_complete(struct gsi_trans *trans);
+
+/**
+ * ipa_gsi_trans_release() - GSI transaction release callback
+ * @trans: Transaction whose resources should be freed
+ *
+ * This is called from the GSI layer to notify the IPA layer that a
+ * transaction is about to be freed, so any resources associated
+ * with it should be released.
+ */
+void ipa_gsi_trans_release(struct gsi_trans *trans);
+
+/**
+ * ipa_gsi_channel_tx_queued() - GSI queued to hardware notification
+ * @gsi: GSI pointer
+ * @channel_id: Channel number
+ * @count: Number of transactions queued
+ * @byte_count: Number of bytes to transfer represented by transactions
+ *
+ * This is called from the GSI layer to notify the IPA layer that some
+ * number of transactions have been queued to hardware for execution.
+ */
+void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count);
+/**
+ * ipa_gsi_channel_tx_completed() - GSI transaction completion callback
+ * @gsi: GSI pointer
+ * @channel_id: Channel number
+ * @count: Number of transactions completed since last report
+ * @byte_count: Number of bytes transferred represented by transactions
+ *
+ * This is called from the GSI layer to notify the IPA layer that the hardware
+ * has reported the completion of some number of transactions.
+ */
+void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,
+ u32 byte_count);
+
+bool ipa_gsi_endpoint_data_empty(const struct ipa_gsi_endpoint_data *data);
+
+#endif /* _IPA_GSI_H_ */
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
new file mode 100644
index 000000000000..90353987c45f
--- /dev/null
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+/* DOC: IPA Interrupts
+ *
+ * The IPA has an interrupt line distinct from the interrupt used by the GSI
+ * code. Whereas GSI interrupts are generally related to channel events (like
+ * transfer completions), IPA interrupts signal other events involving
+ * the IPA. Some of the IPA interrupts come from a microcontroller
+ * embedded in the IPA. Each IPA interrupt type can be both masked and
+ * acknowledged independent of the others.
+ *
+ * Two of the IPA interrupts are initiated by the microcontroller. A third
+ * can be generated to signal the need for a wakeup/resume when an IPA
+ * endpoint has been suspended. There are other IPA events, but at this
+ * time only these three are supported.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+
+#include "ipa.h"
+#include "ipa_clock.h"
+#include "ipa_reg.h"
+#include "ipa_endpoint.h"
+#include "ipa_interrupt.h"
+
+/**
+ * struct ipa_interrupt - IPA interrupt information
+ * @ipa: IPA pointer
+ * @irq: Linux IRQ number used for IPA interrupts
+ * @enabled: Mask indicating which interrupts are enabled
+ * @handler: Array of handlers indexed by IPA interrupt ID
+ */
+struct ipa_interrupt {
+ struct ipa *ipa;
+ u32 irq;
+ u32 enabled;
+ ipa_irq_handler_t handler[IPA_IRQ_COUNT];
+};
+
+/* Returns true if the interrupt type is associated with the microcontroller */
+static bool ipa_interrupt_uc(struct ipa_interrupt *interrupt, u32 irq_id)
+{
+ return irq_id == IPA_IRQ_UC_0 || irq_id == IPA_IRQ_UC_1;
+}
+
+/* Process a particular interrupt type that has been received */
+static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
+{
+ bool uc_irq = ipa_interrupt_uc(interrupt, irq_id);
+ struct ipa *ipa = interrupt->ipa;
+ u32 mask = BIT(irq_id);
+
+ /* For microcontroller interrupts, clear the interrupt right away,
+ * "to avoid clearing unhandled interrupts."
+ */
+ if (uc_irq)
+ iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET);
+
+ if (irq_id < IPA_IRQ_COUNT && interrupt->handler[irq_id])
+ interrupt->handler[irq_id](interrupt->ipa, irq_id);
+
+ /* Clearing the SUSPEND_TX interrupt also clears the register
+ * that tells us which suspended endpoint(s) caused the interrupt,
+ * so defer clearing until after the handler has been called.
+ */
+ if (!uc_irq)
+ iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET);
+}
+
+/* Process all IPA interrupt types that have been signaled */
+static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 enabled = interrupt->enabled;
+ u32 mask;
+
+ /* The status register indicates which conditions are present,
+ * including conditions whose interrupt is not enabled. Handle
+ * only the enabled ones.
+ */
+ mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET);
+ while ((mask &= enabled)) {
+ do {
+ u32 irq_id = __ffs(mask);
+
+ mask ^= BIT(irq_id);
+
+ ipa_interrupt_process(interrupt, irq_id);
+ } while (mask);
+ mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET);
+ }
+}
+
+/* Threaded part of the IPA IRQ handler */
+static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
+{
+ struct ipa_interrupt *interrupt = dev_id;
+
+ ipa_clock_get(interrupt->ipa);
+
+ ipa_interrupt_process_all(interrupt);
+
+ ipa_clock_put(interrupt->ipa);
+
+ return IRQ_HANDLED;
+}
+
+/* Hard (non-threaded) part of the IRQ handler */
+static irqreturn_t ipa_isr(int irq, void *dev_id)
+{
+ struct ipa_interrupt *interrupt = dev_id;
+ struct ipa *ipa = interrupt->ipa;
+ u32 mask;
+
+ mask = ioread32(ipa->reg_virt + IPA_REG_IRQ_STTS_OFFSET);
+ if (mask & interrupt->enabled)
+ return IRQ_WAKE_THREAD;
+
+ /* Nothing in the mask was supposed to cause an interrupt */
+ iowrite32(mask, ipa->reg_virt + IPA_REG_IRQ_CLR_OFFSET);
+
+ dev_err(&ipa->pdev->dev, "%s: unexpected interrupt, mask 0x%08x\n",
+ __func__, mask);
+
+ return IRQ_HANDLED;
+}
+
+/* Common function used to enable/disable TX_SUSPEND for an endpoint */
+static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
+ u32 endpoint_id, bool enable)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 mask = BIT(endpoint_id);
+ u32 val;
+
+ /* assert(mask & ipa->available); */
+ val = ioread32(ipa->reg_virt + IPA_REG_SUSPEND_IRQ_EN_OFFSET);
+ if (enable)
+ val |= mask;
+ else
+ val &= ~mask;
+ iowrite32(val, ipa->reg_virt + IPA_REG_SUSPEND_IRQ_EN_OFFSET);
+}
+
+/* Enable TX_SUSPEND for an endpoint */
+void
+ipa_interrupt_suspend_enable(struct ipa_interrupt *interrupt, u32 endpoint_id)
+{
+ ipa_interrupt_suspend_control(interrupt, endpoint_id, true);
+}
+
+/* Disable TX_SUSPEND for an endpoint */
+void
+ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
+{
+ ipa_interrupt_suspend_control(interrupt, endpoint_id, false);
+}
+
+/* Clear the suspend interrupt for all endpoints that signaled it */
+void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 val;
+
+ val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_INFO_OFFSET);
+ iowrite32(val, ipa->reg_virt + IPA_REG_SUSPEND_IRQ_CLR_OFFSET);
+}
+
+/* Simulate arrival of an IPA TX_SUSPEND interrupt */
+void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
+{
+ ipa_interrupt_process(interrupt, IPA_IRQ_TX_SUSPEND);
+}
+
+/* Add a handler for an IPA interrupt */
+void ipa_interrupt_add(struct ipa_interrupt *interrupt,
+ enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler)
+{
+ struct ipa *ipa = interrupt->ipa;
+
+ /* assert(ipa_irq < IPA_IRQ_COUNT); */
+ interrupt->handler[ipa_irq] = handler;
+
+ /* Update the IPA interrupt mask to enable it */
+ interrupt->enabled |= BIT(ipa_irq);
+ iowrite32(interrupt->enabled, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET);
+}
+
+/* Remove the handler for an IPA interrupt type */
+void
+ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
+{
+ struct ipa *ipa = interrupt->ipa;
+
+ /* assert(ipa_irq < IPA_IRQ_COUNT); */
+ /* Update the IPA interrupt mask to disable it */
+ interrupt->enabled &= ~BIT(ipa_irq);
+ iowrite32(interrupt->enabled, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET);
+
+ interrupt->handler[ipa_irq] = NULL;
+}
+
+/* Set up the IPA interrupt framework */
+struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct ipa_interrupt *interrupt;
+ unsigned int irq;
+ int ret;
+
+ ret = platform_get_irq_byname(ipa->pdev, "ipa");
+ if (ret <= 0) {
+ dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n",
+ ret);
+ return ERR_PTR(ret ? : -EINVAL);
+ }
+ irq = ret;
+
+ interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
+ if (!interrupt)
+ return ERR_PTR(-ENOMEM);
+ interrupt->ipa = ipa;
+ interrupt->irq = irq;
+
+ /* Start with all IPA interrupts disabled */
+ iowrite32(0, ipa->reg_virt + IPA_REG_IRQ_EN_OFFSET);
+
+ ret = request_threaded_irq(irq, ipa_isr, ipa_isr_thread, IRQF_ONESHOT,
+ "ipa", interrupt);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret);
+ goto err_kfree;
+ }
+
+ return interrupt;
+
+err_kfree:
+ kfree(interrupt);
+
+ return ERR_PTR(ret);
+}
+
+/* Tear down the IPA interrupt framework */
+void ipa_interrupt_teardown(struct ipa_interrupt *interrupt)
+{
+ free_irq(interrupt->irq, interrupt);
+ kfree(interrupt);
+}
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
new file mode 100644
index 000000000000..d4f4c1c9f0b1
--- /dev/null
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_INTERRUPT_H_
+#define _IPA_INTERRUPT_H_
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+struct ipa;
+struct ipa_interrupt;
+
+/**
+ * enum ipa_irq_id - IPA interrupt type
+ * @IPA_IRQ_UC_0: Microcontroller event interrupt
+ * @IPA_IRQ_UC_1: Microcontroller response interrupt
+ * @IPA_IRQ_TX_SUSPEND: Data ready interrupt
+ *
+ * The data ready interrupt is signaled if data has arrived that is destined
+ * for an AP RX endpoint whose underlying GSI channel is suspended/stopped.
+ */
+enum ipa_irq_id {
+ IPA_IRQ_UC_0 = 2,
+ IPA_IRQ_UC_1 = 3,
+ IPA_IRQ_TX_SUSPEND = 14,
+ IPA_IRQ_COUNT, /* Number of interrupt types (not an index) */
+};
+
+/**
+ * typedef ipa_irq_handler_t - IPA interrupt handler function type
+ * @ipa: IPA pointer
+ * @irq_id: interrupt type
+ *
+ * Callback function registered by ipa_interrupt_add() to handle a specific
+ * IPA interrupt type
+ */
+typedef void (*ipa_irq_handler_t)(struct ipa *ipa, enum ipa_irq_id irq_id);
+
+/**
+ * ipa_interrupt_add() - Register a handler for an IPA interrupt type
+ * @irq_id: IPA interrupt type
+ * @handler: Handler function for the interrupt
+ *
+ * Add a handler for an IPA interrupt and enable it. IPA interrupt
+ * handlers are run in threaded interrupt context, so are allowed to
+ * block.
+ */
+void ipa_interrupt_add(struct ipa_interrupt *interrupt, enum ipa_irq_id irq_id,
+ ipa_irq_handler_t handler);
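+
+/* Illustrative only: a hypothetical registration of a microcontroller
+ * event handler (the handler name is made up for this sketch):
+ *
+ *	static void example_uc_handler(struct ipa *ipa,
+ *				       enum ipa_irq_id irq_id)
+ *	{
+ *		dev_dbg(&ipa->pdev->dev, "uc interrupt %u\n", irq_id);
+ *	}
+ *
+ *	ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, example_uc_handler);
+ */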
+
+/**
+ * ipa_interrupt_remove() - Remove the handler for an IPA interrupt type
+ * @interrupt: IPA interrupt structure
+ * @irq_id: IPA interrupt type
+ *
+ * Remove an IPA interrupt handler and disable it.
+ */
+void ipa_interrupt_remove(struct ipa_interrupt *interrupt,
+ enum ipa_irq_id irq_id);
+
+/**
+ * ipa_interrupt_suspend_enable - Enable TX_SUSPEND for an endpoint
+ * @interrupt: IPA interrupt structure
+ * @endpoint_id: Endpoint whose interrupt should be enabled
+ *
+ * Note: The "TX" in the name is from the perspective of the IPA hardware.
+ * A TX_SUSPEND interrupt arrives on an AP RX endpoint when packet data can't
+ * be delivered to the endpoint because it is suspended (or its underlying
+ * channel is stopped).
+ */
+void ipa_interrupt_suspend_enable(struct ipa_interrupt *interrupt,
+ u32 endpoint_id);
+
+/**
+ * ipa_interrupt_suspend_disable - Disable TX_SUSPEND for an endpoint
+ * @interrupt: IPA interrupt structure
+ * @endpoint_id: Endpoint whose interrupt should be disabled
+ */
+void ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt,
+ u32 endpoint_id);
+
+/**
+ * ipa_interrupt_suspend_clear_all - clear all suspend interrupts
+ * @interrupt: IPA interrupt structure
+ *
+ * Clear the TX_SUSPEND interrupt for all endpoints that signaled it.
+ */
+void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
+
+/**
+ * ipa_interrupt_simulate_suspend() - Simulate TX_SUSPEND IPA interrupt
+ * @interrupt: IPA interrupt structure
+ *
+ * This calls the TX_SUSPEND interrupt handler, as if such an interrupt
+ * had been signaled. This is needed to work around a hardware quirk
+ * that occurs if aggregation is active on an endpoint when its underlying
+ * channel is suspended.
+ */
+void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
+
+/**
+ * ipa_interrupt_setup() - Set up the IPA interrupt framework
+ * @ipa: IPA pointer
+ *
+ * Return: Pointer to the IPA interrupt structure, or a pointer-coded error
+ */
+struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa);
+
+/**
+ * ipa_interrupt_teardown() - Tear down the IPA interrupt framework
+ * @interrupt: IPA interrupt structure
+ */
+void ipa_interrupt_teardown(struct ipa_interrupt *interrupt);
+
+#endif /* _IPA_INTERRUPT_H_ */
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
new file mode 100644
index 000000000000..28998dcce3d2
--- /dev/null
+++ b/drivers/net/ipa/ipa_main.c
@@ -0,0 +1,953 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/remoteproc.h>
+#include <linux/qcom_scm.h>
+#include <linux/soc/qcom/mdt_loader.h>
+
+#include "ipa.h"
+#include "ipa_clock.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_cmd.h"
+#include "ipa_reg.h"
+#include "ipa_mem.h"
+#include "ipa_table.h"
+#include "ipa_modem.h"
+#include "ipa_uc.h"
+#include "ipa_interrupt.h"
+#include "gsi_trans.h"
+
+/**
+ * DOC: The IP Accelerator
+ *
+ * This driver supports the Qualcomm IP Accelerator (IPA), which is a
+ * networking component found in many Qualcomm SoCs. The IPA is connected
+ * to the application processor (AP), but is also connected to (and
+ * partially controlled by) other "execution environments" (EEs), such
+ * as a modem.
+ *
+ * The IPA is the conduit between the AP and the modem that carries network
+ * traffic. This driver presents a network interface representing the
+ * connection of the modem to external (e.g. LTE) networks.
+ *
+ * The IPA provides protocol checksum calculation, offloading this work
+ * from the AP. The IPA offers additional functionality, including routing,
+ * filtering, and NAT support, but that more advanced functionality is not
+ * currently supported. Despite that, some resources--including routing
+ * tables and filter tables--are defined in this driver because they must
+ * be initialized even when the advanced hardware features are not used.
+ *
+ * There are two distinct layers that implement the IPA hardware, and this
+ * is reflected in the organization of the driver. The generic software
+ * interface (GSI) is an integral component of the IPA, providing a
+ * well-defined communication layer between the AP subsystem and the IPA
+ * core. The GSI implements a set of "channels" used for communication
+ * between the AP and the IPA.
+ *
+ * The IPA layer uses GSI channels to implement its "endpoints". And while
+ * a GSI channel carries data between the AP and the IPA, a pair of IPA
+ * endpoints is used to carry traffic between two EEs. Specifically, the main
+ * modem network interface is implemented by two pairs of endpoints: a TX
+ * endpoint on the AP coupled with an RX endpoint on the modem; and another
+ * RX endpoint on the AP receiving data from a TX endpoint on the modem.
+ */
+
+/* The name of the GSI firmware file relative to /lib/firmware */
+#define IPA_FWS_PATH "ipa_fws.mdt"
+#define IPA_PAS_ID 15
+
+/**
+ * ipa_suspend_handler() - Handle the suspend IPA interrupt
+ * @ipa: IPA pointer
+ * @irq_id: IPA interrupt type (unused)
+ *
+ * When in suspended state, the IPA can trigger a resume by sending a SUSPEND
+ * IPA interrupt.
+ */
+static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+	/* Take a single clock reference to prevent suspend. All
+ * endpoints will be resumed as a result. This reference will
+ * be dropped when we get a power management suspend request.
+ */
+ if (!atomic_xchg(&ipa->suspend_ref, 1))
+ ipa_clock_get(ipa);
+
+ /* Acknowledge/clear the suspend interrupt on all endpoints */
+ ipa_interrupt_suspend_clear_all(ipa->interrupt);
+}
+
+/**
+ * ipa_setup() - Set up IPA hardware
+ * @ipa: IPA pointer
+ *
+ * Perform initialization that requires issuing immediate commands on
+ * the command TX endpoint. If the modem is doing GSI firmware load
+ * and initialization, this function will be called when an SMP2P
+ * interrupt has been signaled by the modem. Otherwise it will be
+ * called from ipa_probe() after GSI firmware has been successfully
+ * loaded, authenticated, and started by Trust Zone.
+ */
+int ipa_setup(struct ipa *ipa)
+{
+ struct ipa_endpoint *exception_endpoint;
+ struct ipa_endpoint *command_endpoint;
+ int ret;
+
+ /* IPA v4.0 and above don't use the doorbell engine. */
+ ret = gsi_setup(&ipa->gsi, ipa->version == IPA_VERSION_3_5_1);
+ if (ret)
+ return ret;
+
+ ipa->interrupt = ipa_interrupt_setup(ipa);
+ if (IS_ERR(ipa->interrupt)) {
+ ret = PTR_ERR(ipa->interrupt);
+ goto err_gsi_teardown;
+ }
+ ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
+ ipa_suspend_handler);
+
+ ipa_uc_setup(ipa);
+
+ ipa_endpoint_setup(ipa);
+
+ /* We need to use the AP command TX endpoint to perform other
+	 * initialization, so we enable it first.
+ */
+ command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ ret = ipa_endpoint_enable_one(command_endpoint);
+ if (ret)
+ goto err_endpoint_teardown;
+
+ ret = ipa_mem_setup(ipa);
+ if (ret)
+ goto err_command_disable;
+
+ ret = ipa_table_setup(ipa);
+ if (ret)
+ goto err_mem_teardown;
+
+ /* Enable the exception handling endpoint, and tell the hardware
+ * to use it by default.
+ */
+ exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ ret = ipa_endpoint_enable_one(exception_endpoint);
+ if (ret)
+ goto err_table_teardown;
+
+ ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id);
+
+ /* We're all set. Now prepare for communication with the modem */
+ ret = ipa_modem_setup(ipa);
+ if (ret)
+ goto err_default_route_clear;
+
+ ipa->setup_complete = true;
+
+ dev_info(&ipa->pdev->dev, "IPA driver setup completed successfully\n");
+
+ return 0;
+
+err_default_route_clear:
+ ipa_endpoint_default_route_clear(ipa);
+ ipa_endpoint_disable_one(exception_endpoint);
+err_table_teardown:
+ ipa_table_teardown(ipa);
+err_mem_teardown:
+ ipa_mem_teardown(ipa);
+err_command_disable:
+ ipa_endpoint_disable_one(command_endpoint);
+err_endpoint_teardown:
+ ipa_endpoint_teardown(ipa);
+ ipa_uc_teardown(ipa);
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
+ ipa_interrupt_teardown(ipa->interrupt);
+err_gsi_teardown:
+ gsi_teardown(&ipa->gsi);
+
+ return ret;
+}
+
+/**
+ * ipa_teardown() - Inverse of ipa_setup()
+ * @ipa: IPA pointer
+ */
+static void ipa_teardown(struct ipa *ipa)
+{
+ struct ipa_endpoint *exception_endpoint;
+ struct ipa_endpoint *command_endpoint;
+
+ ipa_modem_teardown(ipa);
+ ipa_endpoint_default_route_clear(ipa);
+ exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ ipa_endpoint_disable_one(exception_endpoint);
+ ipa_table_teardown(ipa);
+ ipa_mem_teardown(ipa);
+ command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ ipa_endpoint_disable_one(command_endpoint);
+ ipa_endpoint_teardown(ipa);
+ ipa_uc_teardown(ipa);
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
+ ipa_interrupt_teardown(ipa->interrupt);
+ gsi_teardown(&ipa->gsi);
+}
+
+/* Configure QMB Core Master Port selection */
+static void ipa_hardware_config_comp(struct ipa *ipa)
+{
+ u32 val;
+
+ /* Nothing to configure for IPA v3.5.1 */
+ if (ipa->version == IPA_VERSION_3_5_1)
+ return;
+
+ val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+
+ if (ipa->version == IPA_VERSION_4_0) {
+ val &= ~IPA_QMB_SELECT_CONS_EN_FMASK;
+ val &= ~IPA_QMB_SELECT_PROD_EN_FMASK;
+ val &= ~IPA_QMB_SELECT_GLOBAL_EN_FMASK;
+ } else {
+ val |= GSI_MULTI_AXI_MASTERS_DIS_FMASK;
+ }
+
+ val |= GSI_MULTI_INORDER_RD_DIS_FMASK;
+ val |= GSI_MULTI_INORDER_WR_DIS_FMASK;
+
+ iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+}
+
+/* Configure DDR and PCIe max read/write QSB values */
+static void ipa_hardware_config_qsb(struct ipa *ipa)
+{
+ u32 val;
+
+ /* QMB_0 represents DDR; QMB_1 represents PCIe (not present in 4.2) */
+ val = u32_encode_bits(8, GEN_QMB_0_MAX_WRITES_FMASK);
+ if (ipa->version == IPA_VERSION_4_2)
+ val |= u32_encode_bits(0, GEN_QMB_1_MAX_WRITES_FMASK);
+ else
+ val |= u32_encode_bits(4, GEN_QMB_1_MAX_WRITES_FMASK);
+ iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET);
+
+ if (ipa->version == IPA_VERSION_3_5_1) {
+ val = u32_encode_bits(8, GEN_QMB_0_MAX_READS_FMASK);
+ val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK);
+ } else {
+ val = u32_encode_bits(12, GEN_QMB_0_MAX_READS_FMASK);
+ if (ipa->version == IPA_VERSION_4_2)
+ val |= u32_encode_bits(0, GEN_QMB_1_MAX_READS_FMASK);
+ else
+ val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK);
+ /* GEN_QMB_0_MAX_READS_BEATS is 0 */
+ /* GEN_QMB_1_MAX_READS_BEATS is 0 */
+ }
+ iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET);
+}
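+
+/* Summary of the QSB limits programmed above (derived from the code):
+ *
+ *			max writes	max reads
+ *			QMB_0 QMB_1	QMB_0 QMB_1
+ *	IPA v3.5.1	  8     4	  8    12
+ *	IPA v4.0/v4.1	  8     4	 12    12
+ *	IPA v4.2	  8     0	 12     0	(no PCIe QMB_1)
+ */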
+
+static void ipa_idle_indication_cfg(struct ipa *ipa,
+ u32 enter_idle_debounce_thresh,
+ bool const_non_idle_enable)
+{
+ u32 offset;
+ u32 val;
+
+ val = u32_encode_bits(enter_idle_debounce_thresh,
+ ENTER_IDLE_DEBOUNCE_THRESH_FMASK);
+ if (const_non_idle_enable)
+ val |= CONST_NON_IDLE_ENABLE_FMASK;
+
+ offset = ipa_reg_idle_indication_cfg_offset(ipa->version);
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+/**
+ * ipa_hardware_dcd_config() - Enable dynamic clock division on IPA
+ * @ipa: IPA pointer
+ *
+ * Configures when the IPA signals it is idle to the global clock
+ * controller, which can respond by scaling down the clock to
+ * save power.
+ */
+static void ipa_hardware_dcd_config(struct ipa *ipa)
+{
+ /* Recommended values for IPA 3.5 according to IPA HPG */
+ ipa_idle_indication_cfg(ipa, 256, false);
+}
+
+static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
+{
+ /* Power-on reset values */
+ ipa_idle_indication_cfg(ipa, 0, true);
+}
+
+/**
+ * ipa_hardware_config() - Primitive hardware initialization
+ * @ipa: IPA pointer
+ */
+static void ipa_hardware_config(struct ipa *ipa)
+{
+ u32 granularity;
+ u32 val;
+
+ /* Fill in backward-compatibility register, based on version */
+ val = ipa_reg_bcr_val(ipa->version);
+ iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
+
+ if (ipa->version != IPA_VERSION_3_5_1) {
+ /* Enable open global clocks (hardware workaround) */
+ val = GLOBAL_FMASK;
+ val |= GLOBAL_2X_CLK_FMASK;
+ iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
+
+ /* Disable PA mask to allow HOLB drop (hardware workaround) */
+ val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
+ val &= ~PA_MASK_EN;
+ iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
+ }
+
+ ipa_hardware_config_comp(ipa);
+
+ /* Configure system bus limits */
+ ipa_hardware_config_qsb(ipa);
+
+ /* Configure aggregation granularity */
+ granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
+ val = u32_encode_bits(granularity, AGGR_GRANULARITY);
+ iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
+
+ /* Disable hashed IPv4 and IPv6 routing and filtering for IPA v4.2 */
+ if (ipa->version == IPA_VERSION_4_2)
+ iowrite32(0, ipa->reg_virt + IPA_REG_FILT_ROUT_HASH_EN_OFFSET);
+
+ /* Enable dynamic clock division */
+ ipa_hardware_dcd_config(ipa);
+}
+
+/**
+ * ipa_hardware_deconfig() - Inverse of ipa_hardware_config()
+ * @ipa: IPA pointer
+ *
+ * This restores the power-on reset values (even if they aren't different)
+ */
+static void ipa_hardware_deconfig(struct ipa *ipa)
+{
+ /* Mostly we just leave things as we set them. */
+ ipa_hardware_dcd_deconfig(ipa);
+}
+
+#ifdef IPA_VALIDATE
+
+/* Number of resource groups supported, based on IPA version
+ * (see IPA_RESOURCE_GROUP_COUNT)
+ */
+static int ipa_resource_group_count(struct ipa *ipa)
+{
+ switch (ipa->version) {
+ case IPA_VERSION_3_5_1:
+ return 3;
+
+ case IPA_VERSION_4_0:
+ case IPA_VERSION_4_1:
+ return 4;
+
+ case IPA_VERSION_4_2:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static bool ipa_resource_limits_valid(struct ipa *ipa,
+ const struct ipa_resource_data *data)
+{
+ u32 group_count = ipa_resource_group_count(ipa);
+ u32 i;
+ u32 j;
+
+ if (!group_count)
+ return false;
+
+ /* Return false if a non-zero resource group limit is specified
+ * for a resource group not supported by the hardware.
+ */
+ for (i = 0; i < data->resource_src_count; i++) {
+ const struct ipa_resource_src *resource;
+
+ resource = &data->resource_src[i];
+ for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++)
+ if (resource->limits[j].min || resource->limits[j].max)
+ return false;
+ }
+
+ for (i = 0; i < data->resource_dst_count; i++) {
+ const struct ipa_resource_dst *resource;
+
+ resource = &data->resource_dst[i];
+ for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++)
+ if (resource->limits[j].min || resource->limits[j].max)
+ return false;
+ }
+
+ return true;
+}
+
+#else /* !IPA_VALIDATE */
+
+static bool ipa_resource_limits_valid(struct ipa *ipa,
+ const struct ipa_resource_data *data)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
+
+static void
+ipa_resource_config_common(struct ipa *ipa, u32 offset,
+ const struct ipa_resource_limits *xlimits,
+ const struct ipa_resource_limits *ylimits)
+{
+ u32 val;
+
+ val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK);
+ val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK);
+ val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
+ val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+static void ipa_resource_config_src_01(struct ipa *ipa,
+ const struct ipa_resource_src *resource)
+{
+ u32 offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+
+ ipa_resource_config_common(ipa, offset,
+ &resource->limits[0], &resource->limits[1]);
+}
+
+static void ipa_resource_config_src_23(struct ipa *ipa,
+ const struct ipa_resource_src *resource)
+{
+ u32 offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+
+ ipa_resource_config_common(ipa, offset,
+ &resource->limits[2], &resource->limits[3]);
+}
+
+static void ipa_resource_config_dst_01(struct ipa *ipa,
+ const struct ipa_resource_dst *resource)
+{
+ u32 offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+
+ ipa_resource_config_common(ipa, offset,
+ &resource->limits[0], &resource->limits[1]);
+}
+
+static void ipa_resource_config_dst_23(struct ipa *ipa,
+ const struct ipa_resource_dst *resource)
+{
+ u32 offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+
+ ipa_resource_config_common(ipa, offset,
+ &resource->limits[2], &resource->limits[3]);
+}
+
+static int
+ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
+{
+ u32 i;
+
+ if (!ipa_resource_limits_valid(ipa, data))
+ return -EINVAL;
+
+ for (i = 0; i < data->resource_src_count; i++) {
+ ipa_resource_config_src_01(ipa, &data->resource_src[i]);
+ ipa_resource_config_src_23(ipa, &data->resource_src[i]);
+ }
+
+ for (i = 0; i < data->resource_dst_count; i++) {
+ ipa_resource_config_dst_01(ipa, &data->resource_dst[i]);
+ ipa_resource_config_dst_23(ipa, &data->resource_dst[i]);
+ }
+
+ return 0;
+}
+
+static void ipa_resource_deconfig(struct ipa *ipa)
+{
+ /* Nothing to do */
+}
+
+/**
+ * ipa_config() - Configure IPA hardware
+ * @ipa: IPA pointer
+ * @data: IPA configuration data
+ *
+ * Perform initialization requiring IPA clock to be enabled.
+ */
+static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
+{
+ int ret;
+
+ /* Get a clock reference to allow initialization. This reference
+ * is held after initialization completes, and won't get dropped
+ * unless/until a system suspend request arrives.
+ */
+ atomic_set(&ipa->suspend_ref, 1);
+ ipa_clock_get(ipa);
+
+ ipa_hardware_config(ipa);
+
+ ret = ipa_endpoint_config(ipa);
+ if (ret)
+ goto err_hardware_deconfig;
+
+ ret = ipa_mem_config(ipa);
+ if (ret)
+ goto err_endpoint_deconfig;
+
+ ipa_table_config(ipa);
+
+ /* Assign resource limitation to each group */
+ ret = ipa_resource_config(ipa, data->resource_data);
+ if (ret)
+ goto err_table_deconfig;
+
+ ret = ipa_modem_config(ipa);
+ if (ret)
+ goto err_resource_deconfig;
+
+ return 0;
+
+err_resource_deconfig:
+ ipa_resource_deconfig(ipa);
+err_table_deconfig:
+ ipa_table_deconfig(ipa);
+ ipa_mem_deconfig(ipa);
+err_endpoint_deconfig:
+ ipa_endpoint_deconfig(ipa);
+err_hardware_deconfig:
+ ipa_hardware_deconfig(ipa);
+ ipa_clock_put(ipa);
+ atomic_set(&ipa->suspend_ref, 0);
+
+ return ret;
+}
+
+/**
+ * ipa_deconfig() - Inverse of ipa_config()
+ * @ipa: IPA pointer
+ */
+static void ipa_deconfig(struct ipa *ipa)
+{
+ ipa_modem_deconfig(ipa);
+ ipa_resource_deconfig(ipa);
+ ipa_table_deconfig(ipa);
+ ipa_mem_deconfig(ipa);
+ ipa_endpoint_deconfig(ipa);
+ ipa_hardware_deconfig(ipa);
+ ipa_clock_put(ipa);
+ atomic_set(&ipa->suspend_ref, 0);
+}
+
+static int ipa_firmware_load(struct device *dev)
+{
+ const struct firmware *fw;
+ struct device_node *node;
+ struct resource res;
+ phys_addr_t phys;
+ ssize_t size;
+ void *virt;
+ int ret;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!node) {
+ dev_err(dev, "DT error getting \"memory-region\" property\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "error %d getting \"memory-region\" resource\n",
+ ret);
+ return ret;
+ }
+
+ ret = request_firmware(&fw, IPA_FWS_PATH, dev);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"%s\"\n", ret, IPA_FWS_PATH);
+ return ret;
+ }
+
+ phys = res.start;
+ size = (size_t)resource_size(&res);
+ virt = memremap(phys, size, MEMREMAP_WC);
+ if (!virt) {
+ dev_err(dev, "unable to remap firmware memory\n");
+ ret = -ENOMEM;
+ goto out_release_firmware;
+ }
+
+ ret = qcom_mdt_load(dev, fw, IPA_FWS_PATH, IPA_PAS_ID,
+ virt, phys, size, NULL);
+ if (ret)
+ dev_err(dev, "error %d loading \"%s\"\n", ret, IPA_FWS_PATH);
+ else if ((ret = qcom_scm_pas_auth_and_reset(IPA_PAS_ID)))
+ dev_err(dev, "error %d authenticating \"%s\"\n", ret,
+ IPA_FWS_PATH);
+
+ memunmap(virt);
+out_release_firmware:
+ release_firmware(fw);
+
+ return ret;
+}
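+
+/* The "memory-region" phandle used above points at a reserved-memory
+ * node carved out for the IPA firmware. A hypothetical example (the
+ * real node and addresses are SoC-specific):
+ *
+ *	ipa_fw_mem: memory@8b700000 {
+ *		reg = <0 0x8b700000 0 0x10000>;
+ *		no-map;
+ *	};
+ */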
+
+static const struct of_device_id ipa_match[] = {
+ {
+ .compatible = "qcom,sdm845-ipa",
+ .data = &ipa_data_sdm845,
+ },
+ {
+ .compatible = "qcom,sc7180-ipa",
+ .data = &ipa_data_sc7180,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ipa_match);
+
+static phandle of_property_read_phandle(const struct device_node *np,
+ const char *name)
+{
+ struct property *prop;
+ int len = 0;
+
+ prop = of_find_property(np, name, &len);
+ if (!prop || len != sizeof(__be32))
+ return 0;
+
+ return be32_to_cpup(prop->value);
+}
+
+/* Check things that can be validated at build time. This just
+ * groups these checks in one place so the BUILD_BUG_ON() calls
+ * don't clutter the rest of the code.
+ */
+static void ipa_validate_build(void)
+{
+#ifdef IPA_VALIDATE
+ /* We assume we're working on 64-bit hardware */
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT));
+
+ /* Code assumes the EE ID for the AP is 0 (zeroed structure field) */
+ BUILD_BUG_ON(GSI_EE_AP != 0);
+
+ /* There's no point if we have no channels or event rings */
+ BUILD_BUG_ON(!GSI_CHANNEL_COUNT_MAX);
+ BUILD_BUG_ON(!GSI_EVT_RING_COUNT_MAX);
+
+ /* GSI hardware design limits */
+ BUILD_BUG_ON(GSI_CHANNEL_COUNT_MAX > 32);
+ BUILD_BUG_ON(GSI_EVT_RING_COUNT_MAX > 31);
+
+ /* The number of TREs in a transaction is limited by the channel's
+ * TLV FIFO size. A transaction structure uses 8-bit fields
+ * to represent the number of TREs it has allocated and used.
+ */
+ BUILD_BUG_ON(GSI_TLV_MAX > U8_MAX);
+
+ /* Exceeding 128 bytes makes the transaction pool *much* larger */
+ BUILD_BUG_ON(sizeof(struct gsi_trans) > 128);
+
+ /* This is used as a divisor */
+ BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);
+#endif /* IPA_VALIDATE */
+}
+
+/**
+ * ipa_probe() - IPA platform driver probe function
+ * @pdev: Platform device pointer
+ *
+ * Return: 0 if successful, or a negative error code (possibly
+ * EPROBE_DEFER)
+ *
+ * This is the main entry point for the IPA driver. Initialization proceeds
+ * in several stages:
+ * - The "init" stage involves activities that can be initialized without
+ * access to the IPA hardware.
+ * - The "config" stage requires the IPA clock to be active so IPA registers
+ * can be accessed, but does not require the use of IPA immediate commands.
+ * - The "setup" stage uses IPA immediate commands, and so requires the GSI
+ * layer to be initialized.
+ *
+ * A Boolean Device Tree "modem-init" property determines whether GSI
+ * initialization will be performed by the AP (Trust Zone) or the modem.
+ * If the AP does GSI initialization, the setup phase is entered after
+ * this has completed successfully. Otherwise the modem initializes
+ * the GSI layer and signals it has finished by sending an SMP2P interrupt
+ * to the AP; this triggers the start of IPA setup.
+ */
+static int ipa_probe(struct platform_device *pdev)
+{
+ struct wakeup_source *wakeup_source;
+ struct device *dev = &pdev->dev;
+ const struct ipa_data *data;
+ struct ipa_clock *clock;
+ struct rproc *rproc;
+ bool modem_alloc;
+ bool modem_init;
+ struct ipa *ipa;
+ phandle phandle;
+ bool prefetch;
+ int ret;
+
+ ipa_validate_build();
+
+ /* If we need Trust Zone, make sure it's available */
+ modem_init = of_property_read_bool(dev->of_node, "modem-init");
+ if (!modem_init)
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
+ /* We rely on remoteproc to tell us about modem state changes */
+ phandle = of_property_read_phandle(dev->of_node, "modem-remoteproc");
+ if (!phandle) {
+ dev_err(dev, "DT missing \"modem-remoteproc\" property\n");
+ return -EINVAL;
+ }
+
+ rproc = rproc_get_by_phandle(phandle);
+ if (!rproc)
+ return -EPROBE_DEFER;
+
+ /* The clock and interconnects might not be ready when we're
+ * probed, so this might return -EPROBE_DEFER.
+ */
+ clock = ipa_clock_init(dev);
+ if (IS_ERR(clock)) {
+ ret = PTR_ERR(clock);
+ goto err_rproc_put;
+ }
+
+ /* No more EPROBE_DEFER. Get our configuration data */
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ /* This is really a validation error (should never happen) */
+ dev_err(dev, "matched hardware not supported\n");
+ ret = -ENOTSUPP;
+ goto err_clock_exit;
+ }
+
+ /* Create a wakeup source. */
+ wakeup_source = wakeup_source_register(dev, "ipa");
+ if (!wakeup_source) {
+ /* The most likely reason for failure is memory exhaustion */
+ ret = -ENOMEM;
+ goto err_clock_exit;
+ }
+
+ /* Allocate and initialize the IPA structure */
+ ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
+ if (!ipa) {
+ ret = -ENOMEM;
+ goto err_wakeup_source_unregister;
+ }
+
+ ipa->pdev = pdev;
+ dev_set_drvdata(dev, ipa);
+ ipa->modem_rproc = rproc;
+ ipa->clock = clock;
+ atomic_set(&ipa->suspend_ref, 0);
+ ipa->wakeup_source = wakeup_source;
+ ipa->version = data->version;
+
+ ret = ipa_reg_init(ipa);
+ if (ret)
+ goto err_kfree_ipa;
+
+ ret = ipa_mem_init(ipa, data->mem_count, data->mem_data);
+ if (ret)
+ goto err_reg_exit;
+
+ /* GSI v2.0+ (IPA v4.0+) uses prefetch for the command channel */
+ prefetch = ipa->version != IPA_VERSION_3_5_1;
+ /* IPA v4.2 requires the AP to allocate channels for the modem */
+ modem_alloc = ipa->version == IPA_VERSION_4_2;
+
+ ret = gsi_init(&ipa->gsi, pdev, prefetch, data->endpoint_count,
+ data->endpoint_data, modem_alloc);
+ if (ret)
+ goto err_mem_exit;
+
+ /* Result is a non-zero mask of endpoints that support filtering */
+ ipa->filter_map = ipa_endpoint_init(ipa, data->endpoint_count,
+ data->endpoint_data);
+ if (!ipa->filter_map) {
+ ret = -EINVAL;
+ goto err_gsi_exit;
+ }
+
+ ret = ipa_table_init(ipa);
+ if (ret)
+ goto err_endpoint_exit;
+
+ ret = ipa_modem_init(ipa, modem_init);
+ if (ret)
+ goto err_table_exit;
+
+ ret = ipa_config(ipa, data);
+ if (ret)
+ goto err_modem_exit;
+
+ dev_info(dev, "IPA driver initialized");
+
+ /* If the modem is doing early initialization, it will trigger a
+ * call to ipa_setup() when it has finished. In that case
+ * we're done here.
+ */
+ if (modem_init)
+ return 0;
+
+ /* Otherwise we need to load the firmware and have Trust Zone validate
+ * and install it. If that succeeds we can proceed with setup.
+ */
+ ret = ipa_firmware_load(dev);
+ if (ret)
+ goto err_deconfig;
+
+ ret = ipa_setup(ipa);
+ if (ret)
+ goto err_deconfig;
+
+ return 0;
+
+err_deconfig:
+ ipa_deconfig(ipa);
+err_modem_exit:
+ ipa_modem_exit(ipa);
+err_table_exit:
+ ipa_table_exit(ipa);
+err_endpoint_exit:
+ ipa_endpoint_exit(ipa);
+err_gsi_exit:
+ gsi_exit(&ipa->gsi);
+err_mem_exit:
+ ipa_mem_exit(ipa);
+err_reg_exit:
+ ipa_reg_exit(ipa);
+err_kfree_ipa:
+ kfree(ipa);
+err_wakeup_source_unregister:
+ wakeup_source_unregister(wakeup_source);
+err_clock_exit:
+ ipa_clock_exit(clock);
+err_rproc_put:
+ rproc_put(rproc);
+
+ return ret;
+}
+
+static int ipa_remove(struct platform_device *pdev)
+{
+ struct ipa *ipa = dev_get_drvdata(&pdev->dev);
+ struct rproc *rproc = ipa->modem_rproc;
+ struct ipa_clock *clock = ipa->clock;
+ struct wakeup_source *wakeup_source;
+ int ret;
+
+ wakeup_source = ipa->wakeup_source;
+
+ if (ipa->setup_complete) {
+ ret = ipa_modem_stop(ipa);
+ if (ret)
+ return ret;
+
+ ipa_teardown(ipa);
+ }
+
+ ipa_deconfig(ipa);
+ ipa_modem_exit(ipa);
+ ipa_table_exit(ipa);
+ ipa_endpoint_exit(ipa);
+ gsi_exit(&ipa->gsi);
+ ipa_mem_exit(ipa);
+ ipa_reg_exit(ipa);
+ kfree(ipa);
+ wakeup_source_unregister(wakeup_source);
+ ipa_clock_exit(clock);
+ rproc_put(rproc);
+
+ return 0;
+}
+
+/**
+ * ipa_suspend() - Power management system suspend callback
+ * @dev: IPA device structure
+ *
+ * Return: Zero
+ *
+ * Called by the PM framework when a system suspend operation is invoked.
+ */
+static int ipa_suspend(struct device *dev)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ ipa_clock_put(ipa);
+ atomic_set(&ipa->suspend_ref, 0);
+
+ return 0;
+}
+
+/**
+ * ipa_resume() - Power management system resume callback
+ * @dev: IPA device structure
+ *
+ * Return: Always returns 0
+ *
+ * Called by the PM framework when a system resume operation is invoked.
+ */
+static int ipa_resume(struct device *dev)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ /* This clock reference will keep the IPA out of suspend
+ * until we get a power management suspend request.
+ */
+ atomic_set(&ipa->suspend_ref, 1);
+ ipa_clock_get(ipa);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ipa_pm_ops = {
+ .suspend_noirq = ipa_suspend,
+ .resume_noirq = ipa_resume,
+};
+
+static struct platform_driver ipa_driver = {
+ .probe = ipa_probe,
+ .remove = ipa_remove,
+ .driver = {
+ .name = "ipa",
+ .pm = &ipa_pm_ops,
+ .of_match_table = ipa_match,
+ },
+};
+
+module_platform_driver(ipa_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm IP Accelerator device driver");
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
new file mode 100644
index 000000000000..42d2c29d9f0c
--- /dev/null
+++ b/drivers/net/ipa/ipa_mem.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include "ipa.h"
+#include "ipa_reg.h"
+#include "ipa_cmd.h"
+#include "ipa_mem.h"
+#include "ipa_data.h"
+#include "ipa_table.h"
+#include "gsi_trans.h"
+
+/* "Canary" value placed between memory regions to detect overflow */
+#define IPA_MEM_CANARY_VAL cpu_to_le32(0xdeadbeef)
+
+/* Add an immediate command to a transaction that zeroes a memory region */
+static void
+ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ dma_addr_t addr = ipa->zero_addr;
+
+ if (!mem->size)
+ return;
+
+ ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size, addr, true);
+}
+
+/**
+ * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
+ * @ipa: IPA pointer
+ *
+ * Set up the shared memory regions in IPA local memory. This involves
+ * zero-filling memory regions, and in the case of header memory, telling
+ * the IPA where it's located.
+ *
+ * This function performs the initial setup of this memory. If the modem
+ * crashes, its regions are re-zeroed in ipa_mem_zero_modem().
+ *
+ * The AP informs the modem where its portions of memory are located
+ * in a QMI exchange that occurs at modem startup.
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_mem_setup(struct ipa *ipa)
+{
+ dma_addr_t addr = ipa->zero_addr;
+ struct gsi_trans *trans;
+ u32 offset;
+ u16 size;
+
+ /* Get a transaction to define the header memory region and to zero
+ * the processing context and modem memory regions.
+ */
+ trans = ipa_cmd_trans_alloc(ipa, 4);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
+ return -EBUSY;
+ }
+
+ /* Initialize IPA-local header memory. The modem and AP header
+ * regions are contiguous, and initialized together.
+ */
+ offset = ipa->mem[IPA_MEM_MODEM_HEADER].offset;
+ size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
+ size += ipa->mem[IPA_MEM_AP_HEADER].size;
+
+ ipa_cmd_hdr_init_local_add(trans, offset, size, addr);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_AP_PROC_CTX]);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);
+
+ gsi_trans_commit_wait(trans);
+
+ /* Tell the hardware where the processing context area is located */
+ iowrite32(ipa->mem_offset + offset,
+ ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET);
+
+ return 0;
+}
+
+void ipa_mem_teardown(struct ipa *ipa)
+{
+ /* Nothing to do */
+}
+
+#ifdef IPA_VALIDATE
+
+static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ const struct ipa_mem *mem = &ipa->mem[mem_id];
+ struct device *dev = &ipa->pdev->dev;
+ u16 size_multiple;
+
+ /* Modem memory size must be a multiple of 4; all others a multiple of 8 */
+ size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
+ if (mem->size % size_multiple)
+ dev_err(dev, "region %u size not a multiple of %u bytes\n",
+ mem_id, size_multiple);
+ else if (mem->offset % 8)
+ dev_err(dev, "region %u offset not 8-byte aligned\n", mem_id);
+ else if (mem->offset < mem->canary_count * sizeof(__le32))
+ dev_err(dev, "region %u offset too small for %hu canaries\n",
+ mem_id, mem->canary_count);
+ else if (mem->offset + mem->size > ipa->mem_size)
+ dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
+ mem_id, ipa->mem_size);
+ else
+ return true;
+
+ return false;
+}
+
+#else /* !IPA_VALIDATE */
+
+static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
+
+/**
+ * ipa_mem_config() - Configure IPA shared memory
+ * @ipa: IPA pointer
+ *
+ * Return: 0 if successful, or a negative error code
+ */
+int ipa_mem_config(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_mem_id mem_id;
+ dma_addr_t addr;
+ u32 mem_size;
+ void *virt;
+ u32 val;
+
+ /* Check the advertised location and size of the shared memory area */
+ val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);
+
+ /* The fields in the register are in 8 byte units */
+ ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);
+ /* Make sure the end is within the region's mapped space */
+ mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);
+
+ /* If the sizes don't match, issue a warning */
+ if (ipa->mem_offset + mem_size > ipa->mem_size) {
+ dev_warn(dev, "ignoring larger reported memory size: 0x%08x\n",
+ mem_size);
+ } else if (ipa->mem_offset + mem_size < ipa->mem_size) {
+ dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
+ mem_size);
+ ipa->mem_size = mem_size;
+ }
+
+ /* Prealloc DMA memory for zeroing regions */
+ virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+ ipa->zero_addr = addr;
+ ipa->zero_virt = virt;
+ ipa->zero_size = IPA_MEM_MAX;
+
+ /* Verify each defined memory region is valid, and if indicated
+ * for the region, write "canary" values in the space prior to
+ * the region's base address.
+ */
+ for (mem_id = 0; mem_id < IPA_MEM_COUNT; mem_id++) {
+ const struct ipa_mem *mem = &ipa->mem[mem_id];
+ u16 canary_count;
+ __le32 *canary;
+
+ /* Validate all regions (even undefined ones) */
+ if (!ipa_mem_valid(ipa, mem_id))
+ goto err_dma_free;
+
+ /* Skip over undefined regions */
+ if (!mem->offset && !mem->size)
+ continue;
+
+ canary_count = mem->canary_count;
+ if (!canary_count)
+ continue;
+
+ /* Write canary values in the space before the region */
+ canary = ipa->mem_virt + ipa->mem_offset + mem->offset;
+ do
+ *--canary = IPA_MEM_CANARY_VAL;
+ while (--canary_count);
+ }
+
+ /* Make sure filter and route table memory regions are valid */
+ if (!ipa_table_valid(ipa))
+ goto err_dma_free;
+
+ /* Validate memory-related properties relevant to immediate commands */
+ if (!ipa_cmd_data_valid(ipa))
+ goto err_dma_free;
+
+ /* Verify the microcontroller ring alignment (0 is OK too) */
+ if (ipa->mem[IPA_MEM_UC_EVENT_RING].offset % 1024) {
+ dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
+ goto err_dma_free;
+ }
+
+ return 0;
+
+err_dma_free:
+ dma_free_coherent(dev, IPA_MEM_MAX, ipa->zero_virt, ipa->zero_addr);
+
+ return -EINVAL;
+}
+
+/* Inverse of ipa_mem_config() */
+void ipa_mem_deconfig(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+
+ dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
+ ipa->zero_size = 0;
+ ipa->zero_virt = NULL;
+ ipa->zero_addr = 0;
+}
+
+/**
+ * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
+ * @ipa: IPA pointer
+ *
+ * Zero regions of IPA-local memory used by the modem. These are configured
+ * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
+ * restarts via SSR we need to re-initialize them. A QMI message tells the
+ * modem where to find the regions of IPA-local memory it needs to know
+ * about (including these).
+ */
+int ipa_mem_zero_modem(struct ipa *ipa)
+{
+ struct gsi_trans *trans;
+
+ /* Get a transaction to zero the modem memory, modem header,
+ * and modem processing context regions.
+ */
+ trans = ipa_cmd_trans_alloc(ipa, 3);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction to zero modem memory\n");
+ return -EBUSY;
+ }
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_HEADER]);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
+
+ ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+/* Perform memory region-related initialization */
+int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct resource *res;
+ int ret;
+
+ if (count > IPA_MEM_COUNT) {
+ dev_err(dev, "to many memory regions (%u > %u)\n",
+ count, IPA_MEM_COUNT);
+ return -EINVAL;
+ }
+
+ ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(dev, "error %d setting DMA mask\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
+ "ipa-shared");
+ if (!res) {
+ dev_err(dev,
+ "DT error getting \"ipa-shared\" memory property\n");
+ return -ENODEV;
+ }
+
+ ipa->mem_virt = memremap(res->start, resource_size(res), MEMREMAP_WC);
+ if (!ipa->mem_virt) {
+ dev_err(dev, "unable to remap \"ipa-shared\" memory\n");
+ return -ENOMEM;
+ }
+
+ ipa->mem_addr = res->start;
+ ipa->mem_size = resource_size(res);
+
+ /* The ipa->mem[] array is indexed by enum ipa_mem_id values */
+ ipa->mem = mem;
+
+ return 0;
+}
+
+/* Inverse of ipa_mem_init() */
+void ipa_mem_exit(struct ipa *ipa)
+{
+ memunmap(ipa->mem_virt);
+}
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
new file mode 100644
index 000000000000..065cb499ebe5
--- /dev/null
+++ b/drivers/net/ipa/ipa_mem.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_MEM_H_
+#define _IPA_MEM_H_
+
+struct ipa;
+
+/**
+ * DOC: IPA Local Memory
+ *
+ * The IPA has a block of shared memory, divided into regions used for
+ * specific purposes.
+ *
+ * The regions within the shared block are bounded by an offset (relative to
+ * the "ipa-shared" memory range) and size found in the IPA_SHARED_MEM_SIZE
+ * register.
+ *
+ * Each region is optionally preceded by one or more 32-bit "canary" values.
+ * These are meant to detect out-of-range writes (if they become corrupted).
+ * A given region (such as a filter or routing table) has the same number
+ * of canaries for all IPA hardware versions. Still, the number used is
+ * defined in the config data, allowing for generic handling of regions.
+ *
+ * The set of memory regions is defined in configuration data. They are
+ * subject to these constraints:
+ * - a zero offset and zero size represents an undefined region
+ * - a region's offset is defined to be *past* all "canary" values
+ * - offset must be large enough to account for all canaries
+ * - a region's size may be zero, but may still have canaries
+ * - all offsets must be 8-byte aligned
+ * - most sizes must be a multiple of 8
+ * - modem memory size must be a multiple of 4
+ * - the microcontroller ring offset must be a multiple of 1024
+ */
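+
+/* A hypothetical region definition satisfying the constraints above
+ * (purely illustrative; the real tables live in per-SoC config data):
+ *
+ *	[IPA_MEM_V4_FILTER] = {
+ *		.offset		= 0x0288,	offset is 8-byte aligned
+ *		.size		= 0x0078,	size is a multiple of 8
+ *		.canary_count	= 2,		canaries precede the offset
+ *	},
+ */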
+
+/* The maximum allowed size for any memory region */
+#define IPA_MEM_MAX (2 * PAGE_SIZE)
+
+/* IPA-resident memory region ids */
+enum ipa_mem_id {
+ IPA_MEM_UC_SHARED, /* 0 canaries */
+ IPA_MEM_UC_INFO, /* 0 canaries */
+ IPA_MEM_V4_FILTER_HASHED, /* 2 canaries */
+ IPA_MEM_V4_FILTER, /* 2 canaries */
+ IPA_MEM_V6_FILTER_HASHED, /* 2 canaries */
+ IPA_MEM_V6_FILTER, /* 2 canaries */
+ IPA_MEM_V4_ROUTE_HASHED, /* 2 canaries */
+ IPA_MEM_V4_ROUTE, /* 2 canaries */
+ IPA_MEM_V6_ROUTE_HASHED, /* 2 canaries */
+ IPA_MEM_V6_ROUTE, /* 2 canaries */
+ IPA_MEM_MODEM_HEADER, /* 2 canaries */
+ IPA_MEM_AP_HEADER, /* 0 canaries */
+ IPA_MEM_MODEM_PROC_CTX, /* 2 canaries */
+ IPA_MEM_AP_PROC_CTX, /* 0 canaries */
+ IPA_MEM_PDN_CONFIG, /* 2 canaries (IPA v4.0 and above) */
+ IPA_MEM_STATS_QUOTA, /* 2 canaries (IPA v4.0 and above) */
+ IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0 and above) */
+ IPA_MEM_STATS_DROP, /* 0 canaries (IPA v4.0 and above) */
+ IPA_MEM_MODEM, /* 0 canaries */
+ IPA_MEM_UC_EVENT_RING, /* 1 canary */
+ IPA_MEM_COUNT, /* Number of regions (not an index) */
+};
+
+/**
+ * struct ipa_mem - IPA local memory region description
+ * @offset: offset in IPA memory space to base of the region
+ * @size: size of the region in bytes
+ * @canary_count: number of 32-bit "canary" values that precede the region
+ */
+struct ipa_mem {
+ u32 offset;
+ u16 size;
+ u16 canary_count;
+};
+
+int ipa_mem_config(struct ipa *ipa);
+void ipa_mem_deconfig(struct ipa *ipa);
+
+int ipa_mem_setup(struct ipa *ipa);
+void ipa_mem_teardown(struct ipa *ipa);
+
+int ipa_mem_zero_modem(struct ipa *ipa);
+
+int ipa_mem_init(struct ipa *ipa, u32 count, const struct ipa_mem *mem);
+void ipa_mem_exit(struct ipa *ipa);
+
+#endif /* _IPA_MEM_H_ */
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
new file mode 100644
index 000000000000..55c9329a4b1d
--- /dev/null
+++ b/drivers/net/ipa/ipa_modem.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_rmnet.h>
+#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
+
+#include "ipa.h"
+#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_table.h"
+#include "ipa_mem.h"
+#include "ipa_modem.h"
+#include "ipa_smp2p.h"
+#include "ipa_qmi.h"
+
+#define IPA_NETDEV_NAME "rmnet_ipa%d"
+#define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */
+#define IPA_NETDEV_TIMEOUT 10 /* seconds */
+
+enum ipa_modem_state {
+ IPA_MODEM_STATE_STOPPED = 0,
+ IPA_MODEM_STATE_STARTING,
+ IPA_MODEM_STATE_RUNNING,
+ IPA_MODEM_STATE_STOPPING,
+};
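+
+/* State transitions, driven by atomic_cmpxchg() in ipa_modem_start()
+ * and ipa_modem_stop() below:
+ *
+ *	STOPPED -> STARTING -> RUNNING -> STOPPING -> STOPPED
+ *
+ * A start attempt in any state other than STOPPED is silently
+ * ignored, a stop attempt in a transitional state returns -EBUSY,
+ * and a failed transition reverts to the prior state.
+ */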
+
+/** struct ipa_priv - IPA network device private data */
+struct ipa_priv {
+ struct ipa *ipa;
+};
+
+/** ipa_open() - Opens the modem network interface */
+static int ipa_open(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+ int ret;
+
+ ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ if (ret)
+ return ret;
+ ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ if (ret)
+ goto err_disable_tx;
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_disable_tx:
+ ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+
+ return ret;
+}
+
+/** ipa_stop() - Stops the modem network interface. */
+static int ipa_stop(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+
+ netif_stop_queue(netdev);
+
+ ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+
+ return 0;
+}
+
+/** ipa_start_xmit() - Transmits an skb.
+ * @skb: skb to be transmitted
+ * @netdev: network device
+ *
+ * Return codes:
+ * NETDEV_TX_OK: Success
+ * NETDEV_TX_BUSY: Error while transmitting the skb. Try again later
+ */
+static int ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct net_device_stats *stats = &netdev->stats;
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa_endpoint *endpoint;
+ struct ipa *ipa = priv->ipa;
+ u32 skb_len = skb->len;
+ int ret;
+
+ if (!skb_len)
+ goto err_drop_skb;
+
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
+ if (endpoint->data->qmap && skb->protocol != htons(ETH_P_MAP))
+ goto err_drop_skb;
+
+ ret = ipa_endpoint_skb_tx(endpoint, skb);
+ if (ret) {
+ if (ret != -E2BIG)
+ return NETDEV_TX_BUSY;
+ goto err_drop_skb;
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes += skb_len;
+
+ return NETDEV_TX_OK;
+
+err_drop_skb:
+ dev_kfree_skb_any(skb);
+ stats->tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb)
+{
+ struct net_device_stats *stats = &netdev->stats;
+
+ if (skb) {
+ skb->dev = netdev;
+ skb->protocol = htons(ETH_P_MAP);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+
+ (void)netif_receive_skb(skb);
+ } else {
+ stats->rx_dropped++;
+ }
+}
+
+static const struct net_device_ops ipa_modem_ops = {
+ .ndo_open = ipa_open,
+ .ndo_stop = ipa_stop,
+ .ndo_start_xmit = ipa_start_xmit,
+};
+
+/** ipa_modem_netdev_setup() - netdev setup function for the modem */
+static void ipa_modem_netdev_setup(struct net_device *netdev)
+{
+ netdev->netdev_ops = &ipa_modem_ops;
+ ether_setup(netdev);
+ /* No header ops (override value set by ether_setup()) */
+ netdev->header_ops = NULL;
+ netdev->type = ARPHRD_RAWIP;
+ netdev->hard_header_len = 0;
+ netdev->max_mtu = IPA_MTU;
+ netdev->mtu = netdev->max_mtu;
+ netdev->addr_len = 0;
+ netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ /* The endpoint is configured for QMAP */
+ netdev->needed_headroom = sizeof(struct rmnet_map_header);
+ netdev->needed_tailroom = IPA_NETDEV_TAILROOM;
+ netdev->watchdog_timeo = IPA_NETDEV_TIMEOUT * HZ;
+ netdev->hw_features = NETIF_F_SG;
+}
+
+/** ipa_modem_suspend() - suspend callback
+ * @netdev: Network device
+ *
+ * Suspend the modem's endpoints.
+ */
+void ipa_modem_suspend(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+
+ netif_stop_queue(netdev);
+
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+}
+
+/** ipa_modem_resume() - resume callback for runtime_pm
+ * @netdev: network device
+ *
+ * Resume the modem's endpoints.
+ */
+void ipa_modem_resume(struct net_device *netdev)
+{
+ struct ipa_priv *priv = netdev_priv(netdev);
+ struct ipa *ipa = priv->ipa;
+
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+
+ netif_wake_queue(netdev);
+}
+
+int ipa_modem_start(struct ipa *ipa)
+{
+ enum ipa_modem_state state;
+ struct net_device *netdev;
+ struct ipa_priv *priv;
+ int ret;
+
+ /* Only attempt to start the modem if it's stopped */
+ state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_STOPPED,
+ IPA_MODEM_STATE_STARTING);
+
+ /* Silently ignore attempts when running, or when changing state */
+ if (state != IPA_MODEM_STATE_STOPPED)
+ return 0;
+
+ netdev = alloc_netdev(sizeof(struct ipa_priv), IPA_NETDEV_NAME,
+ NET_NAME_UNKNOWN, ipa_modem_netdev_setup);
+ if (!netdev) {
+ ret = -ENOMEM;
+ goto out_set_state;
+ }
+
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+
+ priv = netdev_priv(netdev);
+ priv->ipa = ipa;
+
+ ret = register_netdev(netdev);
+ if (ret)
+ free_netdev(netdev);
+ else
+ ipa->modem_netdev = netdev;
+
+out_set_state:
+ if (ret)
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
+ else
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING);
+ smp_mb__after_atomic();
+
+ return ret;
+}
+
+int ipa_modem_stop(struct ipa *ipa)
+{
+ struct net_device *netdev = ipa->modem_netdev;
+ enum ipa_modem_state state;
+ int ret;
+
+ /* Only attempt to stop the modem if it's running */
+ state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_RUNNING,
+ IPA_MODEM_STATE_STOPPING);
+
+ /* Silently ignore attempts when already stopped */
+ if (state == IPA_MODEM_STATE_STOPPED)
+ return 0;
+
+ /* If we're somewhere between stopped and starting, we're busy */
+ if (state != IPA_MODEM_STATE_RUNNING)
+ return -EBUSY;
+
+ /* Prevent the modem from triggering a call to ipa_setup() */
+ ipa_smp2p_disable(ipa);
+
+ if (netdev) {
+ /* Stop the queue and disable the endpoints if it's open */
+ ret = ipa_stop(netdev);
+ if (ret)
+ goto out_set_state;
+
+ ipa->modem_netdev = NULL;
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ } else {
+ ret = 0;
+ }
+
+out_set_state:
+ if (ret)
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING);
+ else
+ atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
+ smp_mb__after_atomic();
+
+ return ret;
+}
+
+/* Treat a "clean" modem stop the same as a crash */
+static void ipa_modem_crashed(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ int ret;
+
+ ipa_endpoint_modem_pause_all(ipa, true);
+
+ ipa_endpoint_modem_hol_block_clear_all(ipa);
+
+ ipa_table_reset(ipa, true);
+
+ ret = ipa_table_hash_flush(ipa);
+ if (ret)
+ dev_err(dev, "error %d flushing hash caches\n", ret);
+
+ ret = ipa_endpoint_modem_exception_reset_all(ipa);
+ if (ret)
+ dev_err(dev, "error %d resetting exception endpoint",
+ ret);
+
+ ipa_endpoint_modem_pause_all(ipa, false);
+
+ ret = ipa_modem_stop(ipa);
+ if (ret)
+ dev_err(dev, "error %d stopping modem", ret);
+
+ /* Now prepare for the next modem boot */
+ ret = ipa_mem_zero_modem(ipa);
+ if (ret)
+ dev_err(dev, "error %d zeroing modem memory regions\n", ret);
+}
+
+static void ipa_modem_notify(void *data, enum qcom_rproc_event event)
+{
+ struct ipa *ipa = data;
+ struct device *dev;
+
+ dev = &ipa->pdev->dev;
+ switch (event) {
+ case MODEM_STARTING:
+ dev_info(dev, "received modem starting event\n");
+ ipa_smp2p_notify_reset(ipa);
+ break;
+
+ case MODEM_RUNNING:
+ dev_info(dev, "received modem running event\n");
+ break;
+
+ case MODEM_STOPPING:
+ case MODEM_CRASHED:
+ dev_info(dev, "received modem %s event\n",
+ event == MODEM_STOPPING ? "stopping"
+ : "crashed");
+ if (ipa->setup_complete)
+ ipa_modem_crashed(ipa);
+ break;
+
+ case MODEM_OFFLINE:
+ dev_info(dev, "received modem offline event\n");
+ break;
+
+ case MODEM_REMOVING:
+ dev_info(dev, "received modem stopping event\n");
+ break;
+
+ default:
+ dev_err(&ipa->pdev->dev, "unrecognized event %u\n", event);
+ break;
+ }
+}
+
+int ipa_modem_init(struct ipa *ipa, bool modem_init)
+{
+ return ipa_smp2p_init(ipa, modem_init);
+}
+
+void ipa_modem_exit(struct ipa *ipa)
+{
+ ipa_smp2p_exit(ipa);
+}
+
+int ipa_modem_config(struct ipa *ipa)
+{
+ return qcom_register_ipa_notify(ipa->modem_rproc, ipa_modem_notify,
+ ipa);
+}
+
+void ipa_modem_deconfig(struct ipa *ipa)
+{
+ qcom_deregister_ipa_notify(ipa->modem_rproc);
+}
+
+int ipa_modem_setup(struct ipa *ipa)
+{
+ return ipa_qmi_setup(ipa);
+}
+
+void ipa_modem_teardown(struct ipa *ipa)
+{
+ ipa_qmi_teardown(ipa);
+}
diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h
new file mode 100644
index 000000000000..2de3e216d1d4
--- /dev/null
+++ b/drivers/net/ipa/ipa_modem.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_MODEM_H_
+#define _IPA_MODEM_H_
+
+struct ipa;
+struct ipa_endpoint;
+struct net_device;
+struct sk_buff;
+
+int ipa_modem_start(struct ipa *ipa);
+int ipa_modem_stop(struct ipa *ipa);
+
+void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb);
+
+void ipa_modem_suspend(struct net_device *netdev);
+void ipa_modem_resume(struct net_device *netdev);
+
+int ipa_modem_init(struct ipa *ipa, bool modem_init);
+void ipa_modem_exit(struct ipa *ipa);
+
+int ipa_modem_config(struct ipa *ipa);
+void ipa_modem_deconfig(struct ipa *ipa);
+
+int ipa_modem_setup(struct ipa *ipa);
+void ipa_modem_teardown(struct ipa *ipa);
+
+#endif /* _IPA_MODEM_H_ */
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
new file mode 100644
index 000000000000..5090f0f923ad
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/qrtr.h>
+#include <linux/soc/qcom/qmi.h>
+
+#include "ipa.h"
+#include "ipa_endpoint.h"
+#include "ipa_mem.h"
+#include "ipa_table.h"
+#include "ipa_modem.h"
+#include "ipa_qmi_msg.h"
+
+/**
+ * DOC: AP/Modem QMI Handshake
+ *
+ * The AP and modem perform a "handshake" at initialization time to ensure
+ * both sides know when everything is ready to begin operating. The AP
+ * driver (this code) uses two QMI handles (endpoints) for this: a client
+ * using a service on the modem, and a server to handle modem requests (and
+ * to supply an indication message from the AP). Once the handshake is
+ * complete, the AP and modem may begin IPA operation. This occurs
+ * only when the AP IPA driver, modem IPA driver, and IPA microcontroller
+ * are ready.
+ *
+ * The QMI service on the modem expects to receive an INIT_DRIVER request from
+ * the AP, which contains parameters used by the modem during initialization.
+ * The AP sends this request as soon as it knows the modem-side service
+ * is available. The modem responds to this request, and if this response
+ * contains a success result, the AP knows the modem IPA driver is ready.
+ *
+ * The modem is responsible for loading firmware on the IPA microcontroller.
+ * This occurs only during the initial modem boot. The modem sends a
+ * separate DRIVER_INIT_COMPLETE request to the AP to report that the
+ * microcontroller is ready. Once the AP has received and responded to
+ * this request, it may assume the microcontroller is ready, and remains
+ * so (even if the modem reboots).
+ *
+ * There is one final exchange involved in the handshake. It is required
+ * on the initial modem boot, and optional (though it occurs in practice)
+ * on subsequent boots. The modem expects to receive a final INIT_COMPLETE
+ * indication message from the AP when it is about to begin its normal
+ * operation. The AP will only send this message after it has received
+ * and responded to an INDICATION_REGISTER request from the modem.
+ *
+ * So in summary:
+ * - Whenever the AP learns the modem has booted and its IPA QMI service
+ * is available, it sends an INIT_DRIVER request to the modem. The
+ * modem supplies a success response when it is ready to operate.
+ * - On the initial boot, the modem sets up the IPA microcontroller, and
+ * sends a DRIVER_INIT_COMPLETE request to the AP when this is done.
+ * - When the modem is ready to receive an INIT_COMPLETE indication from
+ * the AP, it sends an INDICATION_REGISTER request to the AP.
+ * - On the initial modem boot, everything is ready when:
+ * - AP has received a success response from its INIT_DRIVER request
+ * - AP has responded to a DRIVER_INIT_COMPLETE request
+ * - AP has responded to an INDICATION_REGISTER request from the modem
+ * - AP has sent an INIT_COMPLETE indication to the modem
+ * - On subsequent modem boots, everything is ready when:
+ * - AP has received a success response from its INIT_DRIVER request
+ * - AP has responded to a DRIVER_INIT_COMPLETE request
+ * - The INDICATION_REGISTER request and INIT_COMPLETE indication are
+ * optional for non-initial modem boots, and have no bearing on the
+ * determination of when things are "ready"
+ */
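+
+/* The readiness condition described above reduces to roughly the
+ * following (see ipa_qmi_ready() below):
+ *
+ *	ready = modem_ready && uc_ready &&
+ *		(!initial_boot || indication_sent);
+ */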
+
+#define IPA_HOST_SERVICE_SVC_ID 0x31
+#define IPA_HOST_SVC_VERS 1
+#define IPA_HOST_SERVICE_INS_ID 1
+
+#define IPA_MODEM_SERVICE_SVC_ID 0x31
+#define IPA_MODEM_SERVICE_INS_ID 2
+#define IPA_MODEM_SVC_VERS 1
+
+#define QMI_INIT_DRIVER_TIMEOUT 60000 /* A minute in milliseconds */
+
+/* Send an INIT_COMPLETE indication message to the modem */
+static void ipa_server_init_complete(struct ipa_qmi *ipa_qmi)
+{
+ struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ struct qmi_handle *qmi = &ipa_qmi->server_handle;
+ struct sockaddr_qrtr *sq = &ipa_qmi->modem_sq;
+ struct ipa_init_complete_ind ind = { };
+ int ret;
+
+ ind.status.result = QMI_RESULT_SUCCESS_V01;
+ ind.status.error = QMI_ERR_NONE_V01;
+
+ ret = qmi_send_indication(qmi, sq, IPA_QMI_INIT_COMPLETE,
+ IPA_QMI_INIT_COMPLETE_IND_SZ,
+ ipa_init_complete_ind_ei, &ind);
+ if (ret)
+ dev_err(&ipa->pdev->dev,
+ "error %d sending init complete indication\n", ret);
+ else
+ ipa_qmi->indication_sent = true;
+}
+
+/* If requested (and not already sent) send the INIT_COMPLETE indication */
+static void ipa_qmi_indication(struct ipa_qmi *ipa_qmi)
+{
+ if (!ipa_qmi->indication_requested)
+ return;
+
+ if (ipa_qmi->indication_sent)
+ return;
+
+ ipa_server_init_complete(ipa_qmi);
+}
+
+/* Determine whether everything is ready to start normal operation.
+ * We know everything (else) is ready when we know the IPA driver on
+ * the modem is ready, and the microcontroller is ready.
+ *
+ * When the modem boots (or reboots), the handshake sequence starts
+ * with the AP sending the modem an INIT_DRIVER request. Within
+ * that request, the uc_loaded flag will be zero (false) for an
+ * initial boot, non-zero (true) for a subsequent (SSR) boot.
+ */
+static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
+{
+ struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ int ret;
+
+ /* We aren't ready until the modem and microcontroller are */
+ if (!ipa_qmi->modem_ready || !ipa_qmi->uc_ready)
+ return;
+
+ /* Send the indication message if it was requested */
+ ipa_qmi_indication(ipa_qmi);
+
+ /* The initial boot requires us to send the indication. */
+ if (ipa_qmi->initial_boot) {
+ if (!ipa_qmi->indication_sent)
+ return;
+
+ /* The initial modem boot completed successfully */
+ ipa_qmi->initial_boot = false;
+ }
+
+ /* We're ready. Start up normal operation */
+ ret = ipa_modem_start(ipa);
+ if (ret)
+ dev_err(&ipa->pdev->dev, "error %d starting modem\n", ret);
+}
+
+/* All QMI clients from the modem node are gone (modem shut down or crashed). */
+static void ipa_server_bye(struct qmi_handle *qmi, unsigned int node)
+{
+ struct ipa_qmi *ipa_qmi;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
+
+ /* The modem client and server go away at the same time */
+ memset(&ipa_qmi->modem_sq, 0, sizeof(ipa_qmi->modem_sq));
+
+ /* initial_boot doesn't change when modem reboots */
+ /* uc_ready doesn't change when modem reboots */
+ ipa_qmi->modem_ready = false;
+ ipa_qmi->indication_requested = false;
+ ipa_qmi->indication_sent = false;
+}
+
+static struct qmi_ops ipa_server_ops = {
+ .bye = ipa_server_bye,
+};
+
+/* Callback function to handle an INDICATION_REGISTER request message from the
+ * modem. This informs the AP that the modem is now ready to receive the
+ * INIT_COMPLETE indication message.
+ */
+static void ipa_server_indication_register(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ipa_indication_register_rsp rsp = { };
+ struct ipa_qmi *ipa_qmi;
+ struct ipa *ipa;
+ int ret;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
+ ipa = container_of(ipa_qmi, struct ipa, qmi);
+
+ rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
+ rsp.rsp.error = QMI_ERR_NONE_V01;
+
+ ret = qmi_send_response(qmi, sq, txn, IPA_QMI_INDICATION_REGISTER,
+ IPA_QMI_INDICATION_REGISTER_RSP_SZ,
+ ipa_indication_register_rsp_ei, &rsp);
+ if (!ret) {
+ ipa_qmi->indication_requested = true;
+ ipa_qmi_ready(ipa_qmi); /* We might be ready now */
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "error %d sending register indication response\n", ret);
+ }
+}
+
+/* Respond to a DRIVER_INIT_COMPLETE request message from the modem. */
+static void ipa_server_driver_init_complete(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ipa_driver_init_complete_rsp rsp = { };
+ struct ipa_qmi *ipa_qmi;
+ struct ipa *ipa;
+ int ret;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, server_handle);
+ ipa = container_of(ipa_qmi, struct ipa, qmi);
+
+ rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
+ rsp.rsp.error = QMI_ERR_NONE_V01;
+
+ ret = qmi_send_response(qmi, sq, txn, IPA_QMI_DRIVER_INIT_COMPLETE,
+ IPA_QMI_DRIVER_INIT_COMPLETE_RSP_SZ,
+ ipa_driver_init_complete_rsp_ei, &rsp);
+ if (!ret) {
+ ipa_qmi->uc_ready = true;
+ ipa_qmi_ready(ipa_qmi); /* We might be ready now */
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "error %d sending init complete response\n", ret);
+ }
+}
+
+/* The server handles two request message types sent by the modem. */
+static struct qmi_msg_handler ipa_server_msg_handlers[] = {
+ {
+ .type = QMI_REQUEST,
+ .msg_id = IPA_QMI_INDICATION_REGISTER,
+ .ei = ipa_indication_register_req_ei,
+ .decoded_size = IPA_QMI_INDICATION_REGISTER_REQ_SZ,
+ .fn = ipa_server_indication_register,
+ },
+ {
+ .type = QMI_REQUEST,
+ .msg_id = IPA_QMI_DRIVER_INIT_COMPLETE,
+ .ei = ipa_driver_init_complete_req_ei,
+ .decoded_size = IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ,
+ .fn = ipa_server_driver_init_complete,
+ },
+};
+
+/* Handle an INIT_DRIVER response message from the modem. */
+static void ipa_client_init_driver(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *decoded)
+{
+ txn->result = 0; /* IPA_QMI_INIT_DRIVER request was successful */
+ complete(&txn->completion);
+}
+
+/* The client handles one response message type sent by the modem. */
+static struct qmi_msg_handler ipa_client_msg_handlers[] = {
+ {
+ .type = QMI_RESPONSE,
+ .msg_id = IPA_QMI_INIT_DRIVER,
+ .ei = ipa_init_modem_driver_rsp_ei,
+ .decoded_size = IPA_QMI_INIT_DRIVER_RSP_SZ,
+ .fn = ipa_client_init_driver,
+ },
+};
+
+/* Return a pointer to an init modem driver request structure, which contains
+ * configuration parameters for the modem. The modem may be started multiple
+ * times, but generally these parameters don't change so we can reuse the
+ * request structure once it's initialized. The only exception is the
+ * skip_uc_load field, which will be set only after the microcontroller has
+ * reported it has completed its initialization.
+ */
+static const struct ipa_init_modem_driver_req *
+init_modem_driver_req(struct ipa_qmi *ipa_qmi)
+{
+ struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ static struct ipa_init_modem_driver_req req;
+ const struct ipa_mem *mem;
+
+ /* The microcontroller is initialized on the first boot */
+ req.skip_uc_load_valid = 1;
+ req.skip_uc_load = ipa->uc_loaded ? 1 : 0;
+
+ /* We only have to initialize most of it once */
+ if (req.platform_type_valid)
+ return &req;
+
+ req.platform_type_valid = 1;
+ req.platform_type = IPA_QMI_PLATFORM_TYPE_MSM_ANDROID;
+
+ mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
+ if (mem->size) {
+ req.hdr_tbl_info_valid = 1;
+ req.hdr_tbl_info.start = ipa->mem_offset + mem->offset;
+ req.hdr_tbl_info.end = req.hdr_tbl_info.start + mem->size - 1;
+ }
+
+ mem = &ipa->mem[IPA_MEM_V4_ROUTE];
+ req.v4_route_tbl_info_valid = 1;
+ req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
+ req.v4_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE;
+
+ mem = &ipa->mem[IPA_MEM_V6_ROUTE];
+ req.v6_route_tbl_info_valid = 1;
+ req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
+ req.v6_route_tbl_info.count = mem->size / IPA_TABLE_ENTRY_SIZE;
+
+ mem = &ipa->mem[IPA_MEM_V4_FILTER];
+ req.v4_filter_tbl_start_valid = 1;
+ req.v4_filter_tbl_start = ipa->mem_offset + mem->offset;
+
+ mem = &ipa->mem[IPA_MEM_V6_FILTER];
+ req.v6_filter_tbl_start_valid = 1;
+ req.v6_filter_tbl_start = ipa->mem_offset + mem->offset;
+
+ mem = &ipa->mem[IPA_MEM_MODEM];
+ if (mem->size) {
+ req.modem_mem_info_valid = 1;
+ req.modem_mem_info.start = ipa->mem_offset + mem->offset;
+ req.modem_mem_info.size = mem->size;
+ }
+
+ req.ctrl_comm_dest_end_pt_valid = 1;
+ req.ctrl_comm_dest_end_pt =
+ ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->endpoint_id;
+
+ /* skip_uc_load_valid and skip_uc_load are set above */
+
+ mem = &ipa->mem[IPA_MEM_MODEM_PROC_CTX];
+ if (mem->size) {
+ req.hdr_proc_ctx_tbl_info_valid = 1;
+ req.hdr_proc_ctx_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+ req.hdr_proc_ctx_tbl_info.end =
+ req.hdr_proc_ctx_tbl_info.start + mem->size - 1;
+ }
+
+ /* Nothing to report for the compression table (zip_tbl_info) */
+
+ mem = &ipa->mem[IPA_MEM_V4_ROUTE_HASHED];
+ if (mem->size) {
+ req.v4_hash_route_tbl_info_valid = 1;
+ req.v4_hash_route_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+ req.v4_hash_route_tbl_info.count =
+ mem->size / IPA_TABLE_ENTRY_SIZE;
+ }
+
+ mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED];
+ if (mem->size) {
+ req.v6_hash_route_tbl_info_valid = 1;
+ req.v6_hash_route_tbl_info.start =
+ ipa->mem_offset + mem->offset;
+ req.v6_hash_route_tbl_info.count =
+ mem->size / IPA_TABLE_ENTRY_SIZE;
+ }
+
+ mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED];
+ if (mem->size) {
+ req.v4_hash_filter_tbl_start_valid = 1;
+ req.v4_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
+ }
+
+ mem = &ipa->mem[IPA_MEM_V6_FILTER_HASHED];
+ if (mem->size) {
+ req.v6_hash_filter_tbl_start_valid = 1;
+ req.v6_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
+ }
+
+ /* The following stats fields are only valid for IPA v4.0 and above */
+
+ if (ipa->version != IPA_VERSION_3_5_1) {
+ mem = &ipa->mem[IPA_MEM_STATS_QUOTA];
+ if (mem->size) {
+ req.hw_stats_quota_base_addr_valid = 1;
+ req.hw_stats_quota_base_addr =
+ ipa->mem_offset + mem->offset;
+ req.hw_stats_quota_size_valid = 1;
+ req.hw_stats_quota_size = ipa->mem_offset + mem->size;
+ }
+
+ mem = &ipa->mem[IPA_MEM_STATS_DROP];
+ if (mem->size) {
+ req.hw_stats_drop_base_addr_valid = 1;
+ req.hw_stats_drop_base_addr =
+ ipa->mem_offset + mem->offset;
+ req.hw_stats_drop_size_valid = 1;
+ req.hw_stats_drop_size = ipa->mem_offset + mem->size;
+ }
+ }
+
+ return &req;
+}
+
+/* Send an INIT_DRIVER request to the modem, and wait for it to complete. */
+static void ipa_client_init_driver_work(struct work_struct *work)
+{
+ unsigned long timeout = msecs_to_jiffies(QMI_INIT_DRIVER_TIMEOUT);
+ const struct ipa_init_modem_driver_req *req;
+ struct ipa_qmi *ipa_qmi;
+ struct qmi_handle *qmi;
+ struct qmi_txn txn;
+ struct device *dev;
+ struct ipa *ipa;
+ int ret;
+
+ ipa_qmi = container_of(work, struct ipa_qmi, init_driver_work);
+ qmi = &ipa_qmi->client_handle,
+
+ ipa = container_of(ipa_qmi, struct ipa, qmi);
+ dev = &ipa->pdev->dev;
+
+ ret = qmi_txn_init(qmi, &txn, NULL, NULL);
+ if (ret < 0) {
+ dev_err(dev, "error %d preparing init driver request\n", ret);
+ return;
+ }
+
+ /* Send the request, and if successful wait for its response */
+ req = init_modem_driver_req(ipa_qmi);
+ ret = qmi_send_request(qmi, &ipa_qmi->modem_sq, &txn,
+ IPA_QMI_INIT_DRIVER, IPA_QMI_INIT_DRIVER_REQ_SZ,
+ ipa_init_modem_driver_req_ei, req);
+ if (ret)
+ dev_err(dev, "error %d sending init driver request\n", ret);
+ else if ((ret = qmi_txn_wait(&txn, timeout)))
+ dev_err(dev, "error %d awaiting init driver response\n", ret);
+
+ if (!ret) {
+ ipa_qmi->modem_ready = true;
+ ipa_qmi_ready(ipa_qmi); /* We might be ready now */
+ } else {
+ /* If any error occurs we need to cancel the transaction */
+ qmi_txn_cancel(&txn);
+ }
+}
+
+/* The modem server is now available. We will send an INIT_DRIVER request
+ * to the modem, but can't wait for it to complete in this callback thread.
+ * Schedule a worker on the global workqueue to do that for us.
+ */
+static int
+ipa_client_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+ struct ipa_qmi *ipa_qmi;
+
+ ipa_qmi = container_of(qmi, struct ipa_qmi, client_handle);
+
+ ipa_qmi->modem_sq.sq_family = AF_QIPCRTR;
+ ipa_qmi->modem_sq.sq_node = svc->node;
+ ipa_qmi->modem_sq.sq_port = svc->port;
+
+ schedule_work(&ipa_qmi->init_driver_work);
+
+ return 0;
+}
+
+static struct qmi_ops ipa_client_ops = {
+ .new_server = ipa_client_new_server,
+};
+
+/* This is called by ipa_setup(). We can be informed via remoteproc that
+ * the modem has shut down, in which case this function will be called
+ * again to prepare for it coming back up again.
+ */
+int ipa_qmi_setup(struct ipa *ipa)
+{
+ struct ipa_qmi *ipa_qmi = &ipa->qmi;
+ int ret;
+
+ ipa_qmi->initial_boot = true;
+
+ /* The server handle is used to handle the DRIVER_INIT_COMPLETE
+ * request on the first modem boot. It also receives the
+ * INDICATION_REGISTER request on the first boot and (optionally)
+ * subsequent boots. The INIT_COMPLETE indication message is
+ * sent over the server handle if requested.
+ */
+ ret = qmi_handle_init(&ipa_qmi->server_handle,
+ IPA_QMI_SERVER_MAX_RCV_SZ, &ipa_server_ops,
+ ipa_server_msg_handlers);
+ if (ret)
+ return ret;
+
+ ret = qmi_add_server(&ipa_qmi->server_handle, IPA_HOST_SERVICE_SVC_ID,
+ IPA_HOST_SVC_VERS, IPA_HOST_SERVICE_INS_ID);
+ if (ret)
+ goto err_server_handle_release;
+
+ /* The client handle is only used for sending an INIT_DRIVER request
+ * to the modem, and receiving its response message.
+ */
+ ret = qmi_handle_init(&ipa_qmi->client_handle,
+ IPA_QMI_CLIENT_MAX_RCV_SZ, &ipa_client_ops,
+ ipa_client_msg_handlers);
+ if (ret)
+ goto err_server_handle_release;
+
+ /* We need this ready before the service lookup is added */
+ INIT_WORK(&ipa_qmi->init_driver_work, ipa_client_init_driver_work);
+
+ ret = qmi_add_lookup(&ipa_qmi->client_handle, IPA_MODEM_SERVICE_SVC_ID,
+ IPA_MODEM_SVC_VERS, IPA_MODEM_SERVICE_INS_ID);
+ if (ret)
+ goto err_client_handle_release;
+
+ return 0;
+
+err_client_handle_release:
+ /* Releasing the handle also removes registered lookups */
+ qmi_handle_release(&ipa_qmi->client_handle);
+ memset(&ipa_qmi->client_handle, 0, sizeof(ipa_qmi->client_handle));
+err_server_handle_release:
+ /* Releasing the handle also removes registered services */
+ qmi_handle_release(&ipa_qmi->server_handle);
+ memset(&ipa_qmi->server_handle, 0, sizeof(ipa_qmi->server_handle));
+
+ return ret;
+}
+
+void ipa_qmi_teardown(struct ipa *ipa)
+{
+ cancel_work_sync(&ipa->qmi.init_driver_work);
+
+ qmi_handle_release(&ipa->qmi.client_handle);
+ memset(&ipa->qmi.client_handle, 0, sizeof(ipa->qmi.client_handle));
+
+ qmi_handle_release(&ipa->qmi.server_handle);
+ memset(&ipa->qmi.server_handle, 0, sizeof(ipa->qmi.server_handle));
+}
diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h
new file mode 100644
index 000000000000..3993687593d0
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_QMI_H_
+#define _IPA_QMI_H_
+
+#include <linux/types.h>
+#include <linux/soc/qcom/qmi.h>
+
+struct ipa;
+
+/**
+ * struct ipa_qmi - QMI state associated with an IPA
+ * @client_handle: used to send QMI requests to the modem
+ * @server_handle: used to handle QMI requests from the modem
+ * @modem_sq: QRTR socket address of the modem QMI server
+ * @init_driver_work: work structure used to send the INIT_DRIVER request
+ * @initial_boot: whether the next modem boot is its first
+ * @uc_ready: whether the modem microcontroller has reported it is ready
+ * @modem_ready: whether the modem driver has reported it is ready
+ * @indication_requested: whether the modem requested the INIT_COMPLETE
+ * indication be sent when ready
+ * @indication_sent: whether the INIT_COMPLETE indication has been sent
+ */
+struct ipa_qmi {
+ struct qmi_handle client_handle;
+ struct qmi_handle server_handle;
+
+ /* Information used for the client handle */
+ struct sockaddr_qrtr modem_sq;
+ struct work_struct init_driver_work;
+
+ /* Flags used in negotiating readiness */
+ bool initial_boot;
+ bool uc_ready;
+ bool modem_ready;
+ bool indication_requested;
+ bool indication_sent;
+};
+
+int ipa_qmi_setup(struct ipa *ipa);
+void ipa_qmi_teardown(struct ipa *ipa);
+
+#endif /* !_IPA_QMI_H_ */
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
new file mode 100644
index 000000000000..03a1d0e55964
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#include <linux/stddef.h>
+#include <linux/soc/qcom/qmi.h>
+
+#include "ipa_qmi_msg.h"
+
+/* QMI message structure definition for struct ipa_indication_register_req */
+struct qmi_elem_info ipa_indication_register_req_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ master_driver_init_complete_valid),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_indication_register_req,
+ master_driver_init_complete_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ master_driver_init_complete),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_indication_register_req,
+ master_driver_init_complete),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ data_usage_quota_reached_valid),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_indication_register_req,
+ data_usage_quota_reached_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ data_usage_quota_reached),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_indication_register_req,
+ data_usage_quota_reached),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind_valid),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_indication_register_req,
+ ipa_mhi_ready_ind),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_indication_register_rsp */
+struct qmi_elem_info ipa_indication_register_rsp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_indication_register_rsp,
+ rsp),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_indication_register_rsp,
+ rsp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_driver_init_complete_req */
+struct qmi_elem_info ipa_driver_init_complete_req_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_driver_init_complete_req,
+ status),
+ .tlv_type = 0x01,
+ .offset = offsetof(struct ipa_driver_init_complete_req,
+ status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_driver_init_complete_rsp */
+struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_driver_init_complete_rsp,
+ rsp),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_driver_init_complete_rsp,
+ rsp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_init_complete_ind */
+struct qmi_elem_info ipa_init_complete_ind_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_complete_ind,
+ status),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_init_complete_ind,
+ status),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_mem_bounds */
+struct qmi_elem_info ipa_mem_bounds_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_bounds, start),
+ .offset = offsetof(struct ipa_mem_bounds, start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_bounds, end),
+ .offset = offsetof(struct ipa_mem_bounds, end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_mem_array */
+struct qmi_elem_info ipa_mem_array_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_array, start),
+ .offset = offsetof(struct ipa_mem_array, start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_array, count),
+ .offset = offsetof(struct ipa_mem_array, count),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_mem_range */
+struct qmi_elem_info ipa_mem_range_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_range, start),
+ .offset = offsetof(struct ipa_mem_range, start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_mem_range, size),
+ .offset = offsetof(struct ipa_mem_range, size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_init_modem_driver_req */
+struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ platform_type_valid),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ platform_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ platform_type),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ platform_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_tbl_info_valid),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_tbl_info),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info_valid),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_route_tbl_info),
+ .ei_array = ipa_mem_array_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info_valid),
+ .tlv_type = 0x13,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info),
+ .tlv_type = 0x13,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_route_tbl_info),
+ .ei_array = ipa_mem_array_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start_valid),
+ .tlv_type = 0x14,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start),
+ .tlv_type = 0x14,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start_valid),
+ .tlv_type = 0x15,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start),
+ .tlv_type = 0x15,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ modem_mem_info_valid),
+ .tlv_type = 0x16,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ modem_mem_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ modem_mem_info),
+ .tlv_type = 0x16,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ modem_mem_info),
+ .ei_array = ipa_mem_range_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt_valid),
+ .tlv_type = 0x17,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt),
+ .tlv_type = 0x17,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ ctrl_comm_dest_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ skip_uc_load_valid),
+ .tlv_type = 0x18,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ skip_uc_load_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ skip_uc_load),
+ .tlv_type = 0x18,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ skip_uc_load),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info_valid),
+ .tlv_type = 0x19,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info),
+ .tlv_type = 0x19,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hdr_proc_ctx_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ zip_tbl_info_valid),
+ .tlv_type = 0x1a,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ zip_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ zip_tbl_info),
+ .tlv_type = 0x1a,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ zip_tbl_info),
+ .ei_array = ipa_mem_bounds_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info_valid),
+ .tlv_type = 0x1b,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info),
+ .tlv_type = 0x1b,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_route_tbl_info),
+ .ei_array = ipa_mem_array_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info_valid),
+ .tlv_type = 0x1c,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info),
+ .tlv_type = 0x1c,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_route_tbl_info),
+ .ei_array = ipa_mem_array_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start_valid),
+ .tlv_type = 0x1d,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start),
+ .tlv_type = 0x1d,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v4_hash_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start_valid),
+ .tlv_type = 0x1e,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start),
+ .tlv_type = 0x1e,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ v6_hash_filter_tbl_start),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr_valid),
+ .tlv_type = 0x1f,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr),
+ .tlv_type = 0x1f,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_base_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size_valid),
+ .tlv_type = 0x20,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size),
+ .tlv_type = 0x20,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_quota_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr_valid),
+ .tlv_type = 0x21,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr),
+ .tlv_type = 0x21,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_base_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size_valid),
+ .tlv_type = 0x22,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size),
+ .tlv_type = 0x22,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
+ hw_stats_drop_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
+
+/* QMI message structure definition for struct ipa_init_modem_driver_rsp */
+struct qmi_elem_info ipa_init_modem_driver_rsp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ rsp),
+ .tlv_type = 0x02,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ rsp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt_valid),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt),
+ .tlv_type = 0x10,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ ctrl_comm_dest_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ default_end_pt_valid),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ default_end_pt_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ default_end_pt),
+ .tlv_type = 0x11,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ default_end_pt),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending_valid),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size =
+ sizeof_field(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending),
+ .tlv_type = 0x12,
+ .offset = offsetof(struct ipa_init_modem_driver_rsp,
+ modem_driver_init_pending),
+ },
+ {
+ .data_type = QMI_EOTI,
+ },
+};
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
new file mode 100644
index 000000000000..cfac456cea0c
--- /dev/null
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_QMI_MSG_H_
+#define _IPA_QMI_MSG_H_
+
+/* === Only "ipa_qmi.c" and "ipa_qmi_msg.c" should include this file === */
+
+#include <linux/types.h>
+#include <linux/soc/qcom/qmi.h>
+
+/* Request/response/indication QMI message ids used for IPA. Receiving
+ * end issues a response for requests; indications require no response.
+ */
+#define IPA_QMI_INDICATION_REGISTER 0x20 /* modem -> AP request */
+#define IPA_QMI_INIT_DRIVER 0x21 /* AP -> modem request */
+#define IPA_QMI_INIT_COMPLETE 0x22 /* AP -> modem indication */
+#define IPA_QMI_DRIVER_INIT_COMPLETE 0x35 /* modem -> AP request */
+
+/* The maximum size required for message types. These sizes include
+ * the message data, along with type (1 byte) and length (2 byte)
+ * information for each field. The qmi_send_*() interfaces require
+ * the message size to be provided.
+ */
+#define IPA_QMI_INDICATION_REGISTER_REQ_SZ 12 /* -> server handle */
+#define IPA_QMI_INDICATION_REGISTER_RSP_SZ 7 /* <- server handle */
+#define IPA_QMI_INIT_DRIVER_REQ_SZ 162 /* client handle -> */
+#define IPA_QMI_INIT_DRIVER_RSP_SZ 25 /* client handle <- */
+#define IPA_QMI_INIT_COMPLETE_IND_SZ 7 /* <- server handle */
+#define IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ 4 /* -> server handle */
+#define IPA_QMI_DRIVER_INIT_COMPLETE_RSP_SZ 7 /* <- server handle */
+
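+/* As a worked example of the sizes above, the DRIVER_INIT_COMPLETE
+ * request carries a single 1-byte status field; the field's 1-byte type
+ * and 2-byte length information account for the other 3 of its 4 bytes.
+ */
+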
+/* Maximum size of messages we expect the AP to receive (max of above) */
+#define IPA_QMI_SERVER_MAX_RCV_SZ 12
+#define IPA_QMI_CLIENT_MAX_RCV_SZ 25
+
+/* Request message for the IPA_QMI_INDICATION_REGISTER request */
+struct ipa_indication_register_req {
+ u8 master_driver_init_complete_valid;
+ u8 master_driver_init_complete;
+ u8 data_usage_quota_reached_valid;
+ u8 data_usage_quota_reached;
+ u8 ipa_mhi_ready_ind_valid;
+ u8 ipa_mhi_ready_ind;
+};
+
+/* The response to an IPA_QMI_INDICATION_REGISTER request consists only of
+ * a standard QMI response.
+ */
+struct ipa_indication_register_rsp {
+ struct qmi_response_type_v01 rsp;
+};
+
+/* Request message for the IPA_QMI_DRIVER_INIT_COMPLETE request */
+struct ipa_driver_init_complete_req {
+ u8 status;
+};
+
+/* The response to an IPA_QMI_DRIVER_INIT_COMPLETE request consists only
+ * of a standard QMI response.
+ */
+struct ipa_driver_init_complete_rsp {
+ struct qmi_response_type_v01 rsp;
+};
+
+/* The message for the IPA_QMI_INIT_COMPLETE_IND indication consists
+ * only of a standard QMI response.
+ */
+struct ipa_init_complete_ind {
+ struct qmi_response_type_v01 status;
+};
+
+/* The AP tells the modem its platform type. We assume Android. */
+enum ipa_platform_type {
+ IPA_QMI_PLATFORM_TYPE_INVALID = 0, /* Invalid */
+ IPA_QMI_PLATFORM_TYPE_TN = 1, /* Data card */
+ IPA_QMI_PLATFORM_TYPE_LE = 2, /* Data router */
+ IPA_QMI_PLATFORM_TYPE_MSM_ANDROID = 3, /* Android MSM */
+ IPA_QMI_PLATFORM_TYPE_MSM_WINDOWS = 4, /* Windows MSM */
+ IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 5, /* QNX MSM */
+};
+
+/* This defines the start and end offset of a range of memory. Both
+ * fields are offsets relative to the start of IPA shared memory.
+ * The end value is the last addressable byte *within* the range.
+ */
+struct ipa_mem_bounds {
+ u32 start;
+ u32 end;
+};
+
+/* This defines the location and size of an array. The start value
+ * is an offset relative to the start of IPA shared memory. The
+ * size of the array is implied by the number of entries (the entry
+ * size is assumed to be known).
+ */
+struct ipa_mem_array {
+ u32 start;
+ u32 count;
+};
+
+/* This defines the location and size of a range of memory. The
+ * start is an offset relative to the start of IPA shared memory.
+ * This differs from the ipa_mem_bounds structure in that the size
+ * (in bytes) of the memory region is specified rather than the
+ * offset of its last byte.
+ */
+struct ipa_mem_range {
+ u32 start;
+ u32 size;
+};
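+
+/* For example, a 256-byte region at offset 0x100 is described either by
+ * the bounds { .start = 0x100, .end = 0x1ff } or, equivalently, by the
+ * range { .start = 0x100, .size = 0x100 }.
+ */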
+
+/* The message for the IPA_QMI_INIT_DRIVER request contains information
+ * from the AP that affects modem initialization.
+ */
+struct ipa_init_modem_driver_req {
+ u8 platform_type_valid;
+ u32 platform_type; /* enum ipa_platform_type */
+
+ /* Modem header table information. This defines the IPA shared
+ * memory in which the modem may insert header table entries.
+ */
+ u8 hdr_tbl_info_valid;
+ struct ipa_mem_bounds hdr_tbl_info;
+
+ /* Routing table information. These define the location and size of
+ * non-hashable IPv4 and IPv6 route tables. The start values are
+ * offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_route_tbl_info_valid;
+ struct ipa_mem_array v4_route_tbl_info;
+ u8 v6_route_tbl_info_valid;
+ struct ipa_mem_array v6_route_tbl_info;
+
+ /* Filter table information. These define the location of the
+ * non-hashable IPv4 and IPv6 filter tables. The start values are
+ * offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_filter_tbl_start_valid;
+ u32 v4_filter_tbl_start;
+ u8 v6_filter_tbl_start_valid;
+ u32 v6_filter_tbl_start;
+
+ /* Modem memory information. This defines the location and
+ * size of memory available for the modem to use.
+ */
+ u8 modem_mem_info_valid;
+ struct ipa_mem_range modem_mem_info;
+
+ /* This defines the destination endpoint on the AP to which
+ * the modem driver can send control commands. Must be less
+ * than ipa_endpoint_max().
+ */
+ u8 ctrl_comm_dest_end_pt_valid;
+ u32 ctrl_comm_dest_end_pt;
+
+ /* This defines whether the modem should load the microcontroller
+ * or not. It is unnecessary to reload it if the modem is being
+ * restarted.
+ *
+ * NOTE: this field is named "is_ssr_bootup" elsewhere.
+ */
+ u8 skip_uc_load_valid;
+ u8 skip_uc_load;
+
+ /* Processing context memory information. This defines the memory in
+ * which the modem may insert header processing context table entries.
+ */
+ u8 hdr_proc_ctx_tbl_info_valid;
+ struct ipa_mem_bounds hdr_proc_ctx_tbl_info;
+
+ /* Compression command memory information. This defines the memory
+ * in which the modem may insert compression/decompression commands.
+ */
+ u8 zip_tbl_info_valid;
+ struct ipa_mem_bounds zip_tbl_info;
+
+ /* Routing table information. These define the location and size
+ * of hashable IPv4 and IPv6 route tables. The start values are
+ * offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_hash_route_tbl_info_valid;
+ struct ipa_mem_array v4_hash_route_tbl_info;
+ u8 v6_hash_route_tbl_info_valid;
+ struct ipa_mem_array v6_hash_route_tbl_info;
+
+ /* Filter table information. These define the location and size
+ * of hashable IPv4 and IPv6 filter tables. The start values are
+ * offsets relative to the start of IPA shared memory.
+ */
+ u8 v4_hash_filter_tbl_start_valid;
+ u32 v4_hash_filter_tbl_start;
+ u8 v6_hash_filter_tbl_start_valid;
+ u32 v6_hash_filter_tbl_start;
+
+ /* Statistics information. These define the locations of the
+ * first and last statistics sub-regions. (IPA v4.0 and above)
+ */
+ u8 hw_stats_quota_base_addr_valid;
+ u32 hw_stats_quota_base_addr;
+ u8 hw_stats_quota_size_valid;
+ u32 hw_stats_quota_size;
+ u8 hw_stats_drop_base_addr_valid;
+ u32 hw_stats_drop_base_addr;
+ u8 hw_stats_drop_size_valid;
+ u32 hw_stats_drop_size;
+};
+
+/* The response to an IPA_QMI_INIT_DRIVER request begins with a standard
+ * QMI response, but contains other information as well. Currently we
+ * simply wait for the INIT_DRIVER transaction to complete and ignore
+ * any other data that might be returned.
+ */
+struct ipa_init_modem_driver_rsp {
+ struct qmi_response_type_v01 rsp;
+
+ /* This defines the destination endpoint on the modem to which
+ * the AP driver can send control commands. Must be less than
+ * ipa_endpoint_max().
+ */
+ u8 ctrl_comm_dest_end_pt_valid;
+ u32 ctrl_comm_dest_end_pt;
+
+ /* This defines the default endpoint. The AP driver is not
+ * required to configure the hardware with this value. Must
+ * be less than ipa_endpoint_max().
+ */
+ u8 default_end_pt_valid;
+ u32 default_end_pt;
+
+ /* This defines whether a second handshake is required to complete
+ * initialization.
+ */
+ u8 modem_driver_init_pending_valid;
+ u8 modem_driver_init_pending;
+};
+
+/* Message structure definitions defined in "ipa_qmi_msg.c" */
+extern struct qmi_elem_info ipa_indication_register_req_ei[];
+extern struct qmi_elem_info ipa_indication_register_rsp_ei[];
+extern struct qmi_elem_info ipa_driver_init_complete_req_ei[];
+extern struct qmi_elem_info ipa_driver_init_complete_rsp_ei[];
+extern struct qmi_elem_info ipa_init_complete_ind_ei[];
+extern struct qmi_elem_info ipa_mem_bounds_ei[];
+extern struct qmi_elem_info ipa_mem_array_ei[];
+extern struct qmi_elem_info ipa_mem_range_ei[];
+extern struct qmi_elem_info ipa_init_modem_driver_req_ei[];
+extern struct qmi_elem_info ipa_init_modem_driver_rsp_ei[];
+
+#endif /* !_IPA_QMI_MSG_H_ */
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
new file mode 100644
index 000000000000..e6147a1cd787
--- /dev/null
+++ b/drivers/net/ipa/ipa_reg.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/io.h>
+
+#include "ipa.h"
+#include "ipa_reg.h"
+
+int ipa_reg_init(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ struct resource *res;
+
+ /* Set up IPA register memory */
+ res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
+ "ipa-reg");
+ if (!res) {
+ dev_err(dev, "DT error getting \"ipa-reg\" memory property\n");
+ return -ENODEV;
+ }
+
+ ipa->reg_virt = ioremap(res->start, resource_size(res));
+ if (!ipa->reg_virt) {
+ dev_err(dev, "unable to remap \"ipa-reg\" memory\n");
+ return -ENOMEM;
+ }
+ ipa->reg_addr = res->start;
+
+ return 0;
+}
+
+void ipa_reg_exit(struct ipa *ipa)
+{
+ iounmap(ipa->reg_virt);
+}
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
new file mode 100644
index 000000000000..3b8106aa277a
--- /dev/null
+++ b/drivers/net/ipa/ipa_reg.h
@@ -0,0 +1,476 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+#ifndef _IPA_REG_H_
+#define _IPA_REG_H_
+
+#include <linux/bitfield.h>
+
+#include "ipa_version.h"
+
+struct ipa;
+
+/**
+ * DOC: IPA Registers
+ *
+ * IPA registers are located within the "ipa-reg" address space defined by
+ * Device Tree. The offset of each register within that space is specified
+ * by symbols defined below. The address space is mapped to virtual memory
+ * space in ipa_reg_init(). All IPA registers are 32 bits wide.
+ *
+ * Certain register types are duplicated for a number of instances of
+ * something. For example, each IPA endpoint has a set of registers
+ * defining its configuration. The offset to an endpoint's set of registers
+ * is computed from a "base" offset, plus the endpoint's ID multiplied
+ * by a "stride" value for the register. For such registers, the offset is
+ * computed by a function-like macro that takes a parameter used in the
+ * computation.
+ *
+ * Some register offsets depend on execution environment. For these an "ee"
+ * parameter is supplied to the offset macro. The "ee" value is a member of
+ * the gsi_ee enumerated type.
+ *
+ * The offset of a register dependent on endpoint id is computed by a macro
+ * that is supplied a parameter "ep". The "ep" value is assumed to be less
+ * than the maximum endpoint value for the current hardware, and that will
+ * not exceed IPA_ENDPOINT_MAX.
+ *
+ * The offset of registers related to filter and route tables is computed
+ * by a macro that is supplied a parameter "er". The "er" represents an
+ * endpoint ID for filters, or a route ID for routes. For filters, the
+ * endpoint ID must be less than IPA_ENDPOINT_MAX, but is further restricted
+ * because not all endpoints support filtering. For routes, the route ID
+ * must be less than IPA_ROUTE_MAX.
+ *
+ * The offset of registers related to resource types is computed by a macro
+ * that is supplied a parameter "rt". The "rt" represents a resource type,
+ * which is a member of the ipa_resource_type_src enumerated type for
+ * source endpoint resources or the ipa_resource_type_dst enumerated type
+ * for destination endpoint resources.
+ *
+ * Some registers encode multiple fields within them. For these, each field
+ * has a symbol below defining a field mask that encodes both the position
+ * and width of the field within its register.
+ *
+ * In some cases, different versions of IPA hardware use different offset or
+ * field mask values. In such cases an inline function (taking the IPA
+ * version as a parameter) is used rather than a macro to define the
+ * offset or field mask to use.
+ *
+ * Finally, some registers hold bitmasks representing endpoints. In such
+ * cases the @available field in the @ipa structure defines the "full" set
+ * of valid bits for the register.
+ */
+
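+/* As an illustration (using symbols defined later in this file), the
+ * mode field of endpoint 3 could be read as follows, with u32_get_bits()
+ * extracting the field described by a *_FMASK value:
+ *
+ * u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(3);
+ * u32 val = ioread32(ipa->reg_virt + offset);
+ * u32 mode = u32_get_bits(val, MODE_FMASK);
+ */
+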
+#define IPA_REG_ENABLED_PIPES_OFFSET 0x00000038
+
+#define IPA_REG_COMP_CFG_OFFSET 0x0000003c
+#define ENABLE_FMASK GENMASK(0, 0)
+#define GSI_SNOC_BYPASS_DIS_FMASK GENMASK(1, 1)
+#define GEN_QMB_0_SNOC_BYPASS_DIS_FMASK GENMASK(2, 2)
+#define GEN_QMB_1_SNOC_BYPASS_DIS_FMASK GENMASK(3, 3)
+#define IPA_DCMP_FAST_CLK_EN_FMASK GENMASK(4, 4)
+#define IPA_QMB_SELECT_CONS_EN_FMASK GENMASK(5, 5)
+#define IPA_QMB_SELECT_PROD_EN_FMASK GENMASK(6, 6)
+#define GSI_MULTI_INORDER_RD_DIS_FMASK GENMASK(7, 7)
+#define GSI_MULTI_INORDER_WR_DIS_FMASK GENMASK(8, 8)
+#define GEN_QMB_0_MULTI_INORDER_RD_DIS_FMASK GENMASK(9, 9)
+#define GEN_QMB_1_MULTI_INORDER_RD_DIS_FMASK GENMASK(10, 10)
+#define GEN_QMB_0_MULTI_INORDER_WR_DIS_FMASK GENMASK(11, 11)
+#define GEN_QMB_1_MULTI_INORDER_WR_DIS_FMASK GENMASK(12, 12)
+#define GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS_FMASK GENMASK(13, 13)
+#define GSI_SNOC_CNOC_LOOP_PROT_DISABLE_FMASK GENMASK(14, 14)
+#define GSI_MULTI_AXI_MASTERS_DIS_FMASK GENMASK(15, 15)
+#define IPA_QMB_SELECT_GLOBAL_EN_FMASK GENMASK(16, 16)
+#define IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_FMASK GENMASK(20, 17)
+
+#define IPA_REG_CLKON_CFG_OFFSET 0x00000044
+#define RX_FMASK GENMASK(0, 0)
+#define PROC_FMASK GENMASK(1, 1)
+#define TX_WRAPPER_FMASK GENMASK(2, 2)
+#define MISC_FMASK GENMASK(3, 3)
+#define RAM_ARB_FMASK GENMASK(4, 4)
+#define FTCH_HPS_FMASK GENMASK(5, 5)
+#define FTCH_DPS_FMASK GENMASK(6, 6)
+#define HPS_FMASK GENMASK(7, 7)
+#define DPS_FMASK GENMASK(8, 8)
+#define RX_HPS_CMDQS_FMASK GENMASK(9, 9)
+#define HPS_DPS_CMDQS_FMASK GENMASK(10, 10)
+#define DPS_TX_CMDQS_FMASK GENMASK(11, 11)
+#define RSRC_MNGR_FMASK GENMASK(12, 12)
+#define CTX_HANDLER_FMASK GENMASK(13, 13)
+#define ACK_MNGR_FMASK GENMASK(14, 14)
+#define D_DCPH_FMASK GENMASK(15, 15)
+#define H_DCPH_FMASK GENMASK(16, 16)
+#define DCMP_FMASK GENMASK(17, 17)
+#define NTF_TX_CMDQS_FMASK GENMASK(18, 18)
+#define TX_0_FMASK GENMASK(19, 19)
+#define TX_1_FMASK GENMASK(20, 20)
+#define FNR_FMASK GENMASK(21, 21)
+#define QSB2AXI_CMDQ_L_FMASK GENMASK(22, 22)
+#define AGGR_WRAPPER_FMASK GENMASK(23, 23)
+#define RAM_SLAVEWAY_FMASK GENMASK(24, 24)
+#define QMB_FMASK GENMASK(25, 25)
+#define WEIGHT_ARB_FMASK GENMASK(26, 26)
+#define GSI_IF_FMASK GENMASK(27, 27)
+#define GLOBAL_FMASK GENMASK(28, 28)
+#define GLOBAL_2X_CLK_FMASK GENMASK(29, 29)
+
+#define IPA_REG_ROUTE_OFFSET 0x00000048
+#define ROUTE_DIS_FMASK GENMASK(0, 0)
+#define ROUTE_DEF_PIPE_FMASK GENMASK(5, 1)
+#define ROUTE_DEF_HDR_TABLE_FMASK GENMASK(6, 6)
+#define ROUTE_DEF_HDR_OFST_FMASK GENMASK(16, 7)
+#define ROUTE_FRAG_DEF_PIPE_FMASK GENMASK(21, 17)
+#define ROUTE_DEF_RETAIN_HDR_FMASK GENMASK(24, 24)
+
+#define IPA_REG_SHARED_MEM_SIZE_OFFSET 0x00000054
+#define SHARED_MEM_SIZE_FMASK GENMASK(15, 0)
+#define SHARED_MEM_BADDR_FMASK GENMASK(31, 16)
+
+#define IPA_REG_QSB_MAX_WRITES_OFFSET 0x00000074
+#define GEN_QMB_0_MAX_WRITES_FMASK GENMASK(3, 0)
+#define GEN_QMB_1_MAX_WRITES_FMASK GENMASK(7, 4)
+
+#define IPA_REG_QSB_MAX_READS_OFFSET 0x00000078
+#define GEN_QMB_0_MAX_READS_FMASK GENMASK(3, 0)
+#define GEN_QMB_1_MAX_READS_FMASK GENMASK(7, 4)
+/* The next two fields are present for IPA v4.0 and above */
+#define GEN_QMB_0_MAX_READS_BEATS_FMASK GENMASK(23, 16)
+#define GEN_QMB_1_MAX_READS_BEATS_FMASK GENMASK(31, 24)
+
+static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version)
+{
+ if (version == IPA_VERSION_3_5_1)
+ return 0x0000010c;
+
+ return 0x000000b4;
+}
+/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */
+
+/* The next register is present for IPA v4.2 and above */
+#define IPA_REG_FILT_ROUT_HASH_EN_OFFSET 0x00000148
+#define IPV6_ROUTER_HASH_EN GENMASK(0, 0)
+#define IPV6_FILTER_HASH_EN GENMASK(4, 4)
+#define IPV4_ROUTER_HASH_EN GENMASK(8, 8)
+#define IPV4_FILTER_HASH_EN GENMASK(12, 12)
+
+static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
+{
+ if (version == IPA_VERSION_3_5_1)
+ return 0x0000090;
+
+ return 0x000014c;
+}
+
+#define IPV6_ROUTER_HASH_FLUSH GENMASK(0, 0)
+#define IPV6_FILTER_HASH_FLUSH GENMASK(4, 4)
+#define IPV4_ROUTER_HASH_FLUSH GENMASK(8, 8)
+#define IPV4_FILTER_HASH_FLUSH GENMASK(12, 12)
+
+#define IPA_REG_BCR_OFFSET 0x000001d0
+#define BCR_CMDQ_L_LACK_ONE_ENTRY BIT(0)
+#define BCR_TX_NOT_USING_BRESP BIT(1)
+#define BCR_SUSPEND_L2_IRQ BIT(3)
+#define BCR_HOLB_DROP_L2_IRQ BIT(4)
+#define BCR_DUAL_TX BIT(5)
+
+/* Backward compatibility register value to use for each version */
+static inline u32 ipa_reg_bcr_val(enum ipa_version version)
+{
+ if (version == IPA_VERSION_3_5_1)
+ return BCR_CMDQ_L_LACK_ONE_ENTRY | BCR_TX_NOT_USING_BRESP |
+ BCR_SUSPEND_L2_IRQ | BCR_HOLB_DROP_L2_IRQ | BCR_DUAL_TX;
+
+ if (version == IPA_VERSION_4_0 || version == IPA_VERSION_4_1)
+ return BCR_CMDQ_L_LACK_ONE_ENTRY | BCR_SUSPEND_L2_IRQ |
+ BCR_HOLB_DROP_L2_IRQ | BCR_DUAL_TX;
+
+ return 0x00000000;
+}
+
+#define IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET 0x000001e8
+
+#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
+/* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */
+
+#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
+#define AGGR_GRANULARITY GENMASK(8, 4)
+/* Compute the value to use in the AGGR_GRANULARITY field representing
+ * the given number of microseconds (up to 1 millisecond).
+ * x = (32 * usec) / 1000 - 1
+ */
+static inline u32 ipa_aggr_granularity_val(u32 microseconds)
+{
+ /* assert(microseconds >= 16); */
+ /* assert(microseconds <= 1015); */
+
+ return DIV_ROUND_CLOSEST(32 * microseconds, 1000) - 1;
+}
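+
+/* For example, a granularity of 500 microseconds yields
+ * DIV_ROUND_CLOSEST(32 * 500, 1000) - 1, or 15, as the field value.
+ */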
+
+#define IPA_REG_TX_CFG_OFFSET 0x000001fc
+/* The first three fields are present for IPA v3.5.1 only */
+#define TX0_PREFETCH_DISABLE GENMASK(0, 0)
+#define TX1_PREFETCH_DISABLE GENMASK(1, 1)
+#define PREFETCH_ALMOST_EMPTY_SIZE GENMASK(4, 2)
+/* The next fields are present for IPA v4.0 and above */
+#define PREFETCH_ALMOST_EMPTY_SIZE_TX0 GENMASK(5, 2)
+#define DMAW_SCND_OUTSD_PRED_THRESHOLD GENMASK(9, 6)
+#define DMAW_SCND_OUTSD_PRED_EN GENMASK(10, 10)
+#define DMAW_MAX_BEATS_256_DIS GENMASK(11, 11)
+#define PA_MASK_EN GENMASK(12, 12)
+#define PREFETCH_ALMOST_EMPTY_SIZE_TX1 GENMASK(16, 13)
+/* The last two fields are present for IPA v4.2 and above */
+#define SSPND_PA_NO_START_STATE GENMASK(18, 18)
+#define SSPND_PA_NO_BQ_STATE GENMASK(19, 19)
+
+#define IPA_REG_FLAVOR_0_OFFSET 0x00000210
+#define BAM_MAX_PIPES_FMASK GENMASK(4, 0)
+#define BAM_MAX_CONS_PIPES_FMASK GENMASK(12, 8)
+#define BAM_MAX_PROD_PIPES_FMASK GENMASK(20, 16)
+#define BAM_PROD_LOWEST_FMASK GENMASK(27, 24)
+
+static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
+{
+ if (version == IPA_VERSION_4_2)
+ return 0x00000240;
+
+ return 0x00000220;
+}
+
+#define ENTER_IDLE_DEBOUNCE_THRESH_FMASK GENMASK(15, 0)
+#define CONST_NON_IDLE_ENABLE_FMASK GENMASK(16, 16)
+
+#define IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000400 + 0x0020 * (rt))
+#define IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000404 + 0x0020 * (rt))
+#define IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000408 + 0x0020 * (rt))
+#define IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000500 + 0x0020 * (rt))
+#define IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000504 + 0x0020 * (rt))
+#define IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
+ (0x00000508 + 0x0020 * (rt))
+#define X_MIN_LIM_FMASK GENMASK(5, 0)
+#define X_MAX_LIM_FMASK GENMASK(13, 8)
+#define Y_MIN_LIM_FMASK GENMASK(21, 16)
+#define Y_MAX_LIM_FMASK GENMASK(29, 24)
+
+#define IPA_REG_ENDP_INIT_CTRL_N_OFFSET(ep) \
+ (0x00000800 + 0x0070 * (ep))
+#define ENDP_SUSPEND_FMASK GENMASK(0, 0)
+#define ENDP_DELAY_FMASK GENMASK(1, 1)
+
+#define IPA_REG_ENDP_INIT_CFG_N_OFFSET(ep) \
+ (0x00000808 + 0x0070 * (ep))
+#define FRAG_OFFLOAD_EN_FMASK GENMASK(0, 0)
+#define CS_OFFLOAD_EN_FMASK GENMASK(2, 1)
+#define CS_METADATA_HDR_OFFSET_FMASK GENMASK(6, 3)
+#define CS_GEN_QMB_MASTER_SEL_FMASK GENMASK(8, 8)
+
+#define IPA_REG_ENDP_INIT_HDR_N_OFFSET(ep) \
+ (0x00000810 + 0x0070 * (ep))
+#define HDR_LEN_FMASK GENMASK(5, 0)
+#define HDR_OFST_METADATA_VALID_FMASK GENMASK(6, 6)
+#define HDR_OFST_METADATA_FMASK GENMASK(12, 7)
+#define HDR_ADDITIONAL_CONST_LEN_FMASK GENMASK(18, 13)
+#define HDR_OFST_PKT_SIZE_VALID_FMASK GENMASK(19, 19)
+#define HDR_OFST_PKT_SIZE_FMASK GENMASK(25, 20)
+#define HDR_A5_MUX_FMASK GENMASK(26, 26)
+#define HDR_LEN_INC_DEAGG_HDR_FMASK GENMASK(27, 27)
+#define HDR_METADATA_REG_VALID_FMASK GENMASK(28, 28)
+
+#define IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(ep) \
+ (0x00000814 + 0x0070 * (ep))
+#define HDR_ENDIANNESS_FMASK GENMASK(0, 0)
+#define HDR_TOTAL_LEN_OR_PAD_VALID_FMASK GENMASK(1, 1)
+#define HDR_TOTAL_LEN_OR_PAD_FMASK GENMASK(2, 2)
+#define HDR_PAYLOAD_LEN_INC_PADDING_FMASK GENMASK(3, 3)
+#define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4)
+#define HDR_PAD_TO_ALIGNMENT_FMASK GENMASK(13, 10)
+
+#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(ep) \
+ (0x00000818 + 0x0070 * (ep))
+
+#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(ep) \
+ (0x00000820 + 0x0070 * (ep))
+#define MODE_FMASK GENMASK(2, 0)
+#define DEST_PIPE_INDEX_FMASK GENMASK(8, 4)
+#define BYTE_THRESHOLD_FMASK GENMASK(27, 12)
+#define PIPE_REPLICATION_EN_FMASK GENMASK(28, 28)
+#define PAD_EN_FMASK GENMASK(29, 29)
+#define HDR_FTCH_DISABLE_FMASK GENMASK(30, 30)
+
+#define IPA_REG_ENDP_INIT_AGGR_N_OFFSET(ep) \
+ (0x00000824 + 0x0070 * (ep))
+#define AGGR_EN_FMASK GENMASK(1, 0)
+#define AGGR_TYPE_FMASK GENMASK(4, 2)
+#define AGGR_BYTE_LIMIT_FMASK GENMASK(9, 5)
+#define AGGR_TIME_LIMIT_FMASK GENMASK(14, 10)
+#define AGGR_PKT_LIMIT_FMASK GENMASK(20, 15)
+#define AGGR_SW_EOF_ACTIVE_FMASK GENMASK(21, 21)
+#define AGGR_FORCE_CLOSE_FMASK GENMASK(22, 22)
+#define AGGR_HARD_BYTE_LIMIT_ENABLE_FMASK GENMASK(24, 24)
+
+#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(ep) \
+ (0x0000082c + 0x0070 * (ep))
+#define HOL_BLOCK_EN_FMASK GENMASK(0, 0)
+
+/* The next register is valid only for RX (IPA producer) endpoints */
+#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(ep) \
+ (0x00000830 + 0x0070 * (ep))
+/* The next fields are present for IPA v4.2 only */
+#define BASE_VALUE_FMASK GENMASK(4, 0)
+#define SCALE_FMASK GENMASK(12, 8)
+
+#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(ep) \
+ (0x00000834 + 0x0070 * (ep))
+#define DEAGGR_HDR_LEN_FMASK GENMASK(5, 0)
+#define PACKET_OFFSET_VALID_FMASK GENMASK(7, 7)
+#define PACKET_OFFSET_LOCATION_FMASK GENMASK(13, 8)
+#define MAX_PACKET_LEN_FMASK GENMASK(31, 16)
+
+#define IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(ep) \
+ (0x00000838 + 0x0070 * (ep))
+#define RSRC_GRP_FMASK GENMASK(1, 0)
+
+#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(ep) \
+ (0x0000083c + 0x0070 * (ep))
+#define HPS_SEQ_TYPE_FMASK GENMASK(3, 0)
+#define DPS_SEQ_TYPE_FMASK GENMASK(7, 4)
+#define HPS_REP_SEQ_TYPE_FMASK GENMASK(11, 8)
+#define DPS_REP_SEQ_TYPE_FMASK GENMASK(15, 12)
+
+#define IPA_REG_ENDP_STATUS_N_OFFSET(ep) \
+ (0x00000840 + 0x0070 * (ep))
+#define STATUS_EN_FMASK GENMASK(0, 0)
+#define STATUS_ENDP_FMASK GENMASK(5, 1)
+#define STATUS_LOCATION_FMASK GENMASK(8, 8)
+/* The next field is present for IPA v4.0 and above */
+#define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9)
+
+/* "er" is either an endpoint id (for filters) or a route id (for routes) */
+#define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \
+ (0x0000085c + 0x0070 * (er))
+#define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0)
+#define FILTER_HASH_MSK_SRC_IP_FMASK GENMASK(1, 1)
+#define FILTER_HASH_MSK_DST_IP_FMASK GENMASK(2, 2)
+#define FILTER_HASH_MSK_SRC_PORT_FMASK GENMASK(3, 3)
+#define FILTER_HASH_MSK_DST_PORT_FMASK GENMASK(4, 4)
+#define FILTER_HASH_MSK_PROTOCOL_FMASK GENMASK(5, 5)
+#define FILTER_HASH_MSK_METADATA_FMASK GENMASK(6, 6)
+#define IPA_REG_ENDP_FILTER_HASH_MSK_ALL GENMASK(6, 0)
+
+#define ROUTER_HASH_MSK_SRC_ID_FMASK GENMASK(16, 16)
+#define ROUTER_HASH_MSK_SRC_IP_FMASK GENMASK(17, 17)
+#define ROUTER_HASH_MSK_DST_IP_FMASK GENMASK(18, 18)
+#define ROUTER_HASH_MSK_SRC_PORT_FMASK GENMASK(19, 19)
+#define ROUTER_HASH_MSK_DST_PORT_FMASK GENMASK(20, 20)
+#define ROUTER_HASH_MSK_PROTOCOL_FMASK GENMASK(21, 21)
+#define ROUTER_HASH_MSK_METADATA_FMASK GENMASK(22, 22)
+#define IPA_REG_ENDP_ROUTER_HASH_MSK_ALL GENMASK(22, 16)
+
+#define IPA_REG_IRQ_STTS_OFFSET \
+ IPA_REG_IRQ_STTS_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_STTS_EE_N_OFFSET(ee) \
+ (0x00003008 + 0x1000 * (ee))
+
+#define IPA_REG_IRQ_EN_OFFSET \
+ IPA_REG_IRQ_EN_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_EN_EE_N_OFFSET(ee) \
+ (0x0000300c + 0x1000 * (ee))
+
+#define IPA_REG_IRQ_CLR_OFFSET \
+ IPA_REG_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_CLR_EE_N_OFFSET(ee) \
+ (0x00003010 + 0x1000 * (ee))
+
+#define IPA_REG_IRQ_UC_OFFSET \
+ IPA_REG_IRQ_UC_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_UC_EE_N_OFFSET(ee) \
+ (0x0000301c + 0x1000 * (ee))
+
+#define IPA_REG_IRQ_SUSPEND_INFO_OFFSET \
+ IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(ee) \
+ (0x00003030 + 0x1000 * (ee))
+/* ipa->available defines the valid bits in the SUSPEND_INFO register */
+
+#define IPA_REG_SUSPEND_IRQ_EN_OFFSET \
+ IPA_REG_SUSPEND_IRQ_EN_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_SUSPEND_IRQ_EN_EE_N_OFFSET(ee) \
+ (0x00003034 + 0x1000 * (ee))
+/* ipa->available defines the valid bits in the SUSPEND_IRQ_EN register */
+
+#define IPA_REG_SUSPEND_IRQ_CLR_OFFSET \
+ IPA_REG_SUSPEND_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_SUSPEND_IRQ_CLR_EE_N_OFFSET(ee) \
+ (0x00003038 + 0x1000 * (ee))
+/* ipa->available defines the valid bits in the SUSPEND_IRQ_CLR register */
+
+/** enum ipa_cs_offload_en - checksum offload field in ENDP_INIT_CFG_N */
+enum ipa_cs_offload_en {
+ IPA_CS_OFFLOAD_NONE = 0,
+ IPA_CS_OFFLOAD_UL = 1,
+ IPA_CS_OFFLOAD_DL = 2,
+ IPA_CS_RSVD
+};
+
+/** enum ipa_aggr_en - aggregation type field in ENDP_INIT_AGGR_N */
+enum ipa_aggr_en {
+ IPA_BYPASS_AGGR = 0,
+ IPA_ENABLE_AGGR = 1,
+ IPA_ENABLE_DEAGGR = 2,
+};
+
+/** enum ipa_aggr_type - aggregation type field in ENDP_INIT_AGGR_N */
+enum ipa_aggr_type {
+ IPA_MBIM_16 = 0,
+ IPA_HDLC = 1,
+ IPA_TLP = 2,
+ IPA_RNDIS = 3,
+ IPA_GENERIC = 4,
+ IPA_COALESCE = 5,
+ IPA_QCMAP = 6,
+};
+
+/** enum ipa_mode - mode field in ENDP_INIT_MODE_N */
+enum ipa_mode {
+ IPA_BASIC = 0,
+ IPA_ENABLE_FRAMING_HDLC = 1,
+ IPA_ENABLE_DEFRAMING_HDLC = 2,
+ IPA_DMA = 3,
+};
+
+/**
+ * enum ipa_seq_type - HPS and DPS sequencer type fields in ENDP_INIT_SEQ_N
+ * @IPA_SEQ_DMA_ONLY: only DMA is performed
+ * @IPA_SEQ_PKT_PROCESS_NO_DEC_UCP:
+ * packet processing + no decipher + microcontroller (Ethernet Bridging)
+ * @IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP:
+ * second packet processing pass + no decipher + microcontroller
+ * @IPA_SEQ_DMA_DEC: DMA + cipher/decipher
+ * @IPA_SEQ_DMA_COMP_DECOMP: DMA + compression/decompression
+ * @IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP:
+ * packet processing + no decipher + no microcontroller, with DMAP
+ * @IPA_SEQ_INVALID: invalid sequencer type
+ *
+ * The values defined here are broken into 4-bit nibbles that are written
+ * into fields of the INIT_SEQ_N endpoint registers.
+ */
+enum ipa_seq_type {
+ IPA_SEQ_DMA_ONLY = 0x0000,
+ IPA_SEQ_PKT_PROCESS_NO_DEC_UCP = 0x0002,
+ IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP = 0x0004,
+ IPA_SEQ_DMA_DEC = 0x0011,
+ IPA_SEQ_DMA_COMP_DECOMP = 0x0020,
+ IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP = 0x0806,
+ IPA_SEQ_INVALID = 0xffff,
+};
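+
+/* For example, IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP (0x0806) has 0x6
+ * in its HPS_SEQ_TYPE nibble and 0x8 in its HPS_REP_SEQ_TYPE nibble.
+ */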
+
+int ipa_reg_init(struct ipa *ipa);
+void ipa_reg_exit(struct ipa *ipa);
+
+#endif /* _IPA_REG_H_ */
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
new file mode 100644
index 000000000000..4d33aa7ebfbb
--- /dev/null
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "ipa_smp2p.h"
+#include "ipa.h"
+#include "ipa_uc.h"
+#include "ipa_clock.h"
+
+/**
+ * DOC: IPA SMP2P communication with the modem
+ *
+ * SMP2P is a primitive communication mechanism available between the AP and
+ * the modem. The IPA driver uses this for two purposes: to enable the modem
+ * to state that the GSI hardware is ready to use; and to communicate the
+ * state of the IPA clock in the event of a crash.
+ *
+ * GSI needs to have early initialization completed before it can be used.
+ * This initialization is done either by Trust Zone or by the modem. In the
+ * latter case, the modem uses an SMP2P interrupt to tell the AP IPA driver
+ * when the GSI is ready to use.
+ *
+ * The modem is also able to inquire about the current state of the IPA
+ * clock by triggering another SMP2P interrupt to the AP. We communicate
+ * whether the clock is enabled using two SMP2P state bits--one to
+ * indicate the clock state (on or off), and a second to indicate the
+ * clock state bit is valid. The modem will poll the valid bit until it
+ * is set, and at that time records whether the AP has the IPA clock enabled.
+ *
+ * Finally, if the AP kernel panics, we update the SMP2P state bits even if
+ * we never receive an interrupt from the modem requesting this.
+ */
+
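+/* Viewed from the modem, the handshake is roughly: poll until the valid
+ * bit is set, then read the enabled bit to learn whether the IPA clock
+ * is on. The valid bit is set only after the enabled bit is updated, so
+ * the enabled bit can be trusted once the valid bit is seen set.
+ */
+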
+/**
+ * struct ipa_smp2p - IPA SMP2P information
+ * @ipa: IPA pointer
+ * @valid_state: SMEM state indicating enabled state is valid
+ * @enabled_state: SMEM state to indicate clock is enabled
+ * @valid_bit: Valid bit in 32-bit SMEM state mask
+ * @enabled_bit: Enabled bit in 32-bit SMEM state mask
+ * @clock_query_irq: IPA interrupt triggered by modem for clock query
+ * @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready
+ * @clock_on: Whether IPA clock is on
+ * @notified: Whether modem has been notified of clock state
+ * @disabled: Whether setup ready interrupt handling is disabled
+ * @mutex: Mutex protecting ready-interrupt/shutdown interlock
+ * @panic_notifier: Panic notifier structure
+ */
+struct ipa_smp2p {
+ struct ipa *ipa;
+ struct qcom_smem_state *valid_state;
+ struct qcom_smem_state *enabled_state;
+ u32 valid_bit;
+ u32 enabled_bit;
+ u32 clock_query_irq;
+ u32 setup_ready_irq;
+ bool clock_on;
+ bool notified;
+ bool disabled;
+ struct mutex mutex;
+ struct notifier_block panic_notifier;
+};
+
+/**
+ * ipa_smp2p_notify() - use SMP2P to tell modem about IPA clock state
+ * @smp2p: SMP2P information
+ *
+ * This is called either when the modem has requested it (by triggering
+ * the modem clock query IPA interrupt) or whenever the AP is shutting down
+ * (via a panic notifier). It sets the two SMP2P state bits--one saying
+ * whether the IPA clock is running, and the other indicating the first bit
+ * is valid.
+ */
+static void ipa_smp2p_notify(struct ipa_smp2p *smp2p)
+{
+ u32 value;
+ u32 mask;
+
+ if (smp2p->notified)
+ return;
+
+ smp2p->clock_on = ipa_clock_get_additional(smp2p->ipa);
+
+ /* Signal whether the clock is enabled */
+ mask = BIT(smp2p->enabled_bit);
+ value = smp2p->clock_on ? mask : 0;
+ qcom_smem_state_update_bits(smp2p->enabled_state, mask, value);
+
+ /* Now indicate that the enabled flag is valid */
+ mask = BIT(smp2p->valid_bit);
+ value = mask;
+ qcom_smem_state_update_bits(smp2p->valid_state, mask, value);
+
+ smp2p->notified = true;
+}
+
+/* Threaded IRQ handler for modem "ipa-clock-query" SMP2P interrupt */
+static irqreturn_t ipa_smp2p_modem_clk_query_isr(int irq, void *dev_id)
+{
+ struct ipa_smp2p *smp2p = dev_id;
+
+ ipa_smp2p_notify(smp2p);
+
+ return IRQ_HANDLED;
+}
+
+static int ipa_smp2p_panic_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ipa_smp2p *smp2p;
+
+ smp2p = container_of(nb, struct ipa_smp2p, panic_notifier);
+
+ ipa_smp2p_notify(smp2p);
+
+ if (smp2p->clock_on)
+ ipa_uc_panic_notifier(smp2p->ipa);
+
+ return NOTIFY_DONE;
+}
+
+static int ipa_smp2p_panic_notifier_register(struct ipa_smp2p *smp2p)
+{
+ /* IPA panic handler needs to run before modem shuts down */
+ smp2p->panic_notifier.notifier_call = ipa_smp2p_panic_notifier;
+ smp2p->panic_notifier.priority = INT_MAX; /* Do it early */
+
+ return atomic_notifier_chain_register(&panic_notifier_list,
+ &smp2p->panic_notifier);
+}
+
+static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &smp2p->panic_notifier);
+}
+
+/* Threaded IRQ handler for modem "ipa-setup-ready" SMP2P interrupt */
+static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
+{
+ struct ipa_smp2p *smp2p = dev_id;
+
+ mutex_lock(&smp2p->mutex);
+
+ if (!smp2p->disabled) {
+ int ret;
+
+ ret = ipa_setup(smp2p->ipa);
+ if (ret)
+ dev_err(&smp2p->ipa->pdev->dev,
+ "error %d from ipa_setup()\n", ret);
+ smp2p->disabled = true;
+ }
+
+ mutex_unlock(&smp2p->mutex);
+
+ return IRQ_HANDLED;
+}
+
+/* Initialize SMP2P interrupts */
+static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p, const char *name,
+ irq_handler_t handler)
+{
+ struct device *dev = &smp2p->ipa->pdev->dev;
+ unsigned int irq;
+ int ret;
+
+ ret = platform_get_irq_byname(smp2p->ipa->pdev, name);
+ if (ret <= 0) {
+ dev_err(dev, "DT error %d getting \"%s\" IRQ property\n",
+ ret, name);
+ return ret ? : -EINVAL;
+ }
+ irq = ret;
+
+ ret = request_threaded_irq(irq, NULL, handler, 0, name, smp2p);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"%s\" IRQ\n", ret, name);
+ return ret;
+ }
+
+ return irq;
+}
+
+static void ipa_smp2p_irq_exit(struct ipa_smp2p *smp2p, u32 irq)
+{
+ free_irq(irq, smp2p);
+}
+
+/* Drop the clock reference if it was taken in ipa_smp2p_notify() */
+static void ipa_smp2p_clock_release(struct ipa *ipa)
+{
+ if (!ipa->smp2p->clock_on)
+ return;
+
+ ipa_clock_put(ipa);
+ ipa->smp2p->clock_on = false;
+}
+
+/* Initialize the IPA SMP2P subsystem */
+int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
+{
+ struct qcom_smem_state *enabled_state;
+ struct device *dev = &ipa->pdev->dev;
+ struct qcom_smem_state *valid_state;
+ struct ipa_smp2p *smp2p;
+ u32 enabled_bit;
+ u32 valid_bit;
+ int ret;
+
+ valid_state = qcom_smem_state_get(dev, "ipa-clock-enabled-valid",
+ &valid_bit);
+ if (IS_ERR(valid_state))
+ return PTR_ERR(valid_state);
+ if (valid_bit >= 32) /* BITS_PER_U32 */
+ return -EINVAL;
+
+ enabled_state = qcom_smem_state_get(dev, "ipa-clock-enabled",
+ &enabled_bit);
+ if (IS_ERR(enabled_state))
+ return PTR_ERR(enabled_state);
+ if (enabled_bit >= 32) /* BITS_PER_U32 */
+ return -EINVAL;
+
+ smp2p = kzalloc(sizeof(*smp2p), GFP_KERNEL);
+ if (!smp2p)
+ return -ENOMEM;
+
+ smp2p->ipa = ipa;
+
+ /* These fields are needed by the clock query interrupt
+ * handler, so initialize them now.
+ */
+ mutex_init(&smp2p->mutex);
+ smp2p->valid_state = valid_state;
+ smp2p->valid_bit = valid_bit;
+ smp2p->enabled_state = enabled_state;
+ smp2p->enabled_bit = enabled_bit;
+
+ /* We have enough information saved to handle notifications */
+ ipa->smp2p = smp2p;
+
+ ret = ipa_smp2p_irq_init(smp2p, "ipa-clock-query",
+ ipa_smp2p_modem_clk_query_isr);
+ if (ret < 0)
+ goto err_null_smp2p;
+ smp2p->clock_query_irq = ret;
+
+ ret = ipa_smp2p_panic_notifier_register(smp2p);
+ if (ret)
+ goto err_irq_exit;
+
+ if (modem_init) {
+ /* Result will be non-zero (negative for error) */
+ ret = ipa_smp2p_irq_init(smp2p, "ipa-setup-ready",
+ ipa_smp2p_modem_setup_ready_isr);
+ if (ret < 0)
+ goto err_notifier_unregister;
+ smp2p->setup_ready_irq = ret;
+ }
+
+ return 0;
+
+err_notifier_unregister:
+ ipa_smp2p_panic_notifier_unregister(smp2p);
+err_irq_exit:
+ ipa_smp2p_irq_exit(smp2p, smp2p->clock_query_irq);
+err_null_smp2p:
+ ipa->smp2p = NULL;
+ mutex_destroy(&smp2p->mutex);
+ kfree(smp2p);
+
+ return ret;
+}
+
+void ipa_smp2p_exit(struct ipa *ipa)
+{
+ struct ipa_smp2p *smp2p = ipa->smp2p;
+
+ if (smp2p->setup_ready_irq)
+ ipa_smp2p_irq_exit(smp2p, smp2p->setup_ready_irq);
+ ipa_smp2p_panic_notifier_unregister(smp2p);
+ ipa_smp2p_irq_exit(smp2p, smp2p->clock_query_irq);
+ /* We won't get notified any more; drop clock reference (if any) */
+ ipa_smp2p_clock_release(ipa);
+ ipa->smp2p = NULL;
+ mutex_destroy(&smp2p->mutex);
+ kfree(smp2p);
+}
+
+void ipa_smp2p_disable(struct ipa *ipa)
+{
+ struct ipa_smp2p *smp2p = ipa->smp2p;
+
+ if (!smp2p->setup_ready_irq)
+ return;
+
+ mutex_lock(&smp2p->mutex);
+
+ smp2p->disabled = true;
+
+ mutex_unlock(&smp2p->mutex);
+}
+
+/* Reset state tracking whether we have notified the modem */
+void ipa_smp2p_notify_reset(struct ipa *ipa)
+{
+ struct ipa_smp2p *smp2p = ipa->smp2p;
+ u32 mask;
+
+ if (!smp2p->notified)
+ return;
+
+ ipa_smp2p_clock_release(ipa);
+
+ /* Reset the clock enabled valid flag */
+ mask = BIT(smp2p->valid_bit);
+ qcom_smem_state_update_bits(smp2p->valid_state, mask, 0);
+
+ /* Mark the clock disabled for good measure... */
+ mask = BIT(smp2p->enabled_bit);
+ qcom_smem_state_update_bits(smp2p->enabled_state, mask, 0);
+
+ smp2p->notified = false;
+}
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
new file mode 100644
index 000000000000..1f65cdc9d406
--- /dev/null
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_SMP2P_H_
+#define _IPA_SMP2P_H_
+
+#include <linux/types.h>
+
+struct ipa;
+
+/**
+ * ipa_smp2p_init() - Initialize the IPA SMP2P subsystem
+ * @ipa: IPA pointer
+ * @modem_init: Whether the modem is responsible for GSI initialization
+ *
+ * @Return: 0 if successful, or a negative error code
+ */
+int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
+
+/**
+ * ipa_smp2p_exit() - Inverse of ipa_smp2p_init()
+ * @ipa: IPA pointer
+ */
+void ipa_smp2p_exit(struct ipa *ipa);
+
+/**
+ * ipa_smp2p_disable() - Prevent "ipa-setup-ready" interrupt handling
+ * @ipa: IPA pointer
+ *
+ * Prevent handling of the "setup ready" interrupt from the modem.
+ * This is used before initiating shutdown of the driver.
+ */
+void ipa_smp2p_disable(struct ipa *ipa);
+
+/**
+ * ipa_smp2p_notify_reset() - Reset modem notification state
+ * @ipa: IPA pointer
+ *
+ * If the modem crashes it queries the IPA clock state. In cleaning
+ * up after such a crash this is used to reset some state maintained
+ * for managing this notification.
+ */
+void ipa_smp2p_notify_reset(struct ipa *ipa);
+
+#endif /* _IPA_SMP2P_H_ */
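A rough sketch of the shutdown ordering these comments imply; the wrapper below is hypothetical and error handling is omitted, but the call order follows the descriptions above.

static void ipa_smp2p_shutdown(struct ipa *ipa)
{
	/* Quiesce first, so a late "ipa-setup-ready" interrupt from
	 * the modem cannot trigger setup during teardown.
	 */
	ipa_smp2p_disable(ipa);

	/* ...remaining driver teardown happens here... */

	/* Release the IRQs, the panic notifier, and the SMEM state */
	ipa_smp2p_exit(ipa);
}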
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
new file mode 100644
index 000000000000..9df2a3e78c98
--- /dev/null
+++ b/drivers/net/ipa/ipa_table.c
@@ -0,0 +1,700 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bits.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/build_bug.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+#include "ipa.h"
+#include "ipa_version.h"
+#include "ipa_endpoint.h"
+#include "ipa_table.h"
+#include "ipa_reg.h"
+#include "ipa_mem.h"
+#include "ipa_cmd.h"
+#include "gsi.h"
+#include "gsi_trans.h"
+
+/**
+ * DOC: IPA Filter and Route Tables
+ *
+ * The IPA has tables defined in its local shared memory that define filter
+ * and routing rules. Each entry in these tables contains a 64-bit DMA
+ * address that refers to DRAM (system memory) containing a rule definition.
+ * A rule consists of a contiguous block of 32-bit values terminated with
+ * 32 zero bits. A special "zero entry" rule consisting of 64 zero bits
+ * represents "no filtering" or "no routing," and is the reset value for
+ * filter or route table rules. Separate tables (both filter and route)
+ * used for IPv4 and IPv6. Additionally, there can be hashed filter or
+ * route tables, which are used when a hash of message metadata matches.
+ * Hashed operation is not supported by all IPA hardware.
+ *
+ * Each filter rule is associated with an AP or modem TX endpoint, though
+ * not all TX endpoints support filtering. The first 64-bit entry in a
+ * filter table is a bitmap indicating which endpoints have entries in
+ * the table. The low-order bit (bit 0) in this bitmap represents a
+ * special global filter, which applies to all traffic. This is not
+ * used in the current code. Bit 1, if set, indicates that there is an
+ * entry (i.e. a DMA address referring to a rule) for endpoint 0 in the
+ * table. Bit 2, if set, indicates there is an entry for endpoint 1,
+ * and so on. Space is set aside in IPA local memory to hold as many
+ * filter table entries as might be required, but typically they are not
+ * all used.
+ *
+ * The AP initializes all entries in a filter table to refer to a "zero"
+ * entry. Once initialized the modem and AP update the entries for
+ * endpoints they "own" directly. Currently the AP does not use the
+ * IPA filtering functionality.
+ *
+ * IPA Filter Table
+ * ----------------------
+ * endpoint bitmap | 0x0000000000000048 | Bits 3 and 6 set (endpoints 2 and 5)
+ * |--------------------|
+ * 1st endpoint | 0x000123456789abc0 | DMA address for modem endpoint 2 rule
+ * |--------------------|
+ * 2nd endpoint | 0x000123456789abf0 | DMA address for AP endpoint 5 rule
+ * |--------------------|
+ * (unused) | | (Unused space in filter table)
+ * |--------------------|
+ * . . .
+ * |--------------------|
+ * (unused) | | (Unused space in filter table)
+ * ----------------------
+ *
+ * The set of available route rules is divided about equally between the AP
+ * and modem. The AP initializes all entries in a route table to refer to
+ * a "zero entry". Once initialized, the modem and AP are responsible for
+ * updating their own entries. All entries in a route table are usable,
+ * though the AP currently does not use the IPA routing functionality.
+ *
+ * IPA Route Table
+ * ----------------------
+ * 1st modem route | 0x0001234500001100 | DMA address for first route rule
+ * |--------------------|
+ * 2nd modem route | 0x0001234500001140 | DMA address for second route rule
+ * |--------------------|
+ * . . .
+ * |--------------------|
+ * Last modem route| 0x0001234500002280 | DMA address for Nth route rule
+ * |--------------------|
+ * 1st AP route | 0x0001234500001100 | DMA address for route rule (N+1)
+ * |--------------------|
+ * 2nd AP route | 0x0001234500001140 | DMA address for next route rule
+ * |--------------------|
+ * . . .
+ * |--------------------|
+ * Last AP route | 0x0001234500002280 | DMA address for last route rule
+ * ----------------------
+ */
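+
+/* Illustration only (not part of the driver): given the "soft"
+ * endpoint bitmap, where bit N set means endpoint N does filtering,
+ * compute which filter table slot holds the rule address for an
+ * endpoint. Slot 0 holds the bitmap itself, so the count of
+ * filtering endpoints at or below this one is also its slot index.
+ */
+static inline u32 ipa_filter_slot(u32 filter_map, u32 endpoint_id)
+{
+	return hweight32(filter_map & GENMASK(endpoint_id, 0));
+}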
+
+/* IPA hardware constrains filter and route table alignment */
+#define IPA_TABLE_ALIGN 128 /* Minimum table alignment */
+
+/* Assignment of route table entries to the modem and AP */
+#define IPA_ROUTE_MODEM_MIN 0
+#define IPA_ROUTE_MODEM_COUNT 8
+
+#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
+#define IPA_ROUTE_AP_COUNT \
+ (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
+
+/* Filter or route rules consist of a set of 32-bit values followed by a
+ * 32-bit all-zero rule list terminator. The "zero rule" is simply an
+ * all-zero rule followed by the list terminator.
+ */
+#define IPA_ZERO_RULE_SIZE (2 * sizeof(__le32))
+
+#ifdef IPA_VALIDATE
+
+/* Check things that can be validated at build time. */
+static void ipa_table_validate_build(void)
+{
+ /* IPA hardware accesses memory 128 bytes at a time. Addresses
+ * referred to by entries in filter and route tables must be
+ * aligned on 128-byte boundaries. The only rule address
+ * ever used is the "zero rule", and it's aligned at the base
+ * of a coherent DMA allocation.
+ */
+ BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN);
+
+ /* Filter and route tables contain DMA addresses that refer to
+ * filter or route rules. We use a fixed constant to represent
+ * the size of either type of table entry. Code in ipa_table_init()
+ * uses a pointer to __le64 to initialize table entries.
+ */
+ BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t));
+ BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64));
+
+ /* A "zero rule" is used to represent no filtering or no routing.
+ * It is a 64-bit block of zeroed memory. Code in ipa_table_init()
+ * assumes that it can be written using a pointer to __le64.
+ */
+ BUILD_BUG_ON(IPA_ZERO_RULE_SIZE != sizeof(__le64));
+
+ /* Impose a practical limit on the number of routes */
+ BUILD_BUG_ON(IPA_ROUTE_COUNT_MAX > 32);
+ /* The modem must be allotted at least one route table entry */
+ BUILD_BUG_ON(!IPA_ROUTE_MODEM_COUNT);
+ /* But it can't have more than what is available */
+ BUILD_BUG_ON(IPA_ROUTE_MODEM_COUNT > IPA_ROUTE_COUNT_MAX);
+}
+
+static bool
+ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
+{
+ struct device *dev = &ipa->pdev->dev;
+ const struct ipa_mem *mem;
+ u32 size;
+
+ if (route) {
+ if (ipv6)
+ mem = hashed ? &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]
+ : &ipa->mem[IPA_MEM_V6_ROUTE];
+ else
+ mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]
+ : &ipa->mem[IPA_MEM_V4_ROUTE];
+ size = IPA_ROUTE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE;
+ } else {
+ if (ipv6)
+ mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED]
+ : &ipa->mem[IPA_MEM_V6_FILTER];
+ else
+ mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED]
+ : &ipa->mem[IPA_MEM_V4_FILTER];
+ size = (1 + IPA_FILTER_COUNT_MAX) * IPA_TABLE_ENTRY_SIZE;
+ }
+
+ if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
+ return false;
+
+ /* mem->size >= size would be sufficient, but we require an exact match */
+ if (mem->size == size)
+ return true;
+
+ /* Hashed table regions can be zero size if hashing is not supported */
+ if (hashed && !mem->size)
+ return true;
+
+ dev_err(dev, "IPv%c %s%s table region size 0x%02x, expected 0x%02x\n",
+ ipv6 ? '6' : '4', hashed ? "hashed " : "",
+ route ? "route" : "filter", mem->size, size);
+
+ return false;
+}
+
+/* Verify the filter and route table memory regions are the expected size */
+bool ipa_table_valid(struct ipa *ipa)
+{
+ bool valid = true;
+
+ valid = valid && ipa_table_valid_one(ipa, false, false, false);
+ valid = valid && ipa_table_valid_one(ipa, false, false, true);
+ valid = valid && ipa_table_valid_one(ipa, false, true, false);
+ valid = valid && ipa_table_valid_one(ipa, false, true, true);
+ valid = valid && ipa_table_valid_one(ipa, true, false, false);
+ valid = valid && ipa_table_valid_one(ipa, true, false, true);
+ valid = valid && ipa_table_valid_one(ipa, true, true, false);
+ valid = valid && ipa_table_valid_one(ipa, true, true, true);
+
+ return valid;
+}
+
+bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
+{
+ struct device *dev = &ipa->pdev->dev;
+ u32 count;
+
+ if (!filter_map) {
+ dev_err(dev, "at least one filtering endpoint is required\n");
+
+ return false;
+ }
+
+ count = hweight32(filter_map);
+ if (count > IPA_FILTER_COUNT_MAX) {
+ dev_err(dev, "too many filtering endpoints (%u, max %u)\n",
+ count, IPA_FILTER_COUNT_MAX);
+
+ return false;
+ }
+
+ return true;
+}
+
+#else /* !IPA_VALIDATE */
+static void ipa_table_validate_build(void)
+{
+}
+
+#endif /* !IPA_VALIDATE */
+
+/* Zero entry count means no table, so just return a 0 address */
+static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
+{
+ u32 skip;
+
+ if (!count)
+ return 0;
+
+/* assert(count <= max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX)); */
+
+ /* Skip over the zero rule and possibly the filter mask */
+ skip = filter_mask ? 1 : 2;
+
+ return ipa->table_addr + skip * sizeof(*ipa->table_virt);
+}
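+
+/* Worked example: with the layout built by ipa_table_init() below, a
+ * filter table address is table_addr + 8 (the bitmap slot), while a
+ * route table address is table_addr + 16 (the first slot holding the
+ * zero rule's DMA address).
+ */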
+
+static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
+ u16 first, u16 count, const struct ipa_mem *mem)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ dma_addr_t addr;
+ u32 offset;
+ u16 size;
+
+ /* Nothing to do if the table memory region is empty */
+ if (!mem->size)
+ return;
+
+ if (filter)
+ first++; /* skip over bitmap */
+
+ offset = mem->offset + first * IPA_TABLE_ENTRY_SIZE;
+ size = count * IPA_TABLE_ENTRY_SIZE;
+ addr = ipa_table_addr(ipa, false, count);
+
+ ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
+}
+
+/* Reset entries in a single filter table belonging to either the AP or
+ * modem to refer to the zero entry. The memory region supplied will be
+ * for the IPv4 and IPv6 non-hashed and hashed filter tables.
+ */
+static int
+ipa_filter_reset_table(struct ipa *ipa, const struct ipa_mem *mem, bool modem)
+{
+ u32 ep_mask = ipa->filter_map;
+ u32 count = hweight32(ep_mask);
+ struct gsi_trans *trans;
+ enum gsi_ee_id ee_id;
+
+ if (!mem->size)
+ return 0;
+
+ trans = ipa_cmd_trans_alloc(ipa, count);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction for %s filter reset\n",
+ modem ? "modem" : "AP");
+ return -EBUSY;
+ }
+
+ ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
+ while (ep_mask) {
+ u32 endpoint_id = __ffs(ep_mask);
+ struct ipa_endpoint *endpoint;
+
+ ep_mask ^= BIT(endpoint_id);
+
+ endpoint = &ipa->endpoint[endpoint_id];
+ if (endpoint->ee_id != ee_id)
+ continue;
+
+ ipa_table_reset_add(trans, true, endpoint_id, 1, mem);
+ }
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+/* Theoretically, each filter table could have more filter slots to
+ * update than the maximum number of commands in a transaction. So
+ * we do each table separately.
+ */
+static int ipa_filter_reset(struct ipa *ipa, bool modem)
+{
+ int ret;
+
+ ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER], modem);
+ if (ret)
+ return ret;
+
+ ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER_HASHED],
+ modem);
+ if (ret)
+ return ret;
+
+ ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER], modem);
+ if (ret)
+ return ret;
+ ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER_HASHED],
+ modem);
+
+ return ret;
+}
+
+/* The AP routes and modem routes are each contiguous within the
+ * table. We can update each table with a single command, and we
+ * won't exceed the per-transaction command limit.
+ */
+static int ipa_route_reset(struct ipa *ipa, bool modem)
+{
+ struct gsi_trans *trans;
+ u16 first;
+ u16 count;
+
+ trans = ipa_cmd_trans_alloc(ipa, 4);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev,
+ "no transaction for %s route reset\n",
+ modem ? "modem" : "AP");
+ return -EBUSY;
+ }
+
+ if (modem) {
+ first = IPA_ROUTE_MODEM_MIN;
+ count = IPA_ROUTE_MODEM_COUNT;
+ } else {
+ first = IPA_ROUTE_AP_MIN;
+ count = IPA_ROUTE_AP_COUNT;
+ }
+
+ ipa_table_reset_add(trans, false, first, count,
+ &ipa->mem[IPA_MEM_V4_ROUTE]);
+ ipa_table_reset_add(trans, false, first, count,
+ &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);
+
+ ipa_table_reset_add(trans, false, first, count,
+ &ipa->mem[IPA_MEM_V6_ROUTE]);
+ ipa_table_reset_add(trans, false, first, count,
+ &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+void ipa_table_reset(struct ipa *ipa, bool modem)
+{
+ struct device *dev = &ipa->pdev->dev;
+ const char *ee_name;
+ int ret;
+
+ ee_name = modem ? "modem" : "AP";
+
+ /* Report errors, but reset filter and route tables */
+ ret = ipa_filter_reset(ipa, modem);
+ if (ret)
+ dev_err(dev, "error %d resetting filter table for %s\n",
+ ret, ee_name);
+
+ ret = ipa_route_reset(ipa, modem);
+ if (ret)
+ dev_err(dev, "error %d resetting route table for %s\n",
+ ret, ee_name);
+}
+
+int ipa_table_hash_flush(struct ipa *ipa)
+{
+ u32 offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ struct gsi_trans *trans;
+ u32 val;
+
+ /* IPA version 4.2 does not support hashed tables */
+ if (ipa->version == IPA_VERSION_4_2)
+ return 0;
+
+ trans = ipa_cmd_trans_alloc(ipa, 1);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev, "no transaction for hash flush\n");
+ return -EBUSY;
+ }
+
+ val = IPV4_FILTER_HASH_FLUSH | IPV6_FILTER_HASH_FLUSH;
+ val |= IPV6_ROUTER_HASH_FLUSH | IPV4_ROUTER_HASH_FLUSH;
+
+ ipa_cmd_register_write_add(trans, offset, val, val, false);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
+ enum ipa_cmd_opcode opcode,
+ const struct ipa_mem *mem,
+ const struct ipa_mem *hash_mem)
+{
+ struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ dma_addr_t hash_addr;
+ dma_addr_t addr;
+ u16 hash_count;
+ u16 hash_size;
+ u16 count;
+ u16 size;
+
+ /* The number of filtering endpoints determines number of entries
+ * in the filter table. The hashed and non-hashed filter table
+ * will have the same number of entries. The size of the route
+ * table region determines the number of entries it has.
+ */
+ if (filter) {
+ count = hweight32(ipa->filter_map);
+ hash_count = hash_mem->size ? count : 0;
+ } else {
+ count = mem->size / IPA_TABLE_ENTRY_SIZE;
+ hash_count = hash_mem->size / IPA_TABLE_ENTRY_SIZE;
+ }
+ size = count * IPA_TABLE_ENTRY_SIZE;
+ hash_size = hash_count * IPA_TABLE_ENTRY_SIZE;
+
+ addr = ipa_table_addr(ipa, filter, count);
+ hash_addr = ipa_table_addr(ipa, filter, hash_count);
+
+ ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
+ hash_size, hash_mem->offset, hash_addr);
+}
+
+int ipa_table_setup(struct ipa *ipa)
+{
+ struct gsi_trans *trans;
+
+ trans = ipa_cmd_trans_alloc(ipa, 4);
+ if (!trans) {
+ dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
+ return -EBUSY;
+ }
+
+ ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT,
+ &ipa->mem[IPA_MEM_V4_ROUTE],
+ &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);
+
+ ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT,
+ &ipa->mem[IPA_MEM_V6_ROUTE],
+ &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);
+
+ ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT,
+ &ipa->mem[IPA_MEM_V4_FILTER],
+ &ipa->mem[IPA_MEM_V4_FILTER_HASHED]);
+
+ ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT,
+ &ipa->mem[IPA_MEM_V6_FILTER],
+ &ipa->mem[IPA_MEM_V6_FILTER_HASHED]);
+
+ gsi_trans_commit_wait(trans);
+
+ return 0;
+}
+
+void ipa_table_teardown(struct ipa *ipa)
+{
+ /* Nothing to do (XXX should the tables be reset here?) */
+}
+
+/**
+ * ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple
+ * @endpoint_id: Endpoint whose filter hash tuple should be zeroed
+ *
+ * Endpoint must be for the AP (not modem) and support filtering. Updates
+ * the filter hash values without changing route ones.
+ */
+static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
+{
+ u32 endpoint_id = endpoint->endpoint_id;
+ u32 offset;
+ u32 val;
+
+ offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(endpoint_id);
+
+ val = ioread32(endpoint->ipa->reg_virt + offset);
+
+ /* Zero all filter-related fields, preserving the rest */
+ u32_replace_bits(val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);
+
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
+}
+
+static void ipa_filter_config(struct ipa *ipa, bool modem)
+{
+ enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
+ u32 ep_mask = ipa->filter_map;
+
+ /* IPA version 4.2 has no hashed filter tables */
+ if (ipa->version == IPA_VERSION_4_2)
+ return;
+
+ while (ep_mask) {
+ u32 endpoint_id = __ffs(ep_mask);
+ struct ipa_endpoint *endpoint;
+
+ ep_mask ^= BIT(endpoint_id);
+
+ endpoint = &ipa->endpoint[endpoint_id];
+ if (endpoint->ee_id == ee_id)
+ ipa_filter_tuple_zero(endpoint);
+ }
+}
+
+static void ipa_filter_deconfig(struct ipa *ipa, bool modem)
+{
+ /* Nothing to do */
+}
+
+static bool ipa_route_id_modem(u32 route_id)
+{
+ return route_id >= IPA_ROUTE_MODEM_MIN &&
+ route_id <= IPA_ROUTE_MODEM_MIN + IPA_ROUTE_MODEM_COUNT - 1;
+}
+
+/**
+ * ipa_route_tuple_zero() - Zero a hashed route table entry tuple
+ * @route_id: Route table entry whose hash tuple should be zeroed
+ *
+ * Updates the route hash values without changing filter ones.
+ */
+static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
+{
+ u32 offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(route_id);
+ u32 val;
+
+ val = ioread32(ipa->reg_virt + offset);
+
+ /* Zero all route-related fields, preserving the rest */
+ u32_replace_bits(val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+static void ipa_route_config(struct ipa *ipa, bool modem)
+{
+ u32 route_id;
+
+ /* IPA version 4.2 has no hashed route tables */
+ if (ipa->version == IPA_VERSION_4_2)
+ return;
+
+ for (route_id = 0; route_id < IPA_ROUTE_COUNT_MAX; route_id++)
+ if (ipa_route_id_modem(route_id) == modem)
+ ipa_route_tuple_zero(ipa, route_id);
+}
+
+static void ipa_route_deconfig(struct ipa *ipa, bool modem)
+{
+ /* Nothing to do */
+}
+
+void ipa_table_config(struct ipa *ipa)
+{
+ ipa_filter_config(ipa, false);
+ ipa_filter_config(ipa, true);
+ ipa_route_config(ipa, false);
+ ipa_route_config(ipa, true);
+}
+
+void ipa_table_deconfig(struct ipa *ipa)
+{
+ ipa_route_deconfig(ipa, true);
+ ipa_route_deconfig(ipa, false);
+ ipa_filter_deconfig(ipa, true);
+ ipa_filter_deconfig(ipa, false);
+}
+
+/*
+ * Initialize a coherent DMA allocation containing initialized filter and
+ * route table data. This is used when initializing or resetting the IPA
+ * filter or route table.
+ *
+ * The first entry in a filter table contains a bitmap indicating which
+ * endpoints contain entries in the table. In addition to that first entry,
+ * there are at most IPA_FILTER_COUNT_MAX entries that follow. Filter table
+ * entries are 64 bits wide, and (other than the bitmap) contain the DMA
+ * address of a filter rule. A "zero rule" indicates no filtering, and
+ * consists of 64 bits of zeroes. When a filter table is initialized (or
+ * reset) its entries are made to refer to the zero rule.
+ *
+ * Each entry in a route table is the DMA address of a routing rule. For
+ * routing there is also a 64-bit "zero rule" that means no routing, and
+ * when a route table is initialized or reset, its entries are made to refer
+ * to the zero rule. The zero rule is shared for route and filter tables.
+ *
+ * Note that the IPA hardware requires a filter or route rule address to be
+ * aligned on a 128 byte boundary. The coherent DMA buffer we allocate here
+ * has a minimum alignment, and we place the zero rule at the base of that
+ * allocated space. In ipa_table_init() we verify the minimum DMA allocation
+ * meets our requirement.
+ *
+ * +-------------------+
+ * --> | zero rule |
+ * / |-------------------|
+ * | | filter mask |
+ * |\ |-------------------|
+ * | ---- zero rule address | \
+ * |\ |-------------------| |
+ * | ---- zero rule address | | IPA_FILTER_COUNT_MAX
+ * | |-------------------| > or IPA_ROUTE_COUNT_MAX,
+ * | ... | whichever is greater
+ * \ |-------------------| |
+ * ---- zero rule address | /
+ * +-------------------+
+ */
+int ipa_table_init(struct ipa *ipa)
+{
+ u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
+ struct device *dev = &ipa->pdev->dev;
+ dma_addr_t addr;
+ __le64 le_addr;
+ __le64 *virt;
+ size_t size;
+
+ ipa_table_validate_build();
+
+ size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
+ virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ ipa->table_virt = virt;
+ ipa->table_addr = addr;
+
+ /* First slot is the zero rule */
+ *virt++ = 0;
+
+ /* Next is the filter table bitmap. The "soft" bitmap value
+ * must be converted to the hardware representation by shifting
+ * it left one position. (Bit 0 represents global filtering,
+ * which is possible but not used.)
+ */
+ *virt++ = cpu_to_le64((u64)ipa->filter_map << 1);
+
+ /* All the rest contain the DMA address of the zero rule */
+ le_addr = cpu_to_le64(addr);
+ while (count--)
+ *virt++ = le_addr;
+
+ return 0;
+}
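+
+/* With the constants from ipa_table.h (IPA_FILTER_COUNT_MAX 14,
+ * IPA_ROUTE_COUNT_MAX 15), count above is 15, so the allocation is
+ * 8 + (1 + 15) * 8 = 136 bytes: the zero rule at offset 0, the filter
+ * bitmap at offset 8, then 15 copies of the zero rule's DMA address.
+ */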
+
+void ipa_table_exit(struct ipa *ipa)
+{
+ u32 count = max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX);
+ struct device *dev = &ipa->pdev->dev;
+ size_t size;
+
+ size = IPA_ZERO_RULE_SIZE + (1 + count) * IPA_TABLE_ENTRY_SIZE;
+
+ dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr);
+ ipa->table_addr = 0;
+ ipa->table_virt = NULL;
+}
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
new file mode 100644
index 000000000000..64ea0221441a
--- /dev/null
+++ b/drivers/net/ipa/ipa_table.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_TABLE_H_
+#define _IPA_TABLE_H_
+
+#include <linux/types.h>
+
+struct ipa;
+
+/* The size of a filter or route table entry */
+#define IPA_TABLE_ENTRY_SIZE sizeof(__le64) /* Holds a physical address */
+
+/* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
+#define IPA_FILTER_COUNT_MAX 14
+
+/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
+#define IPA_ROUTE_COUNT_MAX 15
+
+#ifdef IPA_VALIDATE
+
+/**
+ * ipa_table_valid() - Validate route and filter table memory regions
+ * @ipa: IPA pointer
+ *
+ * @Return: true if all regions are valid, false otherwise
+ */
+bool ipa_table_valid(struct ipa *ipa);
+
+/**
+ * ipa_filter_map_valid() - Validate a filter table endpoint bitmap
+ * @ipa: IPA pointer
+ * @filter_map: Filter table endpoint bitmap to validate
+ *
+ * @Return: true if the bitmap is valid, false otherwise
+ */
+bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map);
+
+#else /* !IPA_VALIDATE */
+
+static inline bool ipa_table_valid(struct ipa *ipa)
+{
+ return true;
+}
+
+static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
+{
+ return true;
+}
+
+#endif /* !IPA_VALIDATE */
+
+/**
+ * ipa_table_reset() - Reset filter and route tables entries to "none"
+ * @ipa: IPA pointer
+ * @modem: Whether to reset modem or AP entries
+ */
+void ipa_table_reset(struct ipa *ipa, bool modem);
+
+/**
+ * ipa_table_hash_flush() - Synchronize hashed filter and route updates
+ * @ipa: IPA pointer
+ */
+int ipa_table_hash_flush(struct ipa *ipa);
+
+/**
+ * ipa_table_setup() - Set up filter and route tables
+ * @ipa: IPA pointer
+ */
+int ipa_table_setup(struct ipa *ipa);
+
+/**
+ * ipa_table_teardown() - Inverse of ipa_table_setup()
+ * @ipa: IPA pointer
+ */
+void ipa_table_teardown(struct ipa *ipa);
+
+/**
+ * ipa_table_config() - Configure filter and route tables
+ * @ipa: IPA pointer
+ */
+void ipa_table_config(struct ipa *ipa);
+
+/**
+ * ipa_table_deconfig() - Inverse of ipa_table_config()
+ * @ipa: IPA pointer
+ */
+void ipa_table_deconfig(struct ipa *ipa);
+
+/**
+ * ipa_table_init() - Do early initialization of filter and route tables
+ * @ipa: IPA pointer
+ */
+int ipa_table_init(struct ipa *ipa);
+
+/**
+ * ipa_table_exit() - Inverse of ipa_table_init()
+ * @ipa: IPA pointer
+ */
+void ipa_table_exit(struct ipa *ipa);
+
+#endif /* _IPA_TABLE_H_ */
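Taken together, the comments above imply a bring-up sequence along these lines. The wrapper is hypothetical (the real driver interleaves these calls with other IPA init stages), but the ordering follows the function descriptions.

static int ipa_tables_bringup(struct ipa *ipa)
{
	int ret;

	ret = ipa_table_init(ipa);	/* allocate/fill the DMA data */
	if (ret)
		return ret;

	ret = ipa_table_setup(ipa);	/* program tables via GSI commands */
	if (ret)
		goto err_table_exit;

	ipa_table_config(ipa);		/* zero hash tuples where supported */

	return 0;

err_table_exit:
	ipa_table_exit(ipa);
	return ret;
}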
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
new file mode 100644
index 000000000000..a1f8db00d55a
--- /dev/null
+++ b/drivers/net/ipa/ipa_uc.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2018-2020 Linaro Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include "ipa.h"
+#include "ipa_clock.h"
+#include "ipa_uc.h"
+
+/**
+ * DOC: The IPA embedded microcontroller
+ *
+ * The IPA incorporates a microcontroller that is able to do some additional
+ * handling/offloading of network activity. The current code makes
+ * essentially no use of the microcontroller, but it still requires some
+ * initialization. It needs to be notified in the event the AP crashes.
+ *
+ * The microcontroller can generate two interrupts to the AP. One interrupt
+ * is used to indicate that a response to a request from the AP is available.
+ * The other is used to notify the AP of the occurrence of an event. In
+ * addition, the AP can interrupt the microcontroller by writing a register.
+ *
+ * A 128-byte block of structured memory within the IPA SRAM is used together
+ * with these interrupts to implement the communication interface between the
+ * AP and the IPA microcontroller. Each side writes data to the shared area
+ * before interrupting its peer, which will read the written data in response
+ * to the interrupt. Some information found in the shared area is currently
+ * unused. All remaining space in the shared area is reserved, and must not
+ * be read or written by the AP.
+ */
+/* Supports hardware interface version 0x2000 */
+
+/* Offset relative to the base of the IPA shared address space of the
+ * shared region used for communication with the microcontroller. The
+ * region is 128 bytes in size, but only the first 40 bytes are used.
+ */
+#define IPA_MEM_UC_OFFSET 0x0000
+
+/* Delay to allow the microcontroller to save state when crashing */
+#define IPA_SEND_DELAY 100 /* microseconds */
+
+/**
+ * struct ipa_uc_mem_area - AP/microcontroller shared memory area
+ * @command: command code (AP->microcontroller)
+ * @command_param: low 32 bits of command parameter (AP->microcontroller)
+ * @command_param_hi: high 32 bits of command parameter (AP->microcontroller)
+ *
+ * @response: response code (microcontroller->AP)
+ * @response_param: response parameter (microcontroller->AP)
+ *
+ * @event: event code (microcontroller->AP)
+ * @event_param: event parameter (microcontroller->AP)
+ *
+ * @first_error_address: address of first error-source on SNOC
+ * @hw_state: state of hardware (including error type information)
+ * @warning_counter: counter of non-fatal hardware errors
+ * @interface_version: hardware-reported interface version
+ */
+struct ipa_uc_mem_area {
+ u8 command; /* enum ipa_uc_command */
+ u8 reserved0[3];
+ __le32 command_param;
+ __le32 command_param_hi;
+ u8 response; /* enum ipa_uc_response */
+ u8 reserved1[3];
+ __le32 response_param;
+ u8 event; /* enum ipa_uc_event */
+ u8 reserved2[3];
+
+ __le32 event_param;
+ __le32 first_error_address;
+ u8 hw_state;
+ u8 warning_counter;
+ __le16 reserved3;
+ __le16 interface_version;
+ __le16 reserved4;
+};
+
+/** enum ipa_uc_command - commands from the AP to the microcontroller */
+enum ipa_uc_command {
+ IPA_UC_COMMAND_NO_OP = 0,
+ IPA_UC_COMMAND_UPDATE_FLAGS = 1,
+ IPA_UC_COMMAND_DEBUG_RUN_TEST = 2,
+ IPA_UC_COMMAND_DEBUG_GET_INFO = 3,
+ IPA_UC_COMMAND_ERR_FATAL = 4,
+ IPA_UC_COMMAND_CLK_GATE = 5,
+ IPA_UC_COMMAND_CLK_UNGATE = 6,
+ IPA_UC_COMMAND_MEMCPY = 7,
+ IPA_UC_COMMAND_RESET_PIPE = 8,
+ IPA_UC_COMMAND_REG_WRITE = 9,
+ IPA_UC_COMMAND_GSI_CH_EMPTY = 10,
+};
+
+/** enum ipa_uc_response - microcontroller response codes */
+enum ipa_uc_response {
+ IPA_UC_RESPONSE_NO_OP = 0,
+ IPA_UC_RESPONSE_INIT_COMPLETED = 1,
+ IPA_UC_RESPONSE_CMD_COMPLETED = 2,
+ IPA_UC_RESPONSE_DEBUG_GET_INFO = 3,
+};
+
+/** enum ipa_uc_event - common cpu events reported by the microcontroller */
+enum ipa_uc_event {
+ IPA_UC_EVENT_NO_OP = 0,
+ IPA_UC_EVENT_ERROR = 1,
+ IPA_UC_EVENT_LOG_INFO = 2,
+};
+
+static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
+{
+ u32 offset = ipa->mem_offset + ipa->mem[IPA_MEM_UC_SHARED].offset;
+
+ return ipa->mem_virt + offset;
+}
+
+/* Microcontroller event IPA interrupt handler */
+static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+ struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+ struct device *dev = &ipa->pdev->dev;
+
+ if (shared->event == IPA_UC_EVENT_ERROR)
+ dev_err(dev, "microcontroller error event\n");
+ else
+ dev_err(dev, "unsupported microcontroller event %hhu\n",
+ shared->event);
+}
+
+/* Microcontroller response IPA interrupt handler */
+static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+ struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+
+ /* An INIT_COMPLETED response message is sent to the AP by the
+ * microcontroller when it is operational. Other than this, the AP
+ * should only receive responses from the microcontroller when it has
+ * sent it a request message.
+ *
+ * We can drop the clock reference taken in ipa_uc_init() once we
+ * know the microcontroller has finished its initialization.
+ */
+ switch (shared->response) {
+ case IPA_UC_RESPONSE_INIT_COMPLETED:
+ ipa->uc_loaded = true;
+ ipa_clock_put(ipa);
+ break;
+ default:
+ dev_warn(&ipa->pdev->dev,
+ "unsupported microcontroller response %hhu\n",
+ shared->response);
+ break;
+ }
+}
+
+/* ipa_uc_setup() - Set up the microcontroller */
+void ipa_uc_setup(struct ipa *ipa)
+{
+ /* The microcontroller needs the IPA clock running until it has
+ * completed its initialization. It signals this by sending an
+ * INIT_COMPLETED response message to the AP. This could occur after
+ * we have finished doing the rest of the IPA initialization, so we
+ * need to take an extra "proxy" reference, and hold it until we've
+ * received that signal. (This reference is dropped in
+ * ipa_uc_response_hdlr(), above.)
+ */
+ ipa_clock_get(ipa);
+
+ ipa->uc_loaded = false;
+ ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, ipa_uc_event_handler);
+ ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_1, ipa_uc_response_hdlr);
+}
+
+/* Inverse of ipa_uc_setup() */
+void ipa_uc_teardown(struct ipa *ipa)
+{
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
+ ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
+ if (!ipa->uc_loaded)
+ ipa_clock_put(ipa);
+}
+
+/* Send a command to the microcontroller */
+static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
+{
+ struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+
+ shared->command = command;
+ shared->command_param = cpu_to_le32(command_param);
+ shared->command_param_hi = 0;
+ shared->response = 0;
+ shared->response_param = 0;
+
+ iowrite32(1, ipa->reg_virt + IPA_REG_IRQ_UC_OFFSET);
+}
+
+/* Tell the microcontroller the AP is shutting down */
+void ipa_uc_panic_notifier(struct ipa *ipa)
+{
+ if (!ipa->uc_loaded)
+ return;
+
+ send_uc_command(ipa, IPA_UC_COMMAND_ERR_FATAL, 0);
+
+ /* give uc enough time to save state */
+ udelay(IPA_SEND_DELAY);
+}
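To make the shared-memory handshake concrete, here is a hypothetical caller of send_uc_command() above; IPA_UC_COMMAND_NO_OP exists in the enum, but nothing in this patch issues it. The microcontroller's reply would arrive via ipa_uc_response_hdlr().

static void ipa_uc_ping(struct ipa *ipa)
{
	/* Nothing to ping until the microcontroller is loaded */
	if (!ipa->uc_loaded)
		return;

	send_uc_command(ipa, IPA_UC_COMMAND_NO_OP, 0);
}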
diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h
new file mode 100644
index 000000000000..e8510899a3f0
--- /dev/null
+++ b/drivers/net/ipa/ipa_uc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_UC_H_
+#define _IPA_UC_H_
+
+struct ipa;
+
+/**
+ * ipa_uc_setup() - set up the IPA microcontroller subsystem
+ * @ipa: IPA pointer
+ */
+void ipa_uc_setup(struct ipa *ipa);
+
+/**
+ * ipa_uc_teardown() - inverse of ipa_uc_setup()
+ * @ipa: IPA pointer
+ */
+void ipa_uc_teardown(struct ipa *ipa);
+
+/**
+ * ipa_uc_panic_notifier() - Notify the microcontroller of a system crash
+ * @ipa: IPA pointer
+ *
+ * Notifier function called when the system crashes, to inform the
+ * microcontroller of the event.
+ */
+void ipa_uc_panic_notifier(struct ipa *ipa);
+
+#endif /* _IPA_UC_H_ */
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
new file mode 100644
index 000000000000..85449df0f512
--- /dev/null
+++ b/drivers/net/ipa/ipa_version.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2020 Linaro Ltd.
+ */
+#ifndef _IPA_VERSION_H_
+#define _IPA_VERSION_H_
+
+/**
+ * enum ipa_version
+ *
+ * Defines the version of IPA (and GSI) hardware present on the platform.
+ * It seems this might be better defined elsewhere, but having it here gets
+ * it where it's needed.
+ */
+enum ipa_version {
+ IPA_VERSION_3_5_1, /* GSI version 1.3.0 */
+ IPA_VERSION_4_0, /* GSI version 2.0 */
+ IPA_VERSION_4_1, /* GSI version 2.1 */
+ IPA_VERSION_4_2, /* GSI version 2.2 */
+};
+
+#endif /* _IPA_VERSION_H_ */
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 92bc2b2df660..da82d7f16a09 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -19,6 +19,7 @@
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <linux/phy.h>
+#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>
#include <uapi/linux/if_macsec.h>
@@ -69,6 +70,16 @@ struct macsec_eth_header {
sc; \
sc = rtnl_dereference(sc->next))
+#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
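+/* For example, pn_same_half(0x7fffffff, 0x80000000) is false (bit 31
+ * differs, so the PN crossed into the other half of the 32-bit space),
+ * while pn_same_half(1, 2) is true. The XPN code below uses this to
+ * detect lower-half wrap.
+ */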
+
+struct gcm_iv_xpn {
+ union {
+ u8 short_secure_channel_id[4];
+ ssci_t ssci;
+ };
+ __be64 pn;
+} __packed;
+
struct gcm_iv {
union {
u8 secure_channel_id[8];
@@ -77,17 +88,6 @@ struct gcm_iv {
__be32 pn;
};
-struct macsec_dev_stats {
- __u64 OutPktsUntagged;
- __u64 InPktsUntagged;
- __u64 OutPktsTooLong;
- __u64 InPktsNoTag;
- __u64 InPktsBadTag;
- __u64 InPktsUnknownSCI;
- __u64 InPktsNoSCI;
- __u64 InPktsOverrun;
-};
-
#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
struct pcpu_secy_stats {
@@ -230,11 +230,13 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
+#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)
#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32
#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
+#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
@@ -326,7 +328,8 @@ static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
- if (macsec->offload == MACSEC_OFFLOAD_PHY)
+ if (macsec->offload == MACSEC_OFFLOAD_MAC ||
+ macsec->offload == MACSEC_OFFLOAD_PHY)
return true;
return false;
@@ -342,6 +345,9 @@ static bool macsec_check_offload(enum macsec_offload offload,
if (offload == MACSEC_OFFLOAD_PHY)
return macsec->real_dev->phydev &&
macsec->real_dev->phydev->macsec_ops;
+ else if (offload == MACSEC_OFFLOAD_MAC)
+ return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
+ macsec->real_dev->macsec_ops;
return false;
}
@@ -356,9 +362,14 @@ static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
if (offload == MACSEC_OFFLOAD_PHY)
ctx->phydev = macsec->real_dev->phydev;
+ else if (offload == MACSEC_OFFLOAD_MAC)
+ ctx->netdev = macsec->real_dev;
}
- return macsec->real_dev->phydev->macsec_ops;
+ if (offload == MACSEC_OFFLOAD_PHY)
+ return macsec->real_dev->phydev->macsec_ops;
+ else
+ return macsec->real_dev->macsec_ops;
}
/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
@@ -373,8 +384,8 @@ static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
return __macsec_get_ops(macsec->offload, macsec, ctx);
}
-/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
-static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
+/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
+static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
int len = skb->len - 2 * ETH_ALEN;
@@ -399,8 +410,8 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
if (h->unused)
return false;
- /* rx.pn != 0 (figure 10-5) */
- if (!h->packet_number)
+ /* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
+ if (!h->packet_number && !xpn)
return false;
/* length check, f) g) h) i) */
@@ -412,6 +423,15 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
+static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
+ salt_t salt)
+{
+ struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
+
+ gcm_iv->ssci = ssci ^ salt.ssci;
+ gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
+}
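+
+/* The IV built above follows IEEE 802.1AEbw: the 96-bit GCM-AES-XPN
+ * IV is the 32-bit SSCI followed by the 64-bit PN, each XORed with
+ * the corresponding bytes of the 12-byte per-SA salt.
+ */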
+
static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
@@ -447,14 +467,19 @@ void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
-static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
+static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
+ struct macsec_secy *secy)
{
- u32 pn;
+ pn_t pn;
spin_lock_bh(&tx_sa->lock);
- pn = tx_sa->next_pn;
- tx_sa->next_pn++;
+ pn = tx_sa->next_pn_halves;
+ if (secy->xpn)
+ tx_sa->next_pn++;
+ else
+ tx_sa->next_pn_halves.lower++;
+
if (tx_sa->next_pn == 0)
__macsec_pn_wrapped(secy, tx_sa);
spin_unlock_bh(&tx_sa->lock);
@@ -569,7 +594,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct macsec_tx_sa *tx_sa;
struct macsec_dev *macsec = macsec_priv(dev);
bool sci_present;
- u32 pn;
+ pn_t pn;
secy = &macsec->secy;
tx_sc = &secy->tx_sc;
@@ -611,12 +636,12 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
memmove(hh, eth, 2 * ETH_ALEN);
pn = tx_sa_update_pn(tx_sa, secy);
- if (pn == 0) {
+ if (pn.full64 == 0) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOLINK);
}
- macsec_fill_sectag(hh, secy, pn, sci_present);
+ macsec_fill_sectag(hh, secy, pn.lower, sci_present);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
skb_put(skb, secy->icv_len);
@@ -647,7 +672,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
return ERR_PTR(-ENOMEM);
}
- macsec_fill_iv(iv, secy->sci, pn);
+ if (secy->xpn)
+ macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
+ else
+ macsec_fill_iv(iv, secy->sci, pn.lower);
sg_init_table(sg, ret);
ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -699,13 +727,14 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u32 lowest_pn = 0;
spin_lock(&rx_sa->lock);
- if (rx_sa->next_pn >= secy->replay_window)
- lowest_pn = rx_sa->next_pn - secy->replay_window;
+ if (rx_sa->next_pn_halves.lower >= secy->replay_window)
+ lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;
/* Now perform replay protection check again
* (see IEEE 802.1AE-2006 figure 10-5)
*/
- if (secy->replay_protect && pn < lowest_pn) {
+ if (secy->replay_protect && pn < lowest_pn &&
+ (!secy->xpn || pn_same_half(pn, lowest_pn))) {
spin_unlock(&rx_sa->lock);
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
@@ -754,8 +783,15 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
}
u64_stats_update_end(&rxsc_stats->syncp);
- if (pn >= rx_sa->next_pn)
- rx_sa->next_pn = pn + 1;
+ // Instead of "pn >=" - to support pn overflow in xpn
+ if (pn + 1 > rx_sa->next_pn_halves.lower) {
+ rx_sa->next_pn_halves.lower = pn + 1;
+ } else if (secy->xpn &&
+ !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
+ rx_sa->next_pn_halves.upper++;
+ rx_sa->next_pn_halves.lower = pn + 1;
+ }
+
spin_unlock(&rx_sa->lock);
}
@@ -842,6 +878,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
unsigned char *iv;
struct aead_request *req;
struct macsec_eth_header *hdr;
+ u32 hdr_pn;
u16 icv_len = secy->icv_len;
macsec_skb_cb(skb)->valid = false;
@@ -861,7 +898,21 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
}
hdr = (struct macsec_eth_header *)skb->data;
- macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
+ hdr_pn = ntohl(hdr->packet_number);
+
+ if (secy->xpn) {
+ pn_t recovered_pn = rx_sa->next_pn_halves;
+
+ recovered_pn.lower = hdr_pn;
+ if (hdr_pn < rx_sa->next_pn_halves.lower &&
+ !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
+ recovered_pn.upper++;
+
+ macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
+ rx_sa->key.salt);
+ } else {
+ macsec_fill_iv(iv, sci, hdr_pn);
+ }
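+
+ /* Worked example (assumed values): if next_pn is 0x1_fffffff0 and
+ * the SecTAG carries PN 0x00000005, the lower half wrapped, so the
+ * PN recovered for the IV in the XPN branch above is 0x2_00000005.
+ */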
sg_init_table(sg, ret);
ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -944,22 +995,53 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
/* Deliver to the uncontrolled port by default */
enum rx_handler_result ret = RX_HANDLER_PASS;
+ struct ethhdr *hdr = eth_hdr(skb);
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
- /* 10.6 If the management control validateFrames is not
- * Strict, frames without a SecTAG are received, counted, and
- * delivered to the Controlled Port
- */
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
+ struct net_device *ndev = macsec->secy.netdev;
- if (!macsec_is_offloaded(macsec) &&
- macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
+ /* If h/w offloading is enabled, HW decodes frames and strips
+ * the SecTAG, so we have to deduce which port to deliver to.
+ */
+ if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
+ if (ether_addr_equal_64bits(hdr->h_dest,
+ ndev->dev_addr)) {
+ /* exact match, divert skb to this port */
+ skb->dev = ndev;
+ skb->pkt_type = PACKET_HOST;
+ ret = RX_HANDLER_ANOTHER;
+ goto out;
+ } else if (is_multicast_ether_addr_64bits(
+ hdr->h_dest)) {
+ /* multicast frame, deliver on this port too */
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ break;
+
+ nskb->dev = ndev;
+ if (ether_addr_equal_64bits(hdr->h_dest,
+ ndev->broadcast))
+ nskb->pkt_type = PACKET_BROADCAST;
+ else
+ nskb->pkt_type = PACKET_MULTICAST;
+
+ netif_rx(nskb);
+ }
+ continue;
+ }
+
+ /* 10.6 If the management control validateFrames is not
+ * Strict, frames without a SecTAG are received, counted, and
+ * delivered to the Controlled Port
+ */
+ if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
@@ -971,19 +1053,13 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
if (!nskb)
break;
- nskb->dev = macsec->secy.netdev;
+ nskb->dev = ndev;
if (netif_rx(nskb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
}
-
- if (netif_running(macsec->secy.netdev) &&
- macsec_is_offloaded(macsec)) {
- ret = RX_HANDLER_EXACT;
- goto out;
- }
}
out:
@@ -1002,7 +1078,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
sci_t sci;
- u32 pn;
+ u32 hdr_pn;
bool cbit;
struct pcpu_rx_sc_stats *rxsc_stats;
struct pcpu_secy_stats *secy_stats;
@@ -1073,7 +1149,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
secy_stats = this_cpu_ptr(macsec->stats);
rxsc_stats = this_cpu_ptr(rx_sc->stats);
- if (!macsec_validate_skb(skb, secy->icv_len)) {
+ if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsBadTag++;
u64_stats_update_end(&secy_stats->syncp);
@@ -1105,13 +1181,16 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
}
/* First, PN check to avoid decrypting obviously wrong packets */
- pn = ntohl(hdr->packet_number);
+ hdr_pn = ntohl(hdr->packet_number);
if (secy->replay_protect) {
bool late;
spin_lock(&rx_sa->lock);
- late = rx_sa->next_pn >= secy->replay_window &&
- pn < (rx_sa->next_pn - secy->replay_window);
+ late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
+ hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
+
+ if (secy->xpn)
+ late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
spin_unlock(&rx_sa->lock);
if (late) {
@@ -1140,7 +1219,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_CONSUMED;
}
- if (!macsec_post_decrypt(skb, secy, pn))
+ if (!macsec_post_decrypt(skb, secy, hdr_pn))
goto drop;
deliver:
@@ -1258,6 +1337,7 @@ static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
return PTR_ERR(rx_sa->key.tfm);
}
+ rx_sa->ssci = MACSEC_UNDEF_SSCI;
rx_sa->active = false;
rx_sa->next_pn = 1;
refcount_set(&rx_sa->refcnt, 1);
@@ -1356,6 +1436,7 @@ static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
return PTR_ERR(tx_sa->key.tfm);
}
+ tx_sa->ssci = MACSEC_UNDEF_SSCI;
tx_sa->active = false;
refcount_set(&tx_sa->refcnt, 1);
spin_lock_init(&tx_sa->lock);
@@ -1388,6 +1469,11 @@ static struct net_device *get_dev_from_nl(struct net *net,
return dev;
}
+static enum macsec_offload nla_get_offload(const struct nlattr *nla)
+{
+ return (__force enum macsec_offload)nla_get_u8(nla);
+}
+
static sci_t nla_get_sci(const struct nlattr *nla)
{
return (__force sci_t)nla_get_u64(nla);
@@ -1399,6 +1485,16 @@ static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}
+static ssci_t nla_get_ssci(const struct nlattr *nla)
+{
+ return (__force ssci_t)nla_get_u32(nla);
+}
+
+static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
+{
+ return nla_put_u32(skb, attrtype, (__force u32)value);
+}
+
static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
struct nlattr **attrs,
struct nlattr **tb_sa,
@@ -1514,11 +1610,14 @@ static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
- [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
+ [MACSEC_SA_ATTR_PN] = { .type = NLA_MIN_LEN, .len = 4 },
[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
.len = MACSEC_KEYID_LEN, },
[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
.len = MACSEC_MAX_KEY_LEN, },
+ [MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
+ [MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
+ .len = MACSEC_SALT_LEN, },
};
static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
@@ -1591,7 +1690,8 @@ static bool validate_add_rxsa(struct nlattr **attrs)
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
return false;
- if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ if (attrs[MACSEC_SA_ATTR_PN] &&
+ *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
return false;
if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -1613,6 +1713,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
struct macsec_rx_sc *rx_sc;
struct macsec_rx_sa *rx_sa;
unsigned char assoc_num;
+ int pn_len;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
int err;
@@ -1645,6 +1746,29 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
+ pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
+ if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+ pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
+ nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ if (secy->xpn) {
+ if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
+ pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
+ nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
+ MACSEC_SALT_LEN);
+ rtnl_unlock();
+ return -EINVAL;
+ }
+ }
+
rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
if (rx_sa) {
rtnl_unlock();
@@ -1667,7 +1791,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&rx_sa->lock);
- rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&rx_sa->lock);
}
@@ -1689,6 +1813,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
+ ctx.secy = secy;
memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
MACSEC_KEYID_LEN);
@@ -1697,6 +1822,12 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
goto cleanup;
}
+ if (secy->xpn) {
+ rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
+ nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
+ MACSEC_SALT_LEN);
+ }
+
nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
@@ -1730,6 +1861,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
struct nlattr **attrs = info->attrs;
struct macsec_rx_sc *rx_sc;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ struct macsec_secy *secy;
bool was_active;
int ret;
@@ -1749,6 +1881,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(dev);
}
+ secy = &macsec_priv(dev)->secy;
sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
rx_sc = create_rx_sc(dev, sci);
@@ -1772,6 +1905,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
}
ctx.rx_sc = rx_sc;
+ ctx.secy = secy;
ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
if (ret)
@@ -1821,6 +1955,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
struct macsec_tx_sc *tx_sc;
struct macsec_tx_sa *tx_sa;
unsigned char assoc_num;
+ int pn_len;
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
bool was_operational;
int err;
@@ -1853,6 +1988,29 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
+ pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
+ if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+ pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
+ nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ if (secy->xpn) {
+ if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
+ pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
+ nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
+ MACSEC_SALT_LEN);
+ rtnl_unlock();
+ return -EINVAL;
+ }
+ }
+
tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
if (tx_sa) {
rtnl_unlock();
@@ -1874,7 +2032,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
}
spin_lock_bh(&tx_sa->lock);
- tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
@@ -1897,6 +2055,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
+ ctx.secy = secy;
memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
MACSEC_KEYID_LEN);
@@ -1905,6 +2064,12 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
goto cleanup;
}
+ if (secy->xpn) {
+ tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
+ nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
+ MACSEC_SALT_LEN);
+ }
+
nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
@@ -1966,6 +2131,7 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
+ ctx.secy = secy;
ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
if (ret)
@@ -2031,6 +2197,7 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
}
ctx.rx_sc = rx_sc;
+ ctx.secy = secy;
ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
if (ret)
goto cleanup;
@@ -2089,6 +2256,7 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
+ ctx.secy = secy;
ret = macsec_offload(ops->mdo_del_txsa, &ctx);
if (ret)
@@ -2111,7 +2279,9 @@ static bool validate_upd_sa(struct nlattr **attrs)
{
if (!attrs[MACSEC_SA_ATTR_AN] ||
attrs[MACSEC_SA_ATTR_KEY] ||
- attrs[MACSEC_SA_ATTR_KEYID])
+ attrs[MACSEC_SA_ATTR_KEYID] ||
+ attrs[MACSEC_SA_ATTR_SSCI] ||
+ attrs[MACSEC_SA_ATTR_SALT])
return false;
if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
@@ -2138,9 +2308,11 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
u8 assoc_num;
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
bool was_operational, was_active;
- u32 prev_pn = 0;
+ pn_t prev_pn;
int ret = 0;
+ prev_pn.full64 = 0;
+
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -2159,9 +2331,19 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
}
if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ int pn_len;
+
+ pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
+ if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+ pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
+ nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
spin_lock_bh(&tx_sa->lock);
- prev_pn = tx_sa->next_pn;
- tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ prev_pn = tx_sa->next_pn_halves;
+ tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
}
@@ -2186,6 +2368,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.tx_sa = tx_sa;
+ ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
if (ret)
@@ -2199,7 +2382,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
cleanup:
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&tx_sa->lock);
- tx_sa->next_pn = prev_pn;
+ tx_sa->next_pn_halves = prev_pn;
spin_unlock_bh(&tx_sa->lock);
}
tx_sa->active = was_active;
@@ -2219,9 +2402,11 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
bool was_active;
- u32 prev_pn = 0;
+ pn_t prev_pn;
int ret = 0;
+ prev_pn.full64 = 0;
+
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -2243,9 +2428,19 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
}
if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ int pn_len;
+
+ pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
+ if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+ pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
+ nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
spin_lock_bh(&rx_sa->lock);
- prev_pn = rx_sa->next_pn;
- rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ prev_pn = rx_sa->next_pn_halves;
+ rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&rx_sa->lock);
}
@@ -2266,6 +2461,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
ctx.sa.assoc_num = assoc_num;
ctx.sa.rx_sa = rx_sa;
+ ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
if (ret)
@@ -2278,7 +2474,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
cleanup:
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&rx_sa->lock);
- rx_sa->next_pn = prev_pn;
+ rx_sa->next_pn_halves = prev_pn;
spin_unlock_bh(&rx_sa->lock);
}
rx_sa->active = was_active;
@@ -2336,6 +2532,7 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
}
ctx.rx_sc = rx_sc;
+ ctx.secy = secy;
ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
if (ret)
@@ -2375,11 +2572,10 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
enum macsec_offload offload, prev_offload;
int (*func)(struct macsec_context *ctx);
struct nlattr **attrs = info->attrs;
- struct net_device *dev, *loop_dev;
+ struct net_device *dev;
const struct macsec_ops *ops;
struct macsec_context ctx;
struct macsec_dev *macsec;
- struct net *loop_net;
int ret;
if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -2407,28 +2603,6 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
!macsec_check_offload(offload, macsec))
return -EOPNOTSUPP;
- if (offload == MACSEC_OFFLOAD_OFF)
- goto skip_limitation;
-
- /* Check the physical interface isn't offloading another interface
- * first.
- */
- for_each_net(loop_net) {
- for_each_netdev(loop_net, loop_dev) {
- struct macsec_dev *priv;
-
- if (!netif_is_macsec(loop_dev))
- continue;
-
- priv = macsec_priv(loop_dev);
-
- if (priv->real_dev == macsec->real_dev &&
- priv->offload != MACSEC_OFFLOAD_OFF)
- return -EBUSY;
- }
- }
-
-skip_limitation:
/* Check if the net device is busy. */
if (netif_running(dev))
return -EBUSY;
@@ -2464,6 +2638,10 @@ skip_limitation:
goto rollback;
rtnl_unlock();
+	/* Force features update, since they are different for SW MACsec and
+ * HW offloading cases.
+ */
+ netdev_update_features(dev);
return 0;
rollback:
@@ -2473,207 +2651,309 @@ rollback:
return ret;
}
-static int copy_tx_sa_stats(struct sk_buff *skb,
- struct macsec_tx_sa_stats __percpu *pstats)
+static void get_tx_sa_stats(struct net_device *dev, int an,
+ struct macsec_tx_sa *tx_sa,
+ struct macsec_tx_sa_stats *sum)
{
- struct macsec_tx_sa_stats sum = {0, };
+ struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops) {
+ ctx.sa.assoc_num = an;
+ ctx.sa.tx_sa = tx_sa;
+ ctx.stats.tx_sa_stats = sum;
+ ctx.secy = &macsec_priv(dev)->secy;
+ macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
+ }
+ return;
+ }
+
for_each_possible_cpu(cpu) {
- const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
+ const struct macsec_tx_sa_stats *stats =
+ per_cpu_ptr(tx_sa->stats, cpu);
- sum.OutPktsProtected += stats->OutPktsProtected;
- sum.OutPktsEncrypted += stats->OutPktsEncrypted;
+ sum->OutPktsProtected += stats->OutPktsProtected;
+ sum->OutPktsEncrypted += stats->OutPktsEncrypted;
}
+}
- if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
- nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
+static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
+{
+ if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
+ sum->OutPktsProtected) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
+ sum->OutPktsEncrypted))
return -EMSGSIZE;
return 0;
}
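
The stats helpers are being split in two from here on: a get_*() step that sums the per-CPU counters, or asks the offloading device for the totals, and a copy_*() step that only serializes the summed struct into netlink attributes. A standalone model of the software path, with a plain array standing in for per-CPU storage:

#include <stdio.h>

#define NCPUS 4

struct tx_sa_stats {
	unsigned int OutPktsProtected;
	unsigned int OutPktsEncrypted;
};

/* The "get" half: aggregate only, no netlink involved. */
static void get_stats(const struct tx_sa_stats percpu[NCPUS],
		      struct tx_sa_stats *sum)
{
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		sum->OutPktsProtected += percpu[cpu].OutPktsProtected;
		sum->OutPktsEncrypted += percpu[cpu].OutPktsEncrypted;
	}
}

int main(void)
{
	struct tx_sa_stats percpu[NCPUS] = { {1, 2}, {3, 4}, {0, 1}, {2, 0} };
	struct tx_sa_stats sum = {0};

	get_stats(percpu, &sum);
	/* The "copy" half would now fill attributes from &sum. */
	printf("protected=%u encrypted=%u\n",
	       sum.OutPktsProtected, sum.OutPktsEncrypted);
	return 0;
}
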
-static noinline_for_stack int
-copy_rx_sa_stats(struct sk_buff *skb,
- struct macsec_rx_sa_stats __percpu *pstats)
+static void get_rx_sa_stats(struct net_device *dev,
+ struct macsec_rx_sc *rx_sc, int an,
+ struct macsec_rx_sa *rx_sa,
+ struct macsec_rx_sa_stats *sum)
{
- struct macsec_rx_sa_stats sum = {0, };
+ struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
- for_each_possible_cpu(cpu) {
- const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops) {
+ ctx.sa.assoc_num = an;
+ ctx.sa.rx_sa = rx_sa;
+ ctx.stats.rx_sa_stats = sum;
+ ctx.secy = &macsec_priv(dev)->secy;
+ ctx.rx_sc = rx_sc;
+ macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
+ }
+ return;
+ }
- sum.InPktsOK += stats->InPktsOK;
- sum.InPktsInvalid += stats->InPktsInvalid;
- sum.InPktsNotValid += stats->InPktsNotValid;
- sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
- sum.InPktsUnusedSA += stats->InPktsUnusedSA;
+ for_each_possible_cpu(cpu) {
+ const struct macsec_rx_sa_stats *stats =
+ per_cpu_ptr(rx_sa->stats, cpu);
+
+ sum->InPktsOK += stats->InPktsOK;
+ sum->InPktsInvalid += stats->InPktsInvalid;
+ sum->InPktsNotValid += stats->InPktsNotValid;
+ sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
+ sum->InPktsUnusedSA += stats->InPktsUnusedSA;
}
+}
- if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
- nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
- nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
- nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
- nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
+static int copy_rx_sa_stats(struct sk_buff *skb,
+ struct macsec_rx_sa_stats *sum)
+{
+ if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
+ sum->InPktsInvalid) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
+ sum->InPktsNotValid) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
+ sum->InPktsNotUsingSA) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
+ sum->InPktsUnusedSA))
return -EMSGSIZE;
return 0;
}
-static noinline_for_stack int
-copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
+static void get_rx_sc_stats(struct net_device *dev,
+ struct macsec_rx_sc *rx_sc,
+ struct macsec_rx_sc_stats *sum)
{
- struct macsec_rx_sc_stats sum = {0, };
+ struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops) {
+ ctx.stats.rx_sc_stats = sum;
+ ctx.secy = &macsec_priv(dev)->secy;
+ ctx.rx_sc = rx_sc;
+ macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
+ }
+ return;
+ }
+
for_each_possible_cpu(cpu) {
const struct pcpu_rx_sc_stats *stats;
struct macsec_rx_sc_stats tmp;
unsigned int start;
- stats = per_cpu_ptr(pstats, cpu);
+ stats = per_cpu_ptr(rx_sc->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
- sum.InOctetsValidated += tmp.InOctetsValidated;
- sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
- sum.InPktsUnchecked += tmp.InPktsUnchecked;
- sum.InPktsDelayed += tmp.InPktsDelayed;
- sum.InPktsOK += tmp.InPktsOK;
- sum.InPktsInvalid += tmp.InPktsInvalid;
- sum.InPktsLate += tmp.InPktsLate;
- sum.InPktsNotValid += tmp.InPktsNotValid;
- sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
- sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
+ sum->InOctetsValidated += tmp.InOctetsValidated;
+ sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
+ sum->InPktsUnchecked += tmp.InPktsUnchecked;
+ sum->InPktsDelayed += tmp.InPktsDelayed;
+ sum->InPktsOK += tmp.InPktsOK;
+ sum->InPktsInvalid += tmp.InPktsInvalid;
+ sum->InPktsLate += tmp.InPktsLate;
+ sum->InPktsNotValid += tmp.InPktsNotValid;
+ sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
+ sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
}
+}
+static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
+{
if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
- sum.InOctetsValidated,
+ sum->InOctetsValidated,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
- sum.InOctetsDecrypted,
+ sum->InOctetsDecrypted,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
- sum.InPktsUnchecked,
+ sum->InPktsUnchecked,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
- sum.InPktsDelayed,
+ sum->InPktsDelayed,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
- sum.InPktsOK,
+ sum->InPktsOK,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
- sum.InPktsInvalid,
+ sum->InPktsInvalid,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
- sum.InPktsLate,
+ sum->InPktsLate,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
- sum.InPktsNotValid,
+ sum->InPktsNotValid,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
- sum.InPktsNotUsingSA,
+ sum->InPktsNotUsingSA,
MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
- sum.InPktsUnusedSA,
+ sum->InPktsUnusedSA,
MACSEC_RXSC_STATS_ATTR_PAD))
return -EMSGSIZE;
return 0;
}
-static noinline_for_stack int
-copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
+static void get_tx_sc_stats(struct net_device *dev,
+ struct macsec_tx_sc_stats *sum)
{
- struct macsec_tx_sc_stats sum = {0, };
+ struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops) {
+ ctx.stats.tx_sc_stats = sum;
+ ctx.secy = &macsec_priv(dev)->secy;
+ macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
+ }
+ return;
+ }
+
for_each_possible_cpu(cpu) {
const struct pcpu_tx_sc_stats *stats;
struct macsec_tx_sc_stats tmp;
unsigned int start;
- stats = per_cpu_ptr(pstats, cpu);
+ stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
- sum.OutPktsProtected += tmp.OutPktsProtected;
- sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
- sum.OutOctetsProtected += tmp.OutOctetsProtected;
- sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
+ sum->OutPktsProtected += tmp.OutPktsProtected;
+ sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
+ sum->OutOctetsProtected += tmp.OutOctetsProtected;
+ sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
}
+}
+static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
+{
if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
- sum.OutPktsProtected,
+ sum->OutPktsProtected,
MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
- sum.OutPktsEncrypted,
+ sum->OutPktsEncrypted,
MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
- sum.OutOctetsProtected,
+ sum->OutOctetsProtected,
MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
- sum.OutOctetsEncrypted,
+ sum->OutOctetsEncrypted,
MACSEC_TXSC_STATS_ATTR_PAD))
return -EMSGSIZE;
return 0;
}
-static noinline_for_stack int
-copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
+static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
{
- struct macsec_dev_stats sum = {0, };
+ struct macsec_dev *macsec = macsec_priv(dev);
int cpu;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops) {
+ ctx.stats.dev_stats = sum;
+ ctx.secy = &macsec_priv(dev)->secy;
+ macsec_offload(ops->mdo_get_dev_stats, &ctx);
+ }
+ return;
+ }
+
for_each_possible_cpu(cpu) {
const struct pcpu_secy_stats *stats;
struct macsec_dev_stats tmp;
unsigned int start;
- stats = per_cpu_ptr(pstats, cpu);
+ stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
- sum.OutPktsUntagged += tmp.OutPktsUntagged;
- sum.InPktsUntagged += tmp.InPktsUntagged;
- sum.OutPktsTooLong += tmp.OutPktsTooLong;
- sum.InPktsNoTag += tmp.InPktsNoTag;
- sum.InPktsBadTag += tmp.InPktsBadTag;
- sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
- sum.InPktsNoSCI += tmp.InPktsNoSCI;
- sum.InPktsOverrun += tmp.InPktsOverrun;
+ sum->OutPktsUntagged += tmp.OutPktsUntagged;
+ sum->InPktsUntagged += tmp.InPktsUntagged;
+ sum->OutPktsTooLong += tmp.OutPktsTooLong;
+ sum->InPktsNoTag += tmp.InPktsNoTag;
+ sum->InPktsBadTag += tmp.InPktsBadTag;
+ sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
+ sum->InPktsNoSCI += tmp.InPktsNoSCI;
+ sum->InPktsOverrun += tmp.InPktsOverrun;
}
+}
+static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
+{
if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
- sum.OutPktsUntagged,
+ sum->OutPktsUntagged,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
- sum.InPktsUntagged,
+ sum->InPktsUntagged,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
- sum.OutPktsTooLong,
+ sum->OutPktsTooLong,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
- sum.InPktsNoTag,
+ sum->InPktsNoTag,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
- sum.InPktsBadTag,
+ sum->InPktsBadTag,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
- sum.InPktsUnknownSCI,
+ sum->InPktsUnknownSCI,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
- sum.InPktsNoSCI,
+ sum->InPktsNoSCI,
MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
- sum.InPktsOverrun,
+ sum->InPktsOverrun,
MACSEC_SECY_STATS_ATTR_PAD))
return -EMSGSIZE;
@@ -2692,10 +2972,10 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
switch (secy->key_len) {
case MACSEC_GCM_AES_128_SAK_LEN:
- csid = MACSEC_DEFAULT_CIPHER_ID;
+ csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
break;
case MACSEC_GCM_AES_256_SAK_LEN:
- csid = MACSEC_CIPHER_ID_GCM_AES_256;
+ csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
break;
default:
goto cancel;
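
The reported cipher-suite ID now depends on the SAK length and the xpn flag together. A table-style sketch of the mapping (the 16/32-byte lengths are assumed to match MACSEC_GCM_AES_128_SAK_LEN and MACSEC_GCM_AES_256_SAK_LEN; suite names stand in for the real IDs):

#include <stdbool.h>
#include <stdio.h>

static const char *csid_name(int key_len, bool xpn)
{
	switch (key_len) {
	case 16: /* assumed MACSEC_GCM_AES_128_SAK_LEN */
		return xpn ? "GCM-AES-XPN-128" : "GCM-AES-128";
	case 32: /* assumed MACSEC_GCM_AES_256_SAK_LEN */
		return xpn ? "GCM-AES-XPN-256" : "GCM-AES-256";
	default:
		return "unknown";
	}
}

int main(void)
{
	printf("%s\n", csid_name(16, true));	/* GCM-AES-XPN-128 */
	printf("%s\n", csid_name(32, false));	/* GCM-AES-256 */
	return 0;
}
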
@@ -2734,7 +3014,12 @@ static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct macsec_tx_sc_stats tx_sc_stats = {0, };
+ struct macsec_tx_sa_stats tx_sa_stats = {0, };
+ struct macsec_rx_sc_stats rx_sc_stats = {0, };
+ struct macsec_rx_sa_stats rx_sa_stats = {0, };
struct macsec_dev *macsec = netdev_priv(dev);
+ struct macsec_dev_stats dev_stats = {0, };
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
struct nlattr *txsa_list, *rxsc_list;
struct macsec_rx_sc *rx_sc;
@@ -2765,7 +3050,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
if (!attr)
goto nla_put_failure;
- if (copy_tx_sc_stats(skb, tx_sc->stats)) {
+
+ get_tx_sc_stats(dev, &tx_sc_stats);
+ if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
nla_nest_cancel(skb, attr);
goto nla_put_failure;
}
@@ -2774,7 +3061,8 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
if (!attr)
goto nla_put_failure;
- if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
+ get_secy_stats(dev, &dev_stats);
+ if (copy_secy_stats(skb, &dev_stats)) {
nla_nest_cancel(skb, attr);
goto nla_put_failure;
}
@@ -2786,6 +3074,8 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
struct nlattr *txsa_nest;
+ u64 pn;
+ int pn_len;
if (!tx_sa)
continue;
@@ -2796,22 +3086,15 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
goto nla_put_failure;
}
- if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
- nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
- nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
- nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
- nla_nest_cancel(skb, txsa_nest);
- nla_nest_cancel(skb, txsa_list);
- goto nla_put_failure;
- }
-
attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
if (!attr) {
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
}
- if (copy_tx_sa_stats(skb, tx_sa->stats)) {
+ memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
+ get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
+ if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
@@ -2819,6 +3102,24 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
}
nla_nest_end(skb, attr);
+ if (secy->xpn) {
+ pn = tx_sa->next_pn;
+ pn_len = MACSEC_XPN_PN_LEN;
+ } else {
+ pn = tx_sa->next_pn_halves.lower;
+ pn_len = MACSEC_DEFAULT_PN_LEN;
+ }
+
+ if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
+ nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
+ nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
+ (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
+ nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
+ nla_nest_cancel(skb, txsa_nest);
+ nla_nest_cancel(skb, txsa_list);
+ goto nla_put_failure;
+ }
+
nla_nest_end(skb, txsa_nest);
}
nla_nest_end(skb, txsa_list);
@@ -2852,7 +3153,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
nla_nest_cancel(skb, rxsc_list);
goto nla_put_failure;
}
- if (copy_rx_sc_stats(skb, rx_sc->stats)) {
+ memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
+ get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
+ if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, rxsc_nest);
nla_nest_cancel(skb, rxsc_list);
@@ -2871,6 +3174,8 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
struct nlattr *rxsa_nest;
+ u64 pn;
+ int pn_len;
if (!rx_sa)
continue;
@@ -2891,7 +3196,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
nla_nest_cancel(skb, rxsc_list);
goto nla_put_failure;
}
- if (copy_rx_sa_stats(skb, rx_sa->stats)) {
+ memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
+ get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
+ if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, rxsa_list);
nla_nest_cancel(skb, rxsc_nest);
@@ -2900,9 +3207,18 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
}
nla_nest_end(skb, attr);
+ if (secy->xpn) {
+ pn = rx_sa->next_pn;
+ pn_len = MACSEC_XPN_PN_LEN;
+ } else {
+ pn = rx_sa->next_pn_halves.lower;
+ pn_len = MACSEC_DEFAULT_PN_LEN;
+ }
+
if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
- nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
+ nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
+ (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
nla_nest_cancel(skb, rxsa_nest);
nla_nest_cancel(skb, rxsc_nest);
@@ -3092,9 +3408,16 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
return ret;
}
-#define MACSEC_FEATURES \
+#define SW_MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
+/* If h/w offloading is enabled, use real device features save for
+ * VLAN_FEATURES - they require additional ops
+ * HW_MACSEC - no reason to report it
+ */
+#define REAL_DEV_FEATURES(dev) \
+ ((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
+
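
A sketch of the feature split the two macros express: offloaded devices inherit the real device's features minus the VLAN features and the HW_MACSEC bit, while the software path keeps only SG/HIGHDMA/FRAGLIST plus LLTX and software GSO. The bit values below are invented for the demo and do not match the NETIF_F_* layout:

#include <stdio.h>

#define F_SG		(1u << 0)
#define F_HIGHDMA	(1u << 1)
#define F_FRAGLIST	(1u << 2)
#define F_VLAN		(1u << 3)
#define F_HW_MACSEC	(1u << 4)
#define F_LLTX		(1u << 5)

static unsigned int macsec_features(unsigned int real, int offloaded)
{
	if (offloaded)
		return real & ~(F_VLAN | F_HW_MACSEC);
	return (real & (F_SG | F_HIGHDMA | F_FRAGLIST)) | F_LLTX;
}

int main(void)
{
	unsigned int real = F_SG | F_VLAN | F_HW_MACSEC;

	printf("offloaded: %#x, sw: %#x\n",
	       macsec_features(real, 1), macsec_features(real, 0));
	return 0;
}
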
static int macsec_dev_init(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
@@ -3111,8 +3434,12 @@ static int macsec_dev_init(struct net_device *dev)
return err;
}
- dev->features = real_dev->features & MACSEC_FEATURES;
- dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
+ if (macsec_is_offloaded(macsec)) {
+ dev->features = REAL_DEV_FEATURES(real_dev);
+ } else {
+ dev->features = real_dev->features & SW_MACSEC_FEATURES;
+ dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
+ }
dev->needed_headroom = real_dev->needed_headroom +
MACSEC_NEEDED_HEADROOM;
@@ -3141,7 +3468,10 @@ static netdev_features_t macsec_fix_features(struct net_device *dev,
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
- features &= (real_dev->features & MACSEC_FEATURES) |
+ if (macsec_is_offloaded(macsec))
+ return REAL_DEV_FEATURES(real_dev);
+
+ features &= (real_dev->features & SW_MACSEC_FEATURES) |
NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
features |= NETIF_F_LLTX;
@@ -3181,6 +3511,7 @@ static int macsec_dev_open(struct net_device *dev)
goto clear_allmulti;
}
+ ctx.secy = &macsec->secy;
err = macsec_offload(ops->mdo_dev_open, &ctx);
if (err)
goto clear_allmulti;
@@ -3212,8 +3543,10 @@ static int macsec_dev_stop(struct net_device *dev)
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
- if (ops)
+ if (ops) {
+ ctx.secy = &macsec->secy;
macsec_offload(ops->mdo_dev_stop, &ctx);
+ }
}
dev_mc_unsync(real_dev, dev);
@@ -3446,9 +3779,19 @@ static int macsec_changelink_common(struct net_device *dev,
case MACSEC_CIPHER_ID_GCM_AES_128:
case MACSEC_DEFAULT_CIPHER_ID:
secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
+ secy->xpn = false;
break;
case MACSEC_CIPHER_ID_GCM_AES_256:
secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
+ secy->xpn = false;
+ break;
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
+ secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
+ secy->xpn = true;
+ break;
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
+ secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
+ secy->xpn = true;
break;
default:
return -EINVAL;
@@ -3638,6 +3981,7 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
secy->protect_frames = true;
secy->replay_protect = false;
+ secy->xpn = DEFAULT_XPN;
secy->sci = sci;
secy->tx_sc.active = true;
@@ -3673,8 +4017,16 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
macsec->real_dev = real_dev;
- /* MACsec offloading is off by default */
- macsec->offload = MACSEC_OFFLOAD_OFF;
+ if (data && data[IFLA_MACSEC_OFFLOAD])
+ macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
+ else
+ /* MACsec offloading is off by default */
+ macsec->offload = MACSEC_OFFLOAD_OFF;
+
+ /* Check if the offloading mode is supported by the underlying layers */
+ if (macsec->offload != MACSEC_OFFLOAD_OFF &&
+ !macsec_check_offload(macsec->offload, macsec))
+ return -EOPNOTSUPP;
if (data && data[IFLA_MACSEC_ICV_LEN])
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
@@ -3717,6 +4069,20 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
goto del_dev;
}
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ if (ops) {
+ ctx.secy = &macsec->secy;
+ err = macsec_offload(ops->mdo_add_secy, &ctx);
+ if (err)
+ goto del_dev;
+ }
+ }
+
err = register_macsec_dev(real_dev, dev);
if (err < 0)
goto del_dev;
@@ -3769,6 +4135,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
switch (csid) {
case MACSEC_CIPHER_ID_GCM_AES_128:
case MACSEC_CIPHER_ID_GCM_AES_256:
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
case MACSEC_DEFAULT_CIPHER_ID:
if (icv_len < MACSEC_MIN_ICV_LEN ||
icv_len > MACSEC_STD_ICV_LEN)
@@ -3842,10 +4210,10 @@ static int macsec_fill_info(struct sk_buff *skb,
switch (secy->key_len) {
case MACSEC_GCM_AES_128_SAK_LEN:
- csid = MACSEC_DEFAULT_CIPHER_ID;
+ csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
break;
case MACSEC_GCM_AES_256_SAK_LEN:
- csid = MACSEC_CIPHER_ID_GCM_AES_256;
+ csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
break;
default:
goto nla_put_failure;
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index d7706a0346f2..68668a22b9dd 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -28,6 +28,7 @@
#include <linux/workqueue.h>
#include <net/devlink.h>
#include <net/ip.h>
+#include <net/flow_offload.h>
#include <uapi/linux/devlink.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/udp.h>
@@ -38,24 +39,48 @@ static struct dentry *nsim_dev_ddir;
#define NSIM_DEV_DUMMY_REGION_SIZE (1024 * 32)
+static int
+nsim_dev_take_snapshot(struct devlink *devlink, struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ void *dummy_data;
+
+ dummy_data = kmalloc(NSIM_DEV_DUMMY_REGION_SIZE, GFP_KERNEL);
+ if (!dummy_data)
+ return -ENOMEM;
+
+ get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
+
+ *data = dummy_data;
+
+ return 0;
+}
+
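
The rewritten write handler below follows a get/use/put pattern around the snapshot id. A standalone model of that control flow; every function here is a stub standing in for the devlink API, and all names are illustrative only:

#include <stdio.h>
#include <stdlib.h>

static int snapshot_id_get(int *id) { *id = 42; return 0; }
static void snapshot_id_put(int id) { (void)id; }
static int snapshot_create(void *data, int id) { (void)data; (void)id; return 0; }

int main(void)
{
	void *data = malloc(32);
	int id, err;

	if (!data)
		return 1;

	err = snapshot_id_get(&id);
	if (err)
		goto free_data;

	err = snapshot_create(data, id);
	/* Put the id reference whether or not creation succeeded. */
	snapshot_id_put(id);
	if (err)
		goto free_data;

	/* On success the region is assumed to own data and free it via
	 * the registered destructor, so it is not freed here.
	 */
	printf("snapshot %d created\n", id);
	return 0;

free_data:
	free(data);
	return 1;
}
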
static ssize_t nsim_dev_take_snapshot_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct nsim_dev *nsim_dev = file->private_data;
- void *dummy_data;
+ struct devlink *devlink;
+ u8 *dummy_data;
int err;
u32 id;
- dummy_data = kmalloc(NSIM_DEV_DUMMY_REGION_SIZE, GFP_KERNEL);
- if (!dummy_data)
- return -ENOMEM;
+ devlink = priv_to_devlink(nsim_dev);
- get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
+ err = nsim_dev_take_snapshot(devlink, NULL, &dummy_data);
+ if (err)
+ return err;
- id = devlink_region_snapshot_id_get(priv_to_devlink(nsim_dev));
+ err = devlink_region_snapshot_id_get(devlink, &id);
+ if (err) {
+ pr_err("Failed to get snapshot id\n");
+ kfree(dummy_data);
+ return err;
+ }
err = devlink_region_snapshot_create(nsim_dev->dummy_region,
- dummy_data, id, kfree);
+ dummy_data, id);
+ devlink_region_snapshot_id_put(devlink, id);
if (err) {
pr_err("Failed to create region snapshot\n");
kfree(dummy_data);
@@ -71,6 +96,98 @@ static const struct file_operations nsim_dev_take_snapshot_fops = {
.llseek = generic_file_llseek,
};
+static ssize_t nsim_dev_trap_fa_cookie_read(struct file *file,
+ char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev *nsim_dev = file->private_data;
+ struct flow_action_cookie *fa_cookie;
+ unsigned int buf_len;
+ ssize_t ret;
+ char *buf;
+
+ spin_lock(&nsim_dev->fa_cookie_lock);
+ fa_cookie = nsim_dev->fa_cookie;
+ if (!fa_cookie) {
+ ret = -EINVAL;
+ goto errout;
+ }
+ buf_len = fa_cookie->cookie_len * 2;
+ buf = kmalloc(buf_len, GFP_ATOMIC);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto errout;
+ }
+ bin2hex(buf, fa_cookie->cookie, fa_cookie->cookie_len);
+ spin_unlock(&nsim_dev->fa_cookie_lock);
+
+ ret = simple_read_from_buffer(data, count, ppos, buf, buf_len);
+
+ kfree(buf);
+ return ret;
+
+errout:
+ spin_unlock(&nsim_dev->fa_cookie_lock);
+ return ret;
+}
+
+static ssize_t nsim_dev_trap_fa_cookie_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev *nsim_dev = file->private_data;
+ struct flow_action_cookie *fa_cookie;
+ size_t cookie_len;
+ ssize_t ret;
+ char *buf;
+
+ if (*ppos != 0)
+ return -EINVAL;
+ cookie_len = (count - 1) / 2;
+ if ((count - 1) % 2)
+ return -EINVAL;
+ buf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = simple_write_to_buffer(buf, count, ppos, data, count);
+ if (ret < 0)
+ goto free_buf;
+
+ fa_cookie = kmalloc(sizeof(*fa_cookie) + cookie_len,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!fa_cookie) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+
+ fa_cookie->cookie_len = cookie_len;
+ ret = hex2bin(fa_cookie->cookie, buf, cookie_len);
+ if (ret)
+ goto free_fa_cookie;
+ kfree(buf);
+
+ spin_lock(&nsim_dev->fa_cookie_lock);
+ kfree(nsim_dev->fa_cookie);
+ nsim_dev->fa_cookie = fa_cookie;
+ spin_unlock(&nsim_dev->fa_cookie_lock);
+
+ return count;
+
+free_fa_cookie:
+ kfree(fa_cookie);
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations nsim_dev_trap_fa_cookie_fops = {
+ .open = simple_open,
+ .read = nsim_dev_trap_fa_cookie_read,
+ .write = nsim_dev_trap_fa_cookie_write,
+ .llseek = generic_file_llseek,
+};
+
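
The write handler above accepts a hex string (typically with a trailing newline), so a valid payload has an odd count and decodes to cookie_len = (count - 1) / 2 bytes. A standalone check of that math with a hand-rolled lowercase-only decoder in place of the kernel's hex2bin():

#include <stdio.h>
#include <string.h>

static int hexval(char c)
{
	if (c >= '0' && c <= '9') return c - '0';
	if (c >= 'a' && c <= 'f') return c - 'a' + 10;
	return -1;
}

int main(void)
{
	const char *buf = "deadbeef\n";	/* what a user might echo in */
	size_t count = strlen(buf);
	size_t cookie_len = (count - 1) / 2;

	if ((count - 1) % 2) {
		fprintf(stderr, "odd hex payload rejected\n");
		return 1;
	}
	for (size_t i = 0; i < cookie_len; i++)
		printf("%02x", hexval(buf[2 * i]) << 4 | hexval(buf[2 * i + 1]));
	printf(" (%zu bytes)\n", cookie_len);
	return 0;
}
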
static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
{
char dev_ddir_name[sizeof(DRV_NAME) + 10];
@@ -97,6 +214,17 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
&nsim_dev->dont_allow_reload);
debugfs_create_bool("fail_reload", 0600, nsim_dev->ddir,
&nsim_dev->fail_reload);
+ debugfs_create_file("trap_flow_action_cookie", 0600, nsim_dev->ddir,
+ nsim_dev, &nsim_dev_trap_fa_cookie_fops);
+ debugfs_create_bool("fail_trap_group_set", 0600,
+ nsim_dev->ddir,
+ &nsim_dev->fail_trap_group_set);
+ debugfs_create_bool("fail_trap_policer_set", 0600,
+ nsim_dev->ddir,
+ &nsim_dev->fail_trap_policer_set);
+ debugfs_create_bool("fail_trap_policer_counter_get", 0600,
+ nsim_dev->ddir,
+ &nsim_dev->fail_trap_policer_counter_get);
return 0;
}
@@ -245,11 +373,17 @@ static void nsim_devlink_param_load_driverinit_values(struct devlink *devlink)
#define NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX 16
+static const struct devlink_region_ops dummy_region_ops = {
+ .name = "dummy",
+ .destructor = &kfree,
+ .snapshot = nsim_dev_take_snapshot,
+};
+
static int nsim_dev_dummy_region_init(struct nsim_dev *nsim_dev,
struct devlink *devlink)
{
nsim_dev->dummy_region =
- devlink_region_create(devlink, "dummy",
+ devlink_region_create(devlink, &dummy_region_ops,
NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX,
NSIM_DEV_DUMMY_REGION_SIZE);
return PTR_ERR_OR_ZERO(nsim_dev->dummy_region);
@@ -268,6 +402,7 @@ struct nsim_trap_item {
struct nsim_trap_data {
struct delayed_work trap_report_dw;
struct nsim_trap_item *trap_items_arr;
+ u64 *trap_policers_cnt_arr;
struct nsim_dev *nsim_dev;
spinlock_t trap_lock; /* Protects trap_items_arr */
};
@@ -286,18 +421,47 @@ enum {
#define NSIM_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
- DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
NSIM_TRAP_METADATA)
+#define NSIM_TRAP_DROP_EXT(_id, _group_id, _metadata) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ NSIM_TRAP_METADATA | (_metadata))
#define NSIM_TRAP_EXCEPTION(_id, _group_id) \
DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
- DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
NSIM_TRAP_METADATA)
#define NSIM_TRAP_DRIVER_EXCEPTION(_id, _group_id) \
DEVLINK_TRAP_DRIVER(EXCEPTION, TRAP, NSIM_TRAP_ID_##_id, \
NSIM_TRAP_NAME_##_id, \
- DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
NSIM_TRAP_METADATA)
+#define NSIM_DEV_TRAP_POLICER_MIN_RATE 1
+#define NSIM_DEV_TRAP_POLICER_MAX_RATE 8000
+#define NSIM_DEV_TRAP_POLICER_MIN_BURST 8
+#define NSIM_DEV_TRAP_POLICER_MAX_BURST 65536
+
+#define NSIM_TRAP_POLICER(_id, _rate, _burst) \
+ DEVLINK_TRAP_POLICER(_id, _rate, _burst, \
+ NSIM_DEV_TRAP_POLICER_MAX_RATE, \
+ NSIM_DEV_TRAP_POLICER_MIN_RATE, \
+ NSIM_DEV_TRAP_POLICER_MAX_BURST, \
+ NSIM_DEV_TRAP_POLICER_MIN_BURST)
+
+static const struct devlink_trap_policer nsim_trap_policers_arr[] = {
+ NSIM_TRAP_POLICER(1, 1000, 128),
+ NSIM_TRAP_POLICER(2, 2000, 256),
+ NSIM_TRAP_POLICER(3, 3000, 512),
+};
+
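
Each policer carries min/max bounds for rate and burst; a set request outside that window is expected to be rejected (by the devlink core, as assumed here). A trivial standalone validator over the NSIM_DEV_TRAP_POLICER_* limits defined above:

#include <stdbool.h>
#include <stdio.h>

#define MIN_RATE	1
#define MAX_RATE	8000
#define MIN_BURST	8
#define MAX_BURST	65536

static bool policer_params_ok(unsigned long long rate, unsigned long long burst)
{
	return rate >= MIN_RATE && rate <= MAX_RATE &&
	       burst >= MIN_BURST && burst <= MAX_BURST;
}

int main(void)
{
	printf("rate 1000 burst 128: %s\n", policer_params_ok(1000, 128) ? "ok" : "reject");
	printf("rate 9000 burst 128: %s\n", policer_params_ok(9000, 128) ? "ok" : "reject");
	return 0;
}
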
+static const struct devlink_trap_group nsim_trap_groups_arr[] = {
+ DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 1),
+ DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 2),
+ DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 3),
+};
+
static const struct devlink_trap nsim_traps_arr[] = {
NSIM_TRAP_DROP(SMAC_MC, L2_DROPS),
NSIM_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
@@ -309,6 +473,10 @@ static const struct devlink_trap nsim_traps_arr[] = {
NSIM_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
NSIM_TRAP_EXCEPTION(TTL_ERROR, L3_DROPS),
NSIM_TRAP_DROP(TAIL_DROP, BUFFER_DROPS),
+ NSIM_TRAP_DROP_EXT(INGRESS_FLOW_ACTION_DROP, ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+ NSIM_TRAP_DROP_EXT(EGRESS_FLOW_ACTION_DROP, ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
};
#define NSIM_TRAP_L4_DATA_LEN 100
@@ -366,8 +534,13 @@ static void nsim_dev_trap_report(struct nsim_dev_port *nsim_dev_port)
spin_lock(&nsim_trap_data->trap_lock);
for (i = 0; i < ARRAY_SIZE(nsim_traps_arr); i++) {
+ struct flow_action_cookie *fa_cookie = NULL;
struct nsim_trap_item *nsim_trap_item;
struct sk_buff *skb;
+ bool has_fa_cookie;
+
+ has_fa_cookie = nsim_traps_arr[i].metadata_cap &
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE;
nsim_trap_item = &nsim_trap_data->trap_items_arr[i];
if (nsim_trap_item->action == DEVLINK_TRAP_ACTION_DROP)
@@ -383,10 +556,12 @@ static void nsim_dev_trap_report(struct nsim_dev_port *nsim_dev_port)
* softIRQs to prevent lockdep from complaining about
* "incosistent lock state".
*/
- local_bh_disable();
+
+ spin_lock_bh(&nsim_dev->fa_cookie_lock);
+ fa_cookie = has_fa_cookie ? nsim_dev->fa_cookie : NULL;
devlink_trap_report(devlink, skb, nsim_trap_item->trap_ctx,
- &nsim_dev_port->devlink_port);
- local_bh_enable();
+ &nsim_dev_port->devlink_port, fa_cookie);
+ spin_unlock_bh(&nsim_dev->fa_cookie_lock);
consume_skb(skb);
}
spin_unlock(&nsim_trap_data->trap_lock);
@@ -422,6 +597,7 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
static int nsim_dev_traps_init(struct devlink *devlink)
{
+ size_t policers_count = ARRAY_SIZE(nsim_trap_policers_arr);
struct nsim_dev *nsim_dev = devlink_priv(devlink);
struct nsim_trap_data *nsim_trap_data;
int err;
@@ -438,6 +614,14 @@ static int nsim_dev_traps_init(struct devlink *devlink)
goto err_trap_data_free;
}
+ nsim_trap_data->trap_policers_cnt_arr = kcalloc(policers_count,
+ sizeof(u64),
+ GFP_KERNEL);
+ if (!nsim_trap_data->trap_policers_cnt_arr) {
+ err = -ENOMEM;
+ goto err_trap_items_free;
+ }
+
/* The lock is used to protect the action state of the registered
* traps. The value is written by user and read in delayed work when
* iterating over all the traps.
@@ -446,10 +630,20 @@ static int nsim_dev_traps_init(struct devlink *devlink)
nsim_trap_data->nsim_dev = nsim_dev;
nsim_dev->trap_data = nsim_trap_data;
+ err = devlink_trap_policers_register(devlink, nsim_trap_policers_arr,
+ policers_count);
+ if (err)
+ goto err_trap_policers_cnt_free;
+
+ err = devlink_trap_groups_register(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
+ if (err)
+ goto err_trap_policers_unregister;
+
err = devlink_traps_register(devlink, nsim_traps_arr,
ARRAY_SIZE(nsim_traps_arr), NULL);
if (err)
- goto err_trap_items_free;
+ goto err_trap_groups_unregister;
INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
nsim_dev_trap_report_work);
@@ -458,6 +652,14 @@ static int nsim_dev_traps_init(struct devlink *devlink)
return 0;
+err_trap_groups_unregister:
+ devlink_trap_groups_unregister(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
+err_trap_policers_unregister:
+ devlink_trap_policers_unregister(devlink, nsim_trap_policers_arr,
+ ARRAY_SIZE(nsim_trap_policers_arr));
+err_trap_policers_cnt_free:
+ kfree(nsim_trap_data->trap_policers_cnt_arr);
err_trap_items_free:
kfree(nsim_trap_data->trap_items_arr);
err_trap_data_free:
@@ -472,6 +674,11 @@ static void nsim_dev_traps_exit(struct devlink *devlink)
cancel_delayed_work_sync(&nsim_dev->trap_data->trap_report_dw);
devlink_traps_unregister(devlink, nsim_traps_arr,
ARRAY_SIZE(nsim_traps_arr));
+ devlink_trap_groups_unregister(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
+ devlink_trap_policers_unregister(devlink, nsim_trap_policers_arr,
+ ARRAY_SIZE(nsim_trap_policers_arr));
+ kfree(nsim_dev->trap_data->trap_policers_cnt_arr);
kfree(nsim_dev->trap_data->trap_items_arr);
kfree(nsim_dev->trap_data);
}
@@ -610,6 +817,53 @@ nsim_dev_devlink_trap_action_set(struct devlink *devlink,
return 0;
}
+static int
+nsim_dev_devlink_trap_group_set(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ if (nsim_dev->fail_trap_group_set)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nsim_dev_devlink_trap_policer_set(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ if (nsim_dev->fail_trap_policer_set) {
+		NL_SET_ERR_MSG_MOD(extack, "User set up the operation to fail for testing purposes");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+nsim_dev_devlink_trap_policer_counter_get(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ u64 *cnt;
+
+ if (nsim_dev->fail_trap_policer_counter_get)
+ return -EINVAL;
+
+ cnt = &nsim_dev->trap_data->trap_policers_cnt_arr[policer->id - 1];
+ *p_drops = *cnt;
+ *cnt += jiffies % 64;
+
+ return 0;
+}
+
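
A standalone model of the fake drop counter implemented above: each read reports the stored value, then nudges it forward by a small pseudo-random increment so successive reads look like live drops (rand() stands in for jiffies):

#include <stdio.h>
#include <stdlib.h>

static unsigned long long cnt;

static unsigned long long counter_get(void)
{
	unsigned long long drops = cnt;

	cnt += rand() % 64;	/* stands in for jiffies % 64 */
	return drops;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("drops=%llu\n", counter_get());
	return 0;
}
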
static const struct devlink_ops nsim_dev_devlink_ops = {
.reload_down = nsim_dev_reload_down,
.reload_up = nsim_dev_reload_up,
@@ -617,6 +871,9 @@ static const struct devlink_ops nsim_dev_devlink_ops = {
.flash_update = nsim_dev_flash_update,
.trap_init = nsim_dev_devlink_trap_init,
.trap_action_set = nsim_dev_devlink_trap_action_set,
+ .trap_group_set = nsim_dev_devlink_trap_group_set,
+ .trap_policer_set = nsim_dev_devlink_trap_policer_set,
+ .trap_policer_counter_get = nsim_dev_devlink_trap_policer_counter_get,
};
#define NSIM_DEV_MAX_MACS_DEFAULT 32
@@ -780,6 +1037,7 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
nsim_dev->fw_update_status = true;
nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
+ spin_lock_init(&nsim_dev->fa_cookie_lock);
dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index ba8d9ad60feb..62958b238d50 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -271,14 +271,14 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
health->empty_reporter =
devlink_health_reporter_create(devlink,
&nsim_dev_empty_reporter_ops,
- 0, false, health);
+ 0, health);
if (IS_ERR(health->empty_reporter))
return PTR_ERR(health->empty_reporter);
health->dummy_reporter =
devlink_health_reporter_create(devlink,
&nsim_dev_dummy_reporter_ops,
- 0, false, health);
+ 0, health);
if (IS_ERR(health->dummy_reporter)) {
err = PTR_ERR(health->dummy_reporter);
goto err_empty_reporter_destroy;
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 2eb7b0dc1594..4ded54a21e1e 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -178,6 +178,11 @@ struct nsim_dev {
bool fail_reload;
struct devlink_region *dummy_region;
struct nsim_dev_health health;
+ struct flow_action_cookie *fa_cookie;
+ spinlock_t fa_cookie_lock; /* protects fa_cookie */
+ bool fail_trap_group_set;
+ bool fail_trap_policer_set;
+ bool fail_trap_policer_counter_get;
};
static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev)
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 9dabe03a668c..3fa33d27eeba 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -157,6 +157,14 @@ config MDIO_I2C
This is library mode.
+config MDIO_IPQ8064
+ tristate "Qualcomm IPQ8064 MDIO interface support"
+ depends on HAS_IOMEM && OF_MDIO
+ depends on MFD_SYSCON
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the IPQ8064 SoC
+
config MDIO_MOXART
tristate "MOXA ART MDIO interface support"
depends on ARCH_MOXART || COMPILE_TEST
@@ -171,6 +179,13 @@ config MDIO_MSCC_MIIM
This driver supports the MIIM (MDIO) interface found in the network
switches of the Microsemi SoCs
+config MDIO_MVUSB
+ tristate "Marvell USB to MDIO Adapter"
+ depends on USB
+ help
+ A USB to MDIO converter present on development boards for
+ Marvell's Link Street family of Ethernet switches.
+
config MDIO_OCTEON
tristate "Octeon and some ThunderX SOCs MDIO buses"
depends on (64BIT && OF_MDIO) || COMPILE_TEST
@@ -206,6 +221,12 @@ config MDIO_XGENE
This module provides a driver for the MDIO busses found in the
	  APM X-Gene SoCs.
+config MDIO_XPCS
+ tristate "Synopsys DesignWare XPCS controller"
+ help
+ This module provides helper functions for Synopsys DesignWare XPCS
+ controllers.
+
endif
endif
@@ -326,8 +347,8 @@ config BROADCOM_PHY
BCM5481, BCM54810 and BCM5482 PHYs.
config BCM84881_PHY
- bool "Broadcom BCM84881 PHY"
- depends on PHYLIB=y
+ tristate "Broadcom BCM84881 PHY"
+ depends on PHYLIB
---help---
Support the Broadcom BCM84881 PHY.
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index fe5badf13b65..2f5c7093a65b 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Linux PHY drivers and MDIO bus drivers
-libphy-y := phy.o phy-c45.o phy-core.o phy_device.o
+libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
+ linkmode.o
mdio-bus-y += mdio_bus.o mdio_device.o
ifdef CONFIG_MDIO_DEVICE
@@ -36,12 +37,15 @@ obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
+obj-$(CONFIG_MDIO_IPQ8064) += mdio-ipq8064.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
+obj-$(CONFIG_MDIO_MVUSB) += mdio-mvusb.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
+obj-$(CONFIG_MDIO_XPCS) += mdio-xpcs.o
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o
@@ -86,7 +90,7 @@ obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o
-obj-$(CONFIG_MICROSEMI_PHY) += mscc.o
+obj-$(CONFIG_MICROSEMI_PHY) += mscc/
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 31927b2c7d5a..41e7c1432497 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -290,17 +290,6 @@ static int aqr_read_status(struct phy_device *phydev)
return genphy_c45_read_status(phydev);
}
-static int aqr107_read_downshift_event(struct phy_device *phydev)
-{
- int val;
-
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS1);
- if (val < 0)
- return val;
-
- return !!(val & MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT);
-}
-
static int aqr107_read_rate(struct phy_device *phydev)
{
int val;
@@ -377,13 +366,7 @@ static int aqr107_read_status(struct phy_device *phydev)
break;
}
- val = aqr107_read_downshift_event(phydev);
- if (val <= 0)
- return val;
-
- phydev_warn(phydev, "Downshift occurred! Cabling may be defective.\n");
-
- /* Read downshifted rate from vendor register */
+ /* Read possibly downshifted rate from vendor register */
return aqr107_read_rate(phydev);
}
@@ -451,16 +434,11 @@ static int aqr107_set_tunable(struct phy_device *phydev,
*/
static int aqr107_wait_reset_complete(struct phy_device *phydev)
{
- int val, retries = 100;
-
- do {
- val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
- if (val < 0)
- return val;
- msleep(20);
- } while (!val && --retries);
+ int val;
- return val ? 0 : -ETIMEDOUT;
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_FW_ID, val, val != 0,
+ 20000, 2000000, false);
}
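
Both open-coded polling loops in this patch collapse into phy_read_mmd_poll_timeout(). A hedged userspace model of the underlying poll-until-condition-or-timeout pattern, not the kernel macro itself:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int read_poll_timeout(int (*read)(void), bool (*cond)(int),
			     unsigned int sleep_us, unsigned int timeout_us,
			     int *val)
{
	unsigned int waited = 0;

	for (;;) {
		*val = read();
		if (*val < 0)
			return *val;		/* propagate read errors */
		if (cond(*val))
			return 0;
		if (waited >= timeout_us)
			return -1;		/* -ETIMEDOUT in the kernel */
		usleep(sleep_us);
		waited += sleep_us;
	}
}

static int calls;
static int fake_read(void) { return ++calls >= 3; }
static bool nonzero(int v) { return v != 0; }

int main(void)
{
	int val;
	int err = read_poll_timeout(fake_read, nonzero, 1000, 10000, &val);

	printf("err=%d val=%d after %d reads\n", err, val, calls);
	return 0;
}
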
static void aqr107_chip_info(struct phy_device *phydev)
@@ -506,9 +484,6 @@ static int aqr107_config_init(struct phy_device *phydev)
if (!ret)
aqr107_chip_info(phydev);
- /* ensure that a latched downshift event is cleared */
- aqr107_read_downshift_event(phydev);
-
return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
}
@@ -533,9 +508,6 @@ static int aqcs109_config_init(struct phy_device *phydev)
if (ret)
return ret;
- /* ensure that a latched downshift event is cleared */
- aqr107_read_downshift_event(phydev);
-
return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
}
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index e0d3310957ff..e77b274a09fd 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -423,6 +423,28 @@ int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(bcm_phy_28nm_a0b0_afe_config_init);
+int bcm_phy_enable_jumbo(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL);
+ if (ret < 0)
+ return ret;
+
+ /* Enable extended length packet reception */
+ ret = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
+ ret | MII_BCM54XX_AUXCTL_ACTL_EXT_PKT_LEN);
+ if (ret < 0)
+ return ret;
+
+ /* Enable the elastic FIFO for raising the transmission limit from
+ * 4.5KB to 10KB, at the expense of an additional 16 ns in propagation
+ * latency.
+ */
+ return phy_set_bits(phydev, MII_BCM54XX_ECR, MII_BCM54XX_ECR_FIFOE);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_enable_jumbo);
+
MODULE_DESCRIPTION("Broadcom PHY Library");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index c86fb9d1240c..129df819be8c 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -65,5 +65,6 @@ void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
struct ethtool_stats *stats, u64 *data);
void bcm_phy_r_rc_cal_reset(struct phy_device *phydev);
int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev);
+int bcm_phy_enable_jumbo(struct phy_device *phydev);
#endif /* _LINUX_BCM_PHY_LIB_H */
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index af8eabe7a6d4..692048d86ab1 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -181,6 +181,10 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
if (ret)
return ret;
+ ret = bcm_phy_enable_jumbo(phydev);
+ if (ret)
+ return ret;
+
ret = bcm_phy_downshift_get(phydev, &count);
if (ret)
return ret;
diff --git a/drivers/net/phy/bcm84881.c b/drivers/net/phy/bcm84881.c
index 14d55a77eb28..3840d2adbbb9 100644
--- a/drivers/net/phy/bcm84881.c
+++ b/drivers/net/phy/bcm84881.c
@@ -22,30 +22,11 @@ enum {
static int bcm84881_wait_init(struct phy_device *phydev)
{
- unsigned int tries = 20;
- int ret, val;
-
- do {
- val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
- if (val < 0) {
- ret = val;
- break;
- }
- if (!(val & MDIO_CTRL1_RESET)) {
- ret = 0;
- break;
- }
- if (!--tries) {
- ret = -ETIMEDOUT;
- break;
- }
- msleep(100);
- } while (1);
+ int val;
- if (ret)
- phydev_err(phydev, "%s failed: %d\n", __func__, ret);
-
- return ret;
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ val, !(val & MDIO_CTRL1_RESET),
+ 100000, 2000000, false);
}
static int bcm84881_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index a62229a8b1a4..ae4873f2f86e 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -194,7 +194,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
/* Abort if we are using an untested phy. */
if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
+ BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M &&
+ BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54810)
return;
val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
@@ -272,10 +273,7 @@ static int bcm54xx_config_init(struct phy_device *phydev)
(phydev->dev_flags & PHY_BRCM_CLEAR_RGMII_MODE))
bcm_phy_write_shadow(phydev, BCM54XX_SHD_RGMII_MODE, 0);
- if ((phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) ||
- (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) ||
- (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
- bcm54xx_adjust_rxrefclk(phydev);
+ bcm54xx_adjust_rxrefclk(phydev);
if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E) {
err = bcm54210e_config_init(phydev);
@@ -315,6 +313,20 @@ static int bcm54xx_config_init(struct phy_device *phydev)
return 0;
}
+static int bcm54xx_resume(struct phy_device *phydev)
+{
+ int ret;
+
+	/* Writes to registers other than BMCR would be ignored
+ * unless we clear the PDOWN bit first
+ */
+ ret = genphy_resume(phydev);
+ if (ret < 0)
+ return ret;
+
+ return bcm54xx_config_init(phydev);
+}
+
static int bcm5482_config_init(struct phy_device *phydev)
{
int err, reg;
@@ -708,6 +720,8 @@ static struct phy_driver broadcom_drivers[] = {
.config_aneg = bcm5481_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = bcm54xx_resume,
}, {
.phy_id = PHY_ID_BCM5482,
.phy_id_mask = 0xfffffff0,
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index ac72a324fcd1..415c27310982 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -628,7 +628,7 @@ static void recalibrate(struct dp83640_clock *clock)
u16 cal_gpio, cfg0, evnt, ptp_trig, trigger, val;
trigger = CAL_TRIGGER;
- cal_gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PHYSYNC, 0);
+ cal_gpio = 1 + ptp_find_pin_unlocked(clock->ptp_clock, PTP_PF_PHYSYNC, 0);
if (cal_gpio < 1) {
pr_err("PHY calibration pin not available - PHY is not calibrated.");
return;
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 9a07ad137c2e..b55e3c0403ed 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/bitfield.h>
#include <dt-bindings/net/ti-dp83867.h>
@@ -21,6 +22,7 @@
#define DP83867_DEVADDR 0x1f
#define MII_DP83867_PHYCTRL 0x10
+#define MII_DP83867_PHYSTS 0x11
#define MII_DP83867_MICR 0x12
#define MII_DP83867_ISR 0x13
#define DP83867_CFG2 0x14
@@ -120,6 +122,24 @@
#define DP83867_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8)
#define DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT 8
+/* PHY STS bits */
+#define DP83867_PHYSTS_1000 BIT(15)
+#define DP83867_PHYSTS_100 BIT(14)
+#define DP83867_PHYSTS_DUPLEX BIT(13)
+#define DP83867_PHYSTS_LINK BIT(10)
+
+/* CFG2 bits */
+#define DP83867_DOWNSHIFT_EN (BIT(8) | BIT(9))
+#define DP83867_DOWNSHIFT_ATTEMPT_MASK (BIT(10) | BIT(11))
+#define DP83867_DOWNSHIFT_1_COUNT_VAL 0
+#define DP83867_DOWNSHIFT_2_COUNT_VAL 1
+#define DP83867_DOWNSHIFT_4_COUNT_VAL 2
+#define DP83867_DOWNSHIFT_8_COUNT_VAL 3
+#define DP83867_DOWNSHIFT_1_COUNT 1
+#define DP83867_DOWNSHIFT_2_COUNT 2
+#define DP83867_DOWNSHIFT_4_COUNT 4
+#define DP83867_DOWNSHIFT_8_COUNT 8
+
/* CFG3 bits */
#define DP83867_CFG3_INT_OE BIT(7)
#define DP83867_CFG3_ROBUST_AUTO_MDIX BIT(9)
@@ -292,6 +312,126 @@ static int dp83867_config_intr(struct phy_device *phydev)
return phy_write(phydev, MII_DP83867_MICR, micr_status);
}
+static int dp83867_read_status(struct phy_device *phydev)
+{
+ int status = phy_read(phydev, MII_DP83867_PHYSTS);
+ int ret;
+
+ ret = genphy_read_status(phydev);
+ if (ret)
+ return ret;
+
+ if (status < 0)
+ return status;
+
+ if (status & DP83867_PHYSTS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (status & DP83867_PHYSTS_1000)
+ phydev->speed = SPEED_1000;
+ else if (status & DP83867_PHYSTS_100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ return 0;
+}
+
+static int dp83867_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable, count;
+
+ val = phy_read(phydev, DP83867_CFG2);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(DP83867_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(DP83867_DOWNSHIFT_ATTEMPT_MASK, val);
+
+ switch (cnt) {
+ case DP83867_DOWNSHIFT_1_COUNT_VAL:
+ count = DP83867_DOWNSHIFT_1_COUNT;
+ break;
+ case DP83867_DOWNSHIFT_2_COUNT_VAL:
+ count = DP83867_DOWNSHIFT_2_COUNT;
+ break;
+ case DP83867_DOWNSHIFT_4_COUNT_VAL:
+ count = DP83867_DOWNSHIFT_4_COUNT;
+ break;
+ case DP83867_DOWNSHIFT_8_COUNT_VAL:
+ count = DP83867_DOWNSHIFT_8_COUNT;
+ break;
+ default:
+ return -EINVAL;
+	}
+
+ *data = enable ? count : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int dp83867_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ int val, count;
+
+ if (cnt > DP83867_DOWNSHIFT_8_COUNT)
+ return -E2BIG;
+
+ if (!cnt)
+ return phy_clear_bits(phydev, DP83867_CFG2,
+ DP83867_DOWNSHIFT_EN);
+
+ switch (cnt) {
+ case DP83867_DOWNSHIFT_1_COUNT:
+ count = DP83867_DOWNSHIFT_1_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_2_COUNT:
+ count = DP83867_DOWNSHIFT_2_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_4_COUNT:
+ count = DP83867_DOWNSHIFT_4_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_8_COUNT:
+ count = DP83867_DOWNSHIFT_8_COUNT_VAL;
+ break;
+ default:
+ phydev_err(phydev,
+ "Downshift count must be 1, 2, 4 or 8\n");
+ return -EINVAL;
+	}
+
+ val = DP83867_DOWNSHIFT_EN;
+ val |= FIELD_PREP(DP83867_DOWNSHIFT_ATTEMPT_MASK, count);
+
+ return phy_modify(phydev, DP83867_CFG2,
+ DP83867_DOWNSHIFT_EN | DP83867_DOWNSHIFT_ATTEMPT_MASK,
+ val);
+}
+
+static int dp83867_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return dp83867_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dp83867_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return dp83867_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
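
The downshift helpers above pack the attempt count with FIELD_PREP() and unpack it with FIELD_GET(). A hand-rolled sketch of those two helpers over the same two-bit mask (the kernel versions live in <linux/bitfield.h>):

#include <stdio.h>

#define DOWNSHIFT_ATTEMPT_MASK	(3u << 10)	/* BIT(10) | BIT(11) */

static unsigned int field_prep(unsigned int mask, unsigned int val)
{
	/* shift the value up into the bit positions named by the mask */
	return (val << __builtin_ctz(mask)) & mask;
}

static unsigned int field_get(unsigned int mask, unsigned int reg)
{
	/* extract the field back down to a plain count value */
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	unsigned int reg = field_prep(DOWNSHIFT_ATTEMPT_MASK, 2); /* 4 attempts */

	printf("reg=%#x cnt=%u\n", reg, field_get(DOWNSHIFT_ATTEMPT_MASK, reg));
	return 0;
}
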
static int dp83867_config_port_mirroring(struct phy_device *phydev)
{
struct dp83867_private *dp83867 =
@@ -472,6 +612,12 @@ static int dp83867_config_init(struct phy_device *phydev)
int ret, val, bs;
u16 delay;
+	/* Force speed optimization for the PHY even if it is strapped */
+ ret = phy_modify(phydev, DP83867_CFG2, DP83867_DOWNSHIFT_EN,
+ DP83867_DOWNSHIFT_EN);
+ if (ret)
+ return ret;
+
ret = dp83867_verify_rgmii_cfg(phydev);
if (ret)
return ret;
@@ -674,6 +820,10 @@ static struct phy_driver dp83867_driver[] = {
.config_init = dp83867_config_init,
.soft_reset = dp83867_phy_reset,
+ .read_status = dp83867_read_status,
+ .get_tunable = dp83867_get_tunable,
+ .set_tunable = dp83867_set_tunable,
+
.get_wol = dp83867_get_wol,
.set_wol = dp83867_set_wol,
diff --git a/drivers/net/phy/linkmode.c b/drivers/net/phy/linkmode.c
new file mode 100644
index 000000000000..f60560fe3499
--- /dev/null
+++ b/drivers/net/phy/linkmode.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/linkmode.h>
+
+/**
+ * linkmode_resolve_pause - resolve the allowable pause modes
+ * @local_adv: local advertisement in ethtool format
+ * @partner_adv: partner advertisement in ethtool format
+ * @tx_pause: pointer to bool to indicate whether transmit pause should be
+ * enabled.
+ * @rx_pause: pointer to bool to indicate whether receive pause should be
+ * enabled.
+ *
+ * Flow control is resolved according to our and the link partner's
+ * advertisements, using the following table drawn from the 802.3 specs:
+ * Local device Link partner
+ * Pause AsymDir Pause AsymDir Result
+ * 0 X 0 X Disabled
+ * 0 1 1 0 Disabled
+ * 0 1 1 1 TX
+ * 1 0 0 X Disabled
+ * 1 X 1 X TX+RX
+ * 1 1 0 1 RX
+ */
+void linkmode_resolve_pause(const unsigned long *local_adv,
+ const unsigned long *partner_adv,
+ bool *tx_pause, bool *rx_pause)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(m);
+
+ linkmode_and(m, local_adv, partner_adv);
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, m)) {
+ *tx_pause = true;
+ *rx_pause = true;
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, m)) {
+ *tx_pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ partner_adv);
+ *rx_pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ local_adv);
+ } else {
+ *tx_pause = false;
+ *rx_pause = false;
+ }
+}
+EXPORT_SYMBOL_GPL(linkmode_resolve_pause);
+
+/**
+ * linkmode_set_pause - set the pause mode advertisement
+ * @advertisement: advertisement in ethtool format
+ * @tx: boolean from ethtool struct ethtool_pauseparam tx_pause member
+ * @rx: boolean from ethtool struct ethtool_pauseparam rx_pause member
+ *
+ * Configure the advertised Pause and Asym_Pause bits according to the
+ * capabilities provided in @tx and @rx.
+ *
+ * We convert as follows:
+ * tx rx Pause AsymDir
+ * 0 0 0 0
+ * 0 1 1 1
+ * 1 0 0 1
+ * 1 1 1 0
+ *
+ * Note: this translation from ethtool tx/rx notation to the advertisement
+ * is actually very problematic. Here are some examples:
+ *
+ * For tx=0 rx=1, meaning transmit is unsupported, receive is supported:
+ *
+ * Local device Link partner
+ * Pause AsymDir Pause AsymDir Result
+ * 1 1 1 0 TX + RX - but we have no TX support.
+ * 1 1 0 1 Only this gives RX only
+ *
+ * For tx=1 rx=1, meaning we have the capability to transmit and receive
+ * pause frames:
+ *
+ * Local device Link partner
+ * Pause AsymDir Pause AsymDir Result
+ * 1 0 0 1 Disabled - but since we do support tx and rx,
+ * this should resolve to RX only.
+ *
+ * Hence, asking for:
+ * rx=1 tx=0 gives Pause+AsymDir advertisement, but we may end up
+ * resolving to tx+rx pause or only rx pause depending on
+ * the partner's advertisement.
+ * rx=0 tx=1 gives AsymDir only, which will only give tx pause if
+ * the partner's advertisement allows it.
+ * rx=1 tx=1 gives Pause only, which will only allow tx+rx pause
+ * if the other end also advertises Pause.
+ */
+void linkmode_set_pause(unsigned long *advertisement, bool tx, bool rx)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertisement, rx);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertisement,
+ rx ^ tx);
+}
+EXPORT_SYMBOL_GPL(linkmode_set_pause);
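+
+/* Example (illustrative sketch only): translating an ethtool pauseparam
+ * request into the advertisement, where "advertising" is assumed to be
+ * the caller's link mode mask:
+ *
+ *	linkmode_set_pause(advertising, pause->tx_pause, pause->rx_pause);
+ *
+ * Per the table above, tx=1 rx=1 sets Pause only, and tx=1 rx=0 sets
+ * AsymDir only.
+ */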
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 9a8badafea8a..4714ca0e0d4b 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -867,21 +867,6 @@ static int m88e1011_set_tunable(struct phy_device *phydev,
}
}
-static void m88e1011_link_change_notify(struct phy_device *phydev)
-{
- int status;
-
- if (phydev->state != PHY_RUNNING)
- return;
-
- /* we may be on fiber page currently */
- status = phy_read_paged(phydev, MII_MARVELL_COPPER_PAGE,
- MII_M1011_PHY_SSR);
-
- if (status > 0 && status & MII_M1011_PHY_SSR_DOWNSHIFT)
- phydev_warn(phydev, "Downshift occurred! Cabling may be defective.\n");
-}
-
static int m88e1116r_config_init(struct phy_device *phydev)
{
int err;
@@ -2201,7 +2186,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1011_get_tunable,
.set_tunable = m88e1011_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1111,
@@ -2223,7 +2207,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1111_get_tunable,
.set_tunable = m88e1111_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1118,
@@ -2264,7 +2247,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1011_get_tunable,
.set_tunable = m88e1011_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1318S,
@@ -2308,7 +2290,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1111_get_tunable,
.set_tunable = m88e1111_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1149R,
@@ -2364,7 +2345,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1011_get_tunable,
.set_tunable = m88e1011_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1510,
@@ -2390,7 +2370,6 @@ static struct phy_driver marvell_drivers[] = {
.set_loopback = genphy_loopback,
.get_tunable = m88e1011_get_tunable,
.set_tunable = m88e1011_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1540,
@@ -2413,7 +2392,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E1545,
@@ -2436,7 +2414,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
{
.phy_id = MARVELL_PHY_ID_88E3016,
@@ -2479,7 +2456,6 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1540_get_tunable,
.set_tunable = m88e1540_set_tunable,
- .link_change_notify = m88e1011_link_change_notify,
},
};
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 64c9f3bba2cd..7621badae64d 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -23,6 +23,7 @@
* link takes priority and the other port is completely locked out.
*/
#include <linux/ctype.h>
+#include <linux/delay.h>
#include <linux/hwmon.h>
#include <linux/marvell_phy.h>
#include <linux/phy.h>
@@ -39,10 +40,29 @@ enum {
MV_PCS_BASE_R = 0x1000,
MV_PCS_1000BASEX = 0x2000,
- MV_PCS_PAIRSWAP = 0x8182,
- MV_PCS_PAIRSWAP_MASK = 0x0003,
- MV_PCS_PAIRSWAP_AB = 0x0002,
- MV_PCS_PAIRSWAP_NONE = 0x0003,
+ MV_PCS_CSCR1 = 0x8000,
+ MV_PCS_CSCR1_ED_MASK = 0x0300,
+ MV_PCS_CSCR1_ED_OFF = 0x0000,
+ MV_PCS_CSCR1_ED_RX = 0x0200,
+ MV_PCS_CSCR1_ED_NLP = 0x0300,
+ MV_PCS_CSCR1_MDIX_MASK = 0x0060,
+ MV_PCS_CSCR1_MDIX_MDI = 0x0000,
+ MV_PCS_CSCR1_MDIX_MDIX = 0x0020,
+ MV_PCS_CSCR1_MDIX_AUTO = 0x0060,
+
+ MV_PCS_CSSR1 = 0x8008,
+ MV_PCS_CSSR1_SPD1_MASK = 0xc000,
+ MV_PCS_CSSR1_SPD1_SPD2 = 0xc000,
+ MV_PCS_CSSR1_SPD1_1000 = 0x8000,
+ MV_PCS_CSSR1_SPD1_100 = 0x4000,
+ MV_PCS_CSSR1_SPD1_10 = 0x0000,
+ MV_PCS_CSSR1_DUPLEX_FULL= BIT(13),
+ MV_PCS_CSSR1_RESOLVED = BIT(11),
+ MV_PCS_CSSR1_MDIX = BIT(6),
+ MV_PCS_CSSR1_SPD2_MASK = 0x000c,
+ MV_PCS_CSSR1_SPD2_5000 = 0x0008,
+ MV_PCS_CSSR1_SPD2_2500 = 0x0004,
+ MV_PCS_CSSR1_SPD2_10000 = 0x0000,
/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
* registers appear to set themselves to the 0x800X when AN is
@@ -207,6 +227,86 @@ static int mv3310_hwmon_probe(struct phy_device *phydev)
}
#endif
+static int mv3310_power_down(struct phy_device *phydev)
+{
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_PORT_CTRL_PWRDOWN);
+}
+
+static int mv3310_power_up(struct phy_device *phydev)
+{
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_PORT_CTRL_PWRDOWN);
+}
+
+static int mv3310_reset(struct phy_device *phydev, u32 unit)
+{
+ int val, err;
+
+ err = phy_modify_mmd(phydev, MDIO_MMD_PCS, unit + MDIO_CTRL1,
+ MDIO_CTRL1_RESET, MDIO_CTRL1_RESET);
+ if (err < 0)
+ return err;
+
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PCS,
+ unit + MDIO_CTRL1, val,
+ !(val & MDIO_CTRL1_RESET),
+ 5000, 100000, true);
+}
+
+static int mv3310_get_edpd(struct phy_device *phydev, u16 *edpd)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSCR1);
+ if (val < 0)
+ return val;
+
+ switch (val & MV_PCS_CSCR1_ED_MASK) {
+ case MV_PCS_CSCR1_ED_NLP:
+ *edpd = 1000;
+ break;
+ case MV_PCS_CSCR1_ED_RX:
+ *edpd = ETHTOOL_PHY_EDPD_NO_TX;
+ break;
+ default:
+ *edpd = ETHTOOL_PHY_EDPD_DISABLE;
+ break;
+ }
+ return 0;
+}
+
+static int mv3310_set_edpd(struct phy_device *phydev, u16 edpd)
+{
+ u16 val;
+ int err;
+
+ switch (edpd) {
+ case 1000:
+ case ETHTOOL_PHY_EDPD_DFLT_TX_MSECS:
+ val = MV_PCS_CSCR1_ED_NLP;
+ break;
+
+ case ETHTOOL_PHY_EDPD_NO_TX:
+ val = MV_PCS_CSCR1_ED_RX;
+ break;
+
+ case ETHTOOL_PHY_EDPD_DISABLE:
+ val = MV_PCS_CSCR1_ED_OFF;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ err = phy_modify_mmd_changed(phydev, MDIO_MMD_PCS, MV_PCS_CSCR1,
+ MV_PCS_CSCR1_ED_MASK, val);
+ if (err > 0)
+ err = mv3310_reset(phydev, MV_PCS_BASE_T);
+
+ return err;
+}
+
static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
struct phy_device *phydev = upstream;
@@ -255,6 +355,11 @@ static int mv3310_probe(struct phy_device *phydev)
dev_set_drvdata(&phydev->mdio.dev, priv);
+ /* Powering down the port when not in use saves about 600mW */
+ ret = mv3310_power_down(phydev);
+ if (ret)
+ return ret;
+
ret = mv3310_hwmon_probe(phydev);
if (ret)
return ret;
@@ -264,16 +369,14 @@ static int mv3310_probe(struct phy_device *phydev)
static int mv3310_suspend(struct phy_device *phydev)
{
- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
- MV_V2_PORT_CTRL_PWRDOWN);
+ return mv3310_power_down(phydev);
}
static int mv3310_resume(struct phy_device *phydev)
{
int ret;
- ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
- MV_V2_PORT_CTRL_PWRDOWN);
+ ret = mv3310_power_up(phydev);
if (ret)
return ret;
@@ -299,6 +402,8 @@ static bool mv3310_has_pma_ngbaset_quirk(struct phy_device *phydev)
static int mv3310_config_init(struct phy_device *phydev)
{
+ int err;
+
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
@@ -307,7 +412,15 @@ static int mv3310_config_init(struct phy_device *phydev)
phydev->interface != PHY_INTERFACE_MODE_10GBASER)
return -ENODEV;
- return 0;
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ /* Power up so reset works */
+ err = mv3310_power_up(phydev);
+ if (err)
+ return err;
+
+ /* Enable EDPD mode - saving 600mW */
+ return mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS);
}
static int mv3310_get_features(struct phy_device *phydev)
@@ -336,14 +449,42 @@ static int mv3310_get_features(struct phy_device *phydev)
return 0;
}
+static int mv3310_config_mdix(struct phy_device *phydev)
+{
+ u16 val;
+ int err;
+
+ switch (phydev->mdix_ctrl) {
+ case ETH_TP_MDI_AUTO:
+ val = MV_PCS_CSCR1_MDIX_AUTO;
+ break;
+ case ETH_TP_MDI_X:
+ val = MV_PCS_CSCR1_MDIX_MDIX;
+ break;
+ case ETH_TP_MDI:
+ val = MV_PCS_CSCR1_MDIX_MDI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = phy_modify_mmd_changed(phydev, MDIO_MMD_PCS, MV_PCS_CSCR1,
+ MV_PCS_CSCR1_MDIX_MASK, val);
+ if (err > 0)
+ err = mv3310_reset(phydev, MV_PCS_BASE_T);
+
+ return err;
+}
+
static int mv3310_config_aneg(struct phy_device *phydev)
{
bool changed = false;
u16 reg;
int ret;
- /* We don't support manual MDI control */
- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ ret = mv3310_config_mdix(phydev);
+ if (ret < 0)
+ return ret;
if (phydev->autoneg == AUTONEG_DISABLE)
return genphy_c45_pma_setup_forced(phydev);
@@ -413,35 +554,18 @@ static void mv3310_update_interface(struct phy_device *phydev)
}
/* 10GBASE-ER,LR,LRM,SR do not support autonegotiation. */
-static int mv3310_read_10gbr_status(struct phy_device *phydev)
+static int mv3310_read_status_10gbaser(struct phy_device *phydev)
{
phydev->link = 1;
phydev->speed = SPEED_10000;
phydev->duplex = DUPLEX_FULL;
- mv3310_update_interface(phydev);
-
return 0;
}
-static int mv3310_read_status(struct phy_device *phydev)
+static int mv3310_read_status_copper(struct phy_device *phydev)
{
- int val;
-
- phydev->speed = SPEED_UNKNOWN;
- phydev->duplex = DUPLEX_UNKNOWN;
- linkmode_zero(phydev->lp_advertising);
- phydev->link = 0;
- phydev->pause = 0;
- phydev->asym_pause = 0;
- phydev->mdix = 0;
-
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_BASE_R + MDIO_STAT1);
- if (val < 0)
- return val;
-
- if (val & MDIO_STAT1_LSTATUS)
- return mv3310_read_10gbr_status(phydev);
+ int cssr1, speed, val;
val = genphy_c45_read_link(phydev);
if (val < 0)
@@ -451,6 +575,52 @@ static int mv3310_read_status(struct phy_device *phydev)
if (val < 0)
return val;
+ cssr1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSSR1);
+ if (cssr1 < 0)
+ return cssr1;
+
+ /* If the link settings are not resolved, mark the link down */
+ if (!(cssr1 & MV_PCS_CSSR1_RESOLVED)) {
+ phydev->link = 0;
+ return 0;
+ }
+
+ /* Read the copper link settings */
+ speed = cssr1 & MV_PCS_CSSR1_SPD1_MASK;
+ if (speed == MV_PCS_CSSR1_SPD1_SPD2)
+ speed |= cssr1 & MV_PCS_CSSR1_SPD2_MASK;
+
+ switch (speed) {
+ case MV_PCS_CSSR1_SPD1_SPD2 | MV_PCS_CSSR1_SPD2_10000:
+ phydev->speed = SPEED_10000;
+ break;
+
+ case MV_PCS_CSSR1_SPD1_SPD2 | MV_PCS_CSSR1_SPD2_5000:
+ phydev->speed = SPEED_5000;
+ break;
+
+ case MV_PCS_CSSR1_SPD1_SPD2 | MV_PCS_CSSR1_SPD2_2500:
+ phydev->speed = SPEED_2500;
+ break;
+
+ case MV_PCS_CSSR1_SPD1_1000:
+ phydev->speed = SPEED_1000;
+ break;
+
+ case MV_PCS_CSSR1_SPD1_100:
+ phydev->speed = SPEED_100;
+ break;
+
+ case MV_PCS_CSSR1_SPD1_10:
+ phydev->speed = SPEED_10;
+ break;
+ }
+
+ phydev->duplex = cssr1 & MV_PCS_CSSR1_DUPLEX_FULL ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ phydev->mdix = cssr1 & MV_PCS_CSSR1_MDIX ?
+ ETH_TP_MDI_X : ETH_TP_MDI;
+
if (val & MDIO_AN_STAT1_COMPLETE) {
val = genphy_c45_read_lpa(phydev);
if (val < 0)
@@ -463,43 +633,64 @@ static int mv3310_read_status(struct phy_device *phydev)
mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
- if (phydev->autoneg == AUTONEG_ENABLE)
- phy_resolve_aneg_linkmode(phydev);
+ /* Update the pause status */
+ phy_resolve_aneg_pause(phydev);
}
- if (phydev->autoneg != AUTONEG_ENABLE) {
- val = genphy_c45_read_pma(phydev);
- if (val < 0)
- return val;
- }
+ return 0;
+}
- if (phydev->speed == SPEED_10000) {
- val = genphy_c45_read_mdix(phydev);
- if (val < 0)
- return val;
- } else {
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PAIRSWAP);
- if (val < 0)
- return val;
+static int mv3310_read_status(struct phy_device *phydev)
+{
+ int err, val;
- switch (val & MV_PCS_PAIRSWAP_MASK) {
- case MV_PCS_PAIRSWAP_AB:
- phydev->mdix = ETH_TP_MDI_X;
- break;
- case MV_PCS_PAIRSWAP_NONE:
- phydev->mdix = ETH_TP_MDI;
- break;
- default:
- phydev->mdix = ETH_TP_MDI_INVALID;
- break;
- }
- }
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ linkmode_zero(phydev->lp_advertising);
+ phydev->link = 0;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ phydev->mdix = ETH_TP_MDI_INVALID;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_BASE_R + MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_STAT1_LSTATUS)
+ err = mv3310_read_status_10gbaser(phydev);
+ else
+ err = mv3310_read_status_copper(phydev);
+ if (err < 0)
+ return err;
- mv3310_update_interface(phydev);
+ if (phydev->link)
+ mv3310_update_interface(phydev);
return 0;
}
+static int mv3310_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_EDPD:
+ return mv3310_get_edpd(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mv3310_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_EDPD:
+ return mv3310_set_edpd(phydev, *(const u16 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
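+
+/* Usage sketch (illustrative only): with these hooks registered, EDPD can
+ * be controlled from userspace through the ethtool PHY tunable interface,
+ * e.g. on a hypothetical interface "eth0":
+ *
+ *	ethtool --set-phy-tunable eth0 energy-detect-power-down on msecs 1000
+ *	ethtool --set-phy-tunable eth0 energy-detect-power-down off
+ */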
+
static struct phy_driver mv3310_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88X3310,
@@ -514,6 +705,8 @@ static struct phy_driver mv3310_drivers[] = {
.config_aneg = mv3310_config_aneg,
.aneg_done = mv3310_aneg_done,
.read_status = mv3310_read_status,
+ .get_tunable = mv3310_get_tunable,
+ .set_tunable = mv3310_set_tunable,
},
{
.phy_id = MARVELL_PHY_ID_88E2110,
@@ -527,6 +720,8 @@ static struct phy_driver mv3310_drivers[] = {
.config_aneg = mv3310_config_aneg,
.aneg_done = mv3310_aneg_done,
.read_status = mv3310_read_status,
+ .get_tunable = mv3310_get_tunable,
+ .set_tunable = mv3310_set_tunable,
},
};
diff --git a/drivers/net/phy/mdio-ipq8064.c b/drivers/net/phy/mdio-ipq8064.c
new file mode 100644
index 000000000000..1bd18857e1c5
--- /dev/null
+++ b/drivers/net/phy/mdio-ipq8064.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Qualcomm IPQ8064 MDIO interface driver
+ *
+ * Copyright (C) 2019 Christian Lamparter <chunkeey@gmail.com>
+ * Copyright (C) 2020 Ansuel Smith <ansuelsmth@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+
+/* MII address register definitions */
+#define MII_ADDR_REG_ADDR 0x10
+#define MII_BUSY BIT(0)
+#define MII_WRITE BIT(1)
+#define MII_CLKRANGE_60_100M (0 << 2)
+#define MII_CLKRANGE_100_150M (1 << 2)
+#define MII_CLKRANGE_20_35M (2 << 2)
+#define MII_CLKRANGE_35_60M (3 << 2)
+#define MII_CLKRANGE_150_250M (4 << 2)
+#define MII_CLKRANGE_250_300M (5 << 2)
+#define MII_CLKRANGE_MASK GENMASK(4, 2)
+#define MII_REG_SHIFT 6
+#define MII_REG_MASK GENMASK(10, 6)
+#define MII_ADDR_SHIFT 11
+#define MII_ADDR_MASK GENMASK(15, 11)
+
+#define MII_DATA_REG_ADDR 0x14
+
+#define MII_MDIO_DELAY_USEC (1000)
+#define MII_MDIO_RETRY_MSEC (10)
+
+struct ipq8064_mdio {
+ struct regmap *base; /* NSS_GMAC0_BASE */
+};
+
+static int
+ipq8064_mdio_wait_busy(struct ipq8064_mdio *priv)
+{
+ u32 busy;
+
+ return regmap_read_poll_timeout(priv->base, MII_ADDR_REG_ADDR, busy,
+ !(busy & MII_BUSY), MII_MDIO_DELAY_USEC,
+ MII_MDIO_RETRY_MSEC * USEC_PER_MSEC);
+}
+
+static int
+ipq8064_mdio_read(struct mii_bus *bus, int phy_addr, int reg_offset)
+{
+ u32 miiaddr = MII_BUSY | MII_CLKRANGE_250_300M;
+ struct ipq8064_mdio *priv = bus->priv;
+ u32 ret_val;
+ int err;
+
+ /* Reject clause 45 */
+ if (reg_offset & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ miiaddr |= ((phy_addr << MII_ADDR_SHIFT) & MII_ADDR_MASK) |
+ ((reg_offset << MII_REG_SHIFT) & MII_REG_MASK);
+
+ regmap_write(priv->base, MII_ADDR_REG_ADDR, miiaddr);
+ usleep_range(8, 10);
+
+ err = ipq8064_mdio_wait_busy(priv);
+ if (err)
+ return err;
+
+ regmap_read(priv->base, MII_DATA_REG_ADDR, &ret_val);
+ return (int)ret_val;
+}
+
+static int
+ipq8064_mdio_write(struct mii_bus *bus, int phy_addr, int reg_offset, u16 data)
+{
+ u32 miiaddr = MII_WRITE | MII_BUSY | MII_CLKRANGE_250_300M;
+ struct ipq8064_mdio *priv = bus->priv;
+
+ /* Reject clause 45 */
+ if (reg_offset & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ regmap_write(priv->base, MII_DATA_REG_ADDR, data);
+
+ miiaddr |= ((phy_addr << MII_ADDR_SHIFT) & MII_ADDR_MASK) |
+ ((reg_offset << MII_REG_SHIFT) & MII_REG_MASK);
+
+ regmap_write(priv->base, MII_ADDR_REG_ADDR, miiaddr);
+ usleep_range(8, 10);
+
+ return ipq8064_mdio_wait_busy(priv);
+}
+
+static int
+ipq8064_mdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct ipq8064_mdio *priv;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "ipq8064_mdio_bus";
+ bus->read = ipq8064_mdio_read;
+ bus->write = ipq8064_mdio_write;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
+ bus->parent = &pdev->dev;
+
+ priv = bus->priv;
+ priv->base = device_node_to_regmap(np);
+ if (IS_ERR(priv->base)) {
+ if (priv->base == ERR_PTR(-EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
+ dev_err(&pdev->dev, "error getting device regmap, error=%pe\n",
+ priv->base);
+ return PTR_ERR(priv->base);
+ }
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, bus);
+ return 0;
+}
+
+static int
+ipq8064_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+
+ return 0;
+}
+
+static const struct of_device_id ipq8064_mdio_dt_ids[] = {
+ { .compatible = "qcom,ipq8064-mdio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ipq8064_mdio_dt_ids);
+
+static struct platform_driver ipq8064_mdio_driver = {
+ .probe = ipq8064_mdio_probe,
+ .remove = ipq8064_mdio_remove,
+ .driver = {
+ .name = "ipq8064-mdio",
+ .of_match_table = ipq8064_mdio_dt_ids,
+ },
+};
+
+module_platform_driver(ipq8064_mdio_driver);
+
+MODULE_DESCRIPTION("Qualcomm IPQ8064 MDIO interface driver");
+MODULE_AUTHOR("Christian Lamparter <chunkeey@gmail.com>");
+MODULE_AUTHOR("Ansuel Smith <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index aad6809ebe39..42fb5f166136 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -10,6 +10,7 @@
#include <linux/phy.h>
#include <linux/mdio-mux.h>
#include <linux/delay.h>
+#include <linux/iopoll.h>
#define MDIO_RATE_ADJ_EXT_OFFSET 0x000
#define MDIO_RATE_ADJ_INT_OFFSET 0x004
@@ -78,18 +79,11 @@ static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md)
static int iproc_mdio_wait_for_idle(void __iomem *base, bool result)
{
- unsigned int timeout = 1000; /* loop for 1s */
u32 val;
- do {
- val = readl(base + MDIO_STAT_OFFSET);
- if ((val & MDIO_STAT_DONE) == result)
- return 0;
-
- usleep_range(1000, 2000);
- } while (timeout--);
-
- return -ETIMEDOUT;
+ return readl_poll_timeout(base + MDIO_STAT_OFFSET, val,
+ (val & MDIO_STAT_DONE) == result,
+ 2000, 1000000);
}
/* start_miim_ops- Program and start MDIO transaction over mdio bus.
diff --git a/drivers/net/phy/mdio-mvusb.c b/drivers/net/phy/mdio-mvusb.c
new file mode 100644
index 000000000000..d5eabddfdf51
--- /dev/null
+++ b/drivers/net/phy/mdio-mvusb.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/usb.h>
+
+#define USB_MARVELL_VID 0x1286
+
+static const struct usb_device_id mvusb_mdio_table[] = {
+ { USB_DEVICE(USB_MARVELL_VID, 0x1fa4) },
+
+ {}
+};
+MODULE_DEVICE_TABLE(usb, mvusb_mdio_table);
+
+enum {
+ MVUSB_CMD_PREAMBLE0,
+ MVUSB_CMD_PREAMBLE1,
+ MVUSB_CMD_ADDR,
+ MVUSB_CMD_VAL,
+};
+
+struct mvusb_mdio {
+ struct usb_device *udev;
+ struct mii_bus *mdio;
+
+ __le16 buf[4];
+};
+
+static int mvusb_mdio_read(struct mii_bus *mdio, int dev, int reg)
+{
+ struct mvusb_mdio *mvusb = mdio->priv;
+ int err, alen;
+
+ if (dev & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ mvusb->buf[MVUSB_CMD_ADDR] = cpu_to_le16(0xa400 | (dev << 5) | reg);
+
+ err = usb_bulk_msg(mvusb->udev, usb_sndbulkpipe(mvusb->udev, 2),
+ mvusb->buf, 6, &alen, 100);
+ if (err)
+ return err;
+
+ err = usb_bulk_msg(mvusb->udev, usb_rcvbulkpipe(mvusb->udev, 6),
+ &mvusb->buf[MVUSB_CMD_VAL], 2, &alen, 100);
+ if (err)
+ return err;
+
+ return le16_to_cpu(mvusb->buf[MVUSB_CMD_VAL]);
+}
+
+static int mvusb_mdio_write(struct mii_bus *mdio, int dev, int reg, u16 val)
+{
+ struct mvusb_mdio *mvusb = mdio->priv;
+ int alen;
+
+ if (dev & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ mvusb->buf[MVUSB_CMD_ADDR] = cpu_to_le16(0x8000 | (dev << 5) | reg);
+ mvusb->buf[MVUSB_CMD_VAL] = cpu_to_le16(val);
+
+ return usb_bulk_msg(mvusb->udev, usb_sndbulkpipe(mvusb->udev, 2),
+ mvusb->buf, 8, &alen, 100);
+}
+
+static int mvusb_mdio_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct device *dev = &interface->dev;
+ struct mvusb_mdio *mvusb;
+ struct mii_bus *mdio;
+
+ mdio = devm_mdiobus_alloc_size(dev, sizeof(*mvusb));
+ if (!mdio)
+ return -ENOMEM;
+
+ mvusb = mdio->priv;
+ mvusb->mdio = mdio;
+ mvusb->udev = usb_get_dev(interface_to_usbdev(interface));
+
+ /* Reversed from USB PCAPs, no idea what these mean. */
+ mvusb->buf[MVUSB_CMD_PREAMBLE0] = cpu_to_le16(0xe800);
+ mvusb->buf[MVUSB_CMD_PREAMBLE1] = cpu_to_le16(0x0001);
+
+ snprintf(mdio->id, MII_BUS_ID_SIZE, "mvusb-%s", dev_name(dev));
+ mdio->name = mdio->id;
+ mdio->parent = dev;
+ mdio->read = mvusb_mdio_read;
+ mdio->write = mvusb_mdio_write;
+
+ usb_set_intfdata(interface, mvusb);
+ return of_mdiobus_register(mdio, dev->of_node);
+}
+
+static void mvusb_mdio_disconnect(struct usb_interface *interface)
+{
+ struct mvusb_mdio *mvusb = usb_get_intfdata(interface);
+ struct usb_device *udev = mvusb->udev;
+
+ mdiobus_unregister(mvusb->mdio);
+ usb_set_intfdata(interface, NULL);
+ usb_put_dev(udev);
+}
+
+static struct usb_driver mvusb_mdio_driver = {
+ .name = "mvusb_mdio",
+ .id_table = mvusb_mdio_table,
+ .probe = mvusb_mdio_probe,
+ .disconnect = mvusb_mdio_disconnect,
+};
+
+module_usb_driver(mvusb_mdio_driver);
+
+MODULE_AUTHOR("Tobias Waldekranz <tobias@waldekranz.com>");
+MODULE_DESCRIPTION("Marvell USB MDIO Adapter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio-xpcs.c b/drivers/net/phy/mdio-xpcs.c
new file mode 100644
index 000000000000..0d66a8ba7eb6
--- /dev/null
+++ b/drivers/net/phy/mdio-xpcs.c
@@ -0,0 +1,716 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare XPCS helpers
+ *
+ * Author: Jose Abreu <Jose.Abreu@synopsys.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/mdio.h>
+#include <linux/mdio-xpcs.h>
+#include <linux/phylink.h>
+#include <linux/workqueue.h>
+
+#define SYNOPSYS_XPCS_USXGMII_ID 0x7996ced0
+#define SYNOPSYS_XPCS_10GKR_ID 0x7996ced0
+#define SYNOPSYS_XPCS_XLGMII_ID 0x7996ced0
+#define SYNOPSYS_XPCS_MASK 0xffffffff
+
+/* Vendor regs access */
+#define DW_VENDOR BIT(15)
+
+/* VR_XS_PCS */
+#define DW_USXGMII_RST BIT(10)
+#define DW_USXGMII_EN BIT(9)
+#define DW_VR_XS_PCS_DIG_STS 0x0010
+#define DW_RXFIFO_ERR GENMASK(6, 5)
+
+/* SR_MII */
+#define DW_USXGMII_FULL BIT(8)
+#define DW_USXGMII_SS_MASK (BIT(13) | BIT(6) | BIT(5))
+#define DW_USXGMII_10000 (BIT(13) | BIT(6))
+#define DW_USXGMII_5000 (BIT(13) | BIT(5))
+#define DW_USXGMII_2500 (BIT(5))
+#define DW_USXGMII_1000 (BIT(6))
+#define DW_USXGMII_100 (BIT(13))
+#define DW_USXGMII_10 (0)
+
+/* SR_AN */
+#define DW_SR_AN_ADV1 0x10
+#define DW_SR_AN_ADV2 0x11
+#define DW_SR_AN_ADV3 0x12
+#define DW_SR_AN_LP_ABL1 0x13
+#define DW_SR_AN_LP_ABL2 0x14
+#define DW_SR_AN_LP_ABL3 0x15
+
+/* Clause 73 Defines */
+/* AN_LP_ABL1 */
+#define DW_C73_PAUSE BIT(10)
+#define DW_C73_ASYM_PAUSE BIT(11)
+#define DW_C73_AN_ADV_SF 0x1
+/* AN_LP_ABL2 */
+#define DW_C73_1000KX BIT(5)
+#define DW_C73_10000KX4 BIT(6)
+#define DW_C73_10000KR BIT(7)
+/* AN_LP_ABL3 */
+#define DW_C73_2500KX BIT(0)
+#define DW_C73_5000KR BIT(1)
+
+static const int xpcs_usxgmii_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_Autoneg_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
+static const int xpcs_10gkr_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
+static const int xpcs_xlgmii_features[] = {
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
+static const phy_interface_t xpcs_usxgmii_interfaces[] = {
+ PHY_INTERFACE_MODE_USXGMII,
+ PHY_INTERFACE_MODE_MAX,
+};
+
+static const phy_interface_t xpcs_10gkr_interfaces[] = {
+ PHY_INTERFACE_MODE_10GKR,
+ PHY_INTERFACE_MODE_MAX,
+};
+
+static const phy_interface_t xpcs_xlgmii_interfaces[] = {
+ PHY_INTERFACE_MODE_XLGMII,
+ PHY_INTERFACE_MODE_MAX,
+};
+
+static struct xpcs_id {
+ u32 id;
+ u32 mask;
+ const int *supported;
+ const phy_interface_t *interface;
+} xpcs_id_list[] = {
+ {
+ .id = SYNOPSYS_XPCS_USXGMII_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .supported = xpcs_usxgmii_features,
+ .interface = xpcs_usxgmii_interfaces,
+ }, {
+ .id = SYNOPSYS_XPCS_10GKR_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .supported = xpcs_10gkr_features,
+ .interface = xpcs_10gkr_interfaces,
+ }, {
+ .id = SYNOPSYS_XPCS_XLGMII_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .supported = xpcs_xlgmii_features,
+ .interface = xpcs_xlgmii_interfaces,
+ },
+};
+
+static int xpcs_read(struct mdio_xpcs_args *xpcs, int dev, u32 reg)
+{
+ u32 reg_addr = MII_ADDR_C45 | dev << 16 | reg;
+
+ return mdiobus_read(xpcs->bus, xpcs->addr, reg_addr);
+}
+
+static int xpcs_write(struct mdio_xpcs_args *xpcs, int dev, u32 reg, u16 val)
+{
+ u32 reg_addr = MII_ADDR_C45 | dev << 16 | reg;
+
+ return mdiobus_write(xpcs->bus, xpcs->addr, reg_addr, val);
+}
+
+static int xpcs_read_vendor(struct mdio_xpcs_args *xpcs, int dev, u32 reg)
+{
+ return xpcs_read(xpcs, dev, DW_VENDOR | reg);
+}
+
+static int xpcs_write_vendor(struct mdio_xpcs_args *xpcs, int dev, int reg,
+ u16 val)
+{
+ return xpcs_write(xpcs, dev, DW_VENDOR | reg, val);
+}
+
+static int xpcs_read_vpcs(struct mdio_xpcs_args *xpcs, int reg)
+{
+ return xpcs_read_vendor(xpcs, MDIO_MMD_PCS, reg);
+}
+
+static int xpcs_write_vpcs(struct mdio_xpcs_args *xpcs, int reg, u16 val)
+{
+ return xpcs_write_vendor(xpcs, MDIO_MMD_PCS, reg, val);
+}
+
+static int xpcs_poll_reset(struct mdio_xpcs_args *xpcs, int dev)
+{
+ /* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
+ unsigned int retries = 12;
+ int ret;
+
+ do {
+ msleep(50);
+ ret = xpcs_read(xpcs, dev, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+ } while (ret & MDIO_CTRL1_RESET && --retries);
+
+ return (ret & MDIO_CTRL1_RESET) ? -ETIMEDOUT : 0;
+}
+
+static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs, int dev)
+{
+ int ret;
+
+ ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET);
+ if (ret < 0)
+ return ret;
+
+ return xpcs_poll_reset(xpcs, dev);
+}
+
+#define xpcs_warn(__xpcs, __state, __args...) \
+({ \
+ if ((__state)->link) \
+ dev_warn(&(__xpcs)->bus->dev, ##__args); \
+})
+
+static int xpcs_read_fault(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_STAT1_FAULT) {
+ xpcs_warn(xpcs, state, "Link fault condition detected!\n");
+ return -EFAULT;
+ }
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_STAT2_RXFAULT)
+ xpcs_warn(xpcs, state, "Receiver fault detected!\n");
+ if (ret & MDIO_STAT2_TXFAULT)
+ xpcs_warn(xpcs, state, "Transmitter fault detected!\n");
+
+ ret = xpcs_read_vendor(xpcs, MDIO_MMD_PCS, DW_VR_XS_PCS_DIG_STS);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DW_RXFIFO_ERR) {
+ xpcs_warn(xpcs, state, "FIFO fault condition detected!\n");
+ return -EFAULT;
+ }
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_PCS_10GBRT_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_PCS_10GBRT_STAT1_BLKLK))
+ xpcs_warn(xpcs, state, "Link is not locked!\n");
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_PCS_10GBRT_STAT2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_PCS_10GBRT_STAT2_ERR) {
+ xpcs_warn(xpcs, state, "Link has errors!\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int xpcs_read_link(struct mdio_xpcs_args *xpcs, bool an)
+{
+ bool link = true;
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_STAT1_LSTATUS))
+ link = false;
+
+ if (an) {
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_STAT1_LSTATUS))
+ link = false;
+ }
+
+ return link;
+}
+
+static int xpcs_get_max_usxgmii_speed(const unsigned long *supported)
+{
+ int max = SPEED_UNKNOWN;
+
+ if (phylink_test(supported, 1000baseKX_Full))
+ max = SPEED_1000;
+ if (phylink_test(supported, 2500baseX_Full))
+ max = SPEED_2500;
+ if (phylink_test(supported, 10000baseKX4_Full))
+ max = SPEED_10000;
+ if (phylink_test(supported, 10000baseKR_Full))
+ max = SPEED_10000;
+
+ return max;
+}
+
+static int xpcs_config_usxgmii(struct mdio_xpcs_args *xpcs, int speed)
+{
+ int ret, speed_sel;
+
+ switch (speed) {
+ case SPEED_10:
+ speed_sel = DW_USXGMII_10;
+ break;
+ case SPEED_100:
+ speed_sel = DW_USXGMII_100;
+ break;
+ case SPEED_1000:
+ speed_sel = DW_USXGMII_1000;
+ break;
+ case SPEED_2500:
+ speed_sel = DW_USXGMII_2500;
+ break;
+ case SPEED_5000:
+ speed_sel = DW_USXGMII_5000;
+ break;
+ case SPEED_10000:
+ speed_sel = DW_USXGMII_10000;
+ break;
+ default:
+ /* Unsupported speed */
+ return -EINVAL;
+ }
+
+ ret = xpcs_read_vpcs(xpcs, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_EN);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~DW_USXGMII_SS_MASK;
+ ret |= speed_sel | DW_USXGMII_FULL;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, ret);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_read_vpcs(xpcs, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ return xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_RST);
+}
+
+static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
+{
+ int ret, adv;
+
+ /* By default, in USXGMII mode XPCS operates at 10G baud and
+ * replicates data to achieve lower speeds. Hence, in this
+ * default configuration we need to advertise all supported
+ * modes and not only the ones we want to use.
+ */
+
+ /* SR_AN_ADV3 */
+ adv = 0;
+ if (phylink_test(xpcs->supported, 2500baseX_Full))
+ adv |= DW_C73_2500KX;
+
+ /* TODO: 5000baseKR */
+
+ ret = xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV3, adv);
+ if (ret < 0)
+ return ret;
+
+ /* SR_AN_ADV2 */
+ adv = 0;
+ if (phylink_test(xpcs->supported, 1000baseKX_Full))
+ adv |= DW_C73_1000KX;
+ if (phylink_test(xpcs->supported, 10000baseKX4_Full))
+ adv |= DW_C73_10000KX4;
+ if (phylink_test(xpcs->supported, 10000baseKR_Full))
+ adv |= DW_C73_10000KR;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV2, adv);
+ if (ret < 0)
+ return ret;
+
+ /* SR_AN_ADV1 */
+ adv = DW_C73_AN_ADV_SF;
+ if (phylink_test(xpcs->supported, Pause))
+ adv |= DW_C73_PAUSE;
+ if (phylink_test(xpcs->supported, Asym_Pause))
+ adv |= DW_C73_ASYM_PAUSE;
+
+ return xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV1, adv);
+}
+
+static int xpcs_config_aneg(struct mdio_xpcs_args *xpcs)
+{
+ int ret;
+
+ ret = xpcs_config_aneg_c73(xpcs);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ ret |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART;
+
+ return xpcs_write(xpcs, MDIO_MMD_AN, MDIO_CTRL1, ret);
+}
+
+static int xpcs_aneg_done(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_AN_STAT1_COMPLETE) {
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL1);
+ if (ret < 0)
+ return ret;
+
+ /* Check if Aneg outcome is valid */
+ if (!(ret & DW_C73_AN_ADV_SF)) {
+ xpcs_config_aneg(xpcs);
+ return 0;
+ }
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int xpcs_read_lpa(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_AN_STAT1_LPABLE)) {
+ phylink_clear(state->lp_advertising, Autoneg);
+ return 0;
+ }
+
+ phylink_set(state->lp_advertising, Autoneg);
+
+ /* Clause 73 outcome */
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL3);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DW_C73_2500KX)
+ phylink_set(state->lp_advertising, 2500baseX_Full);
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DW_C73_1000KX)
+ phylink_set(state->lp_advertising, 1000baseKX_Full);
+ if (ret & DW_C73_10000KX4)
+ phylink_set(state->lp_advertising, 10000baseKX4_Full);
+ if (ret & DW_C73_10000KR)
+ phylink_set(state->lp_advertising, 10000baseKR_Full);
+
+ ret = xpcs_read(xpcs, MDIO_MMD_AN, DW_SR_AN_LP_ABL1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DW_C73_PAUSE)
+ phylink_set(state->lp_advertising, Pause);
+ if (ret & DW_C73_ASYM_PAUSE)
+ phylink_set(state->lp_advertising, Asym_Pause);
+
+ linkmode_and(state->lp_advertising, state->lp_advertising,
+ state->advertising);
+ return 0;
+}
+
+static void xpcs_resolve_lpa(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int max_speed = xpcs_get_max_usxgmii_speed(state->lp_advertising);
+
+ state->pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
+ state->speed = max_speed;
+ state->duplex = DUPLEX_FULL;
+}
+
+static int xpcs_get_max_xlgmii_speed(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ unsigned long *adv = state->advertising;
+ int speed = SPEED_UNKNOWN;
+ int bit;
+
+ for_each_set_bit(bit, adv, __ETHTOOL_LINK_MODE_MASK_NBITS) {
+ int new_speed = SPEED_UNKNOWN;
+
+ switch (bit) {
+ case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
+ case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
+ case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
+ new_speed = SPEED_25000;
+ break;
+ case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
+ case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
+ case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
+ case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
+ new_speed = SPEED_40000;
+ break;
+ case ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT:
+ case ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT:
+ case ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT:
+ case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
+ case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
+ case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
+ case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
+ case ETHTOOL_LINK_MODE_50000baseDR_Full_BIT:
+ new_speed = SPEED_50000;
+ break;
+ case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT:
+ case ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT:
+ new_speed = SPEED_100000;
+ break;
+ default:
+ continue;
+ }
+
+ if (new_speed > speed)
+ speed = new_speed;
+ }
+
+ return speed;
+}
+
+static void xpcs_resolve_pma(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ state->pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
+ state->duplex = DUPLEX_FULL;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_10GKR:
+ state->speed = SPEED_10000;
+ break;
+ case PHY_INTERFACE_MODE_XLGMII:
+ state->speed = xpcs_get_max_xlgmii_speed(xpcs, state);
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+}
+
+static int xpcs_validate(struct mdio_xpcs_args *xpcs,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ linkmode_and(supported, supported, xpcs->supported);
+ linkmode_and(state->advertising, state->advertising, xpcs->supported);
+ return 0;
+}
+
+static int xpcs_config(struct mdio_xpcs_args *xpcs,
+ const struct phylink_link_state *state)
+{
+ int ret;
+
+ if (state->an_enabled) {
+ ret = xpcs_config_aneg(xpcs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xpcs_get_state(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state)
+{
+ int ret;
+
+ /* Link needs to be read first ... */
+ state->link = xpcs_read_link(xpcs, state->an_enabled) > 0 ? 1 : 0;
+
+ /* ... and then we check the faults. */
+ ret = xpcs_read_fault(xpcs, state);
+ if (ret) {
+ ret = xpcs_soft_reset(xpcs, MDIO_MMD_PCS);
+ if (ret)
+ return ret;
+
+ state->link = 0;
+
+ return xpcs_config(xpcs, state);
+ }
+
+ if (state->an_enabled && xpcs_aneg_done(xpcs, state)) {
+ state->an_complete = true;
+ xpcs_read_lpa(xpcs, state);
+ xpcs_resolve_lpa(xpcs, state);
+ } else if (state->an_enabled) {
+ state->link = 0;
+ } else if (state->link) {
+ xpcs_resolve_pma(xpcs, state);
+ }
+
+ return 0;
+}
+
+static int xpcs_link_up(struct mdio_xpcs_args *xpcs, int speed,
+ phy_interface_t interface)
+{
+ if (interface == PHY_INTERFACE_MODE_USXGMII)
+ return xpcs_config_usxgmii(xpcs, speed);
+
+ return 0;
+}
+
+static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs)
+{
+ int ret;
+ u32 id;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID1);
+ if (ret < 0)
+ return 0xffffffff;
+
+ id = ret << 16;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID2);
+ if (ret < 0)
+ return 0xffffffff;
+
+ return id | ret;
+}
+
+static bool xpcs_check_features(struct mdio_xpcs_args *xpcs,
+ struct xpcs_id *match,
+ phy_interface_t interface)
+{
+ int i;
+
+ for (i = 0; match->interface[i] != PHY_INTERFACE_MODE_MAX; i++) {
+ if (match->interface[i] == interface)
+ break;
+ }
+
+ if (match->interface[i] == PHY_INTERFACE_MODE_MAX)
+ return false;
+
+ for (i = 0; match->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
+ set_bit(match->supported[i], xpcs->supported);
+
+ return true;
+}
+
+static int xpcs_probe(struct mdio_xpcs_args *xpcs, phy_interface_t interface)
+{
+ u32 xpcs_id = xpcs_get_id(xpcs);
+ struct xpcs_id *match = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xpcs_id_list); i++) {
+ struct xpcs_id *entry = &xpcs_id_list[i];
+
+ if ((xpcs_id & entry->mask) == entry->id) {
+ match = entry;
+
+ if (xpcs_check_features(xpcs, match, interface))
+ return xpcs_soft_reset(xpcs, MDIO_MMD_PCS);
+ }
+ }
+
+ return -ENODEV;
+}
+
+static struct mdio_xpcs_ops xpcs_ops = {
+ .validate = xpcs_validate,
+ .config = xpcs_config,
+ .get_state = xpcs_get_state,
+ .link_up = xpcs_link_up,
+ .probe = xpcs_probe,
+};
+
+struct mdio_xpcs_ops *mdio_xpcs_get_ops(void)
+{
+ return &xpcs_ops;
+}
+EXPORT_SYMBOL_GPL(mdio_xpcs_get_ops);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 9bb9f37f21dc..522760c8bca6 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -462,6 +462,23 @@ static struct class mdio_bus_class = {
.dev_groups = mdio_bus_groups,
};
+/**
+ * mdio_find_bus - Given the name of a mdiobus, find the mii_bus.
+ * @mdio_name: The name of the mdiobus to find.
+ *
+ * Returns a reference to the mii_bus, or NULL if none found. The
+ * embedded struct device will have its reference count incremented,
+ * and this must be put_device()'d once the bus is finished with.
+ */
+struct mii_bus *mdio_find_bus(const char *mdio_name)
+{
+ struct device *d;
+
+ d = class_find_device_by_name(&mdio_bus_class, mdio_name);
+ return d ? to_mii_bus(d) : NULL;
+}
+EXPORT_SYMBOL(mdio_find_bus);
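+
+/* Example (illustrative only): a caller looking up a bus by name must drop
+ * the device reference when done; "mdio_name" stands for whatever name the
+ * bus was registered under:
+ *
+ *	struct mii_bus *bus = mdio_find_bus(mdio_name);
+ *
+ *	if (bus) {
+ *		... use bus ...
+ *		put_device(&bus->dev);
+ *	}
+ */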
+
#if IS_ENABLED(CONFIG_OF_MDIO)
/**
* of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
@@ -808,6 +825,38 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
EXPORT_SYMBOL(__mdiobus_write);
/**
+ * __mdiobus_modify_changed - Unlocked version of the mdiobus_modify function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Read, modify, and if any change, write the register value back to the
+ * device. Any error returns a negative number.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set)
+{
+ int new, ret;
+
+ ret = __mdiobus_read(bus, addr, regnum);
+ if (ret < 0)
+ return ret;
+
+ new = (ret & ~mask) | set;
+ if (new == ret)
+ return 0;
+
+ ret = __mdiobus_write(bus, addr, regnum, new);
+
+ return ret < 0 ? ret : 1;
+}
+EXPORT_SYMBOL_GPL(__mdiobus_modify_changed);
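+
+/* Example (illustrative only): a read-modify-write under a lock the caller
+ * already holds. The helper returns 0 if no write was needed, 1 if the
+ * register was written, or a negative errno on error; bus/addr/ret/changed
+ * below are assumed caller state:
+ *
+ *	ret = __mdiobus_modify_changed(bus, addr, MII_BMCR,
+ *				       BMCR_PDOWN, 0);
+ *	if (ret < 0)
+ *		goto err;
+ *	changed = ret > 0;
+ */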
+
+/**
* mdiobus_read_nested - Nested version of the mdiobus_read function
* @bus: the mii_bus struct
* @addr: the phy address
@@ -824,7 +873,8 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
- BUG_ON(in_interrupt());
+ if (WARN_ON_ONCE(in_interrupt()))
+ return -EINVAL;
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
retval = __mdiobus_read(bus, addr, regnum);
@@ -848,7 +898,8 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
- BUG_ON(in_interrupt());
+ if (WARN_ON_ONCE(in_interrupt()))
+ return -EINVAL;
mutex_lock(&bus->mdio_lock);
retval = __mdiobus_read(bus, addr, regnum);
@@ -876,7 +927,8 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
- BUG_ON(in_interrupt());
+ if (WARN_ON_ONCE(in_interrupt()))
+ return -EINVAL;
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
err = __mdiobus_write(bus, addr, regnum, val);
@@ -901,7 +953,8 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
- BUG_ON(in_interrupt());
+ if (WARN_ON_ONCE(in_interrupt()))
+ return -EINVAL;
mutex_lock(&bus->mdio_lock);
err = __mdiobus_write(bus, addr, regnum, val);
@@ -912,6 +965,30 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
EXPORT_SYMBOL(mdiobus_write);
/**
+ * mdiobus_modify - Convenience function for modifying a given mdio device
+ * register
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ */
+int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set)
+{
+ int err;
+
+ if (WARN_ON_ONCE(in_interrupt()))
+ return -EINVAL;
+
+ mutex_lock(&bus->mdio_lock);
+ err = __mdiobus_modify_changed(bus, addr, regnum, mask, set);
+ mutex_unlock(&bus->mdio_lock);
+
+ return err < 0 ? err : 0;
+}
+EXPORT_SYMBOL_GPL(mdiobus_modify);
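+
+/* Example (illustrative only): unlike the unlocked helper above, this
+ * variant takes the bus lock itself and folds the "changed" indication
+ * away, returning 0 on success:
+ *
+ *	err = mdiobus_modify(bus, addr, MII_BMCR, BMCR_ISOLATE, 0);
+ */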
+
+/**
* mdio_bus_match - determine if given MDIO driver supports the given
* MDIO device
* @dev: target MDIO device
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 63dedec0433d..2ec19e5540bf 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -704,6 +704,50 @@ static int ksz9131_of_load_skew_values(struct phy_device *phydev,
return phy_write_mmd(phydev, 2, reg, newval);
}
+#define KSZ9131RN_MMD_COMMON_CTRL_REG 2
+#define KSZ9131RN_RXC_DLL_CTRL 76
+#define KSZ9131RN_TXC_DLL_CTRL 77
+#define KSZ9131RN_DLL_CTRL_BYPASS BIT_MASK(12)
+#define KSZ9131RN_DLL_ENABLE_DELAY 0
+#define KSZ9131RN_DLL_DISABLE_DELAY BIT(12)
+
+static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
+{
+ u16 rxcdll_val, txcdll_val;
+ int ret;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ rxcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ txcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ rxcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
+ txcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ rxcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
+ txcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ rxcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ txcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
+ break;
+ default:
+ return 0;
+ }
+
+ ret = phy_modify_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ KSZ9131RN_RXC_DLL_CTRL, KSZ9131RN_DLL_CTRL_BYPASS,
+ rxcdll_val);
+ if (ret < 0)
+ return ret;
+
+ return phy_modify_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ KSZ9131RN_TXC_DLL_CTRL, KSZ9131RN_DLL_CTRL_BYPASS,
+ txcdll_val);
+}
+
static int ksz9131_config_init(struct phy_device *phydev)
{
const struct device *dev = &phydev->mdio.dev;
@@ -730,6 +774,12 @@ static int ksz9131_config_init(struct phy_device *phydev)
if (!of_node)
return 0;
+ if (phy_interface_is_rgmii(phydev)) {
+ ret = ksz9131_config_rgmii_delay(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
ret = ksz9131_of_load_skew_values(phydev, of_node,
MII_KSZ9031RN_CLK_PAD_SKEW, 5,
clk_skews, 2);
diff --git a/drivers/net/phy/mscc/Makefile b/drivers/net/phy/mscc/Makefile
new file mode 100644
index 000000000000..10af42cd9839
--- /dev/null
+++ b/drivers/net/phy/mscc/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for MSCC networking PHY driver
+
+obj-$(CONFIG_MICROSEMI_PHY) := mscc.o
+mscc-objs := mscc_main.o
+
+ifdef CONFIG_MACSEC
+mscc-objs += mscc_macsec.o
+endif
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
new file mode 100644
index 000000000000..030bf8b600df
--- /dev/null
+++ b/drivers/net/phy/mscc/mscc.h
@@ -0,0 +1,400 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Driver for Microsemi VSC85xx PHYs
+ *
+ * Copyright (c) 2016 Microsemi Corporation
+ */
+
+#ifndef _MSCC_PHY_H_
+#define _MSCC_PHY_H_
+
+#if IS_ENABLED(CONFIG_MACSEC)
+#include "mscc_macsec.h"
+#endif
+
+enum rgmii_clock_delay {
+ RGMII_CLK_DELAY_0_2_NS = 0,
+ RGMII_CLK_DELAY_0_8_NS = 1,
+ RGMII_CLK_DELAY_1_1_NS = 2,
+ RGMII_CLK_DELAY_1_7_NS = 3,
+ RGMII_CLK_DELAY_2_0_NS = 4,
+ RGMII_CLK_DELAY_2_3_NS = 5,
+ RGMII_CLK_DELAY_2_6_NS = 6,
+ RGMII_CLK_DELAY_3_4_NS = 7
+};
+
+/* Microsemi VSC85xx PHY registers */
+/* IEEE 802. Std Registers */
+#define MSCC_PHY_BYPASS_CONTROL 18
+#define DISABLE_HP_AUTO_MDIX_MASK 0x0080
+#define DISABLE_PAIR_SWAP_CORR_MASK 0x0020
+#define DISABLE_POLARITY_CORR_MASK 0x0010
+#define PARALLEL_DET_IGNORE_ADVERTISED 0x0008
+
+#define MSCC_PHY_EXT_CNTL_STATUS 22
+#define SMI_BROADCAST_WR_EN 0x0001
+
+#define MSCC_PHY_ERR_RX_CNT 19
+#define MSCC_PHY_ERR_FALSE_CARRIER_CNT 20
+#define MSCC_PHY_ERR_LINK_DISCONNECT_CNT 21
+#define ERR_CNT_MASK GENMASK(7, 0)
+
+#define MSCC_PHY_EXT_PHY_CNTL_1 23
+#define MAC_IF_SELECTION_MASK 0x1800
+#define MAC_IF_SELECTION_GMII 0
+#define MAC_IF_SELECTION_RMII 1
+#define MAC_IF_SELECTION_RGMII 2
+#define MAC_IF_SELECTION_POS 11
+#define VSC8584_MAC_IF_SELECTION_MASK 0x1000
+#define VSC8584_MAC_IF_SELECTION_SGMII 0
+#define VSC8584_MAC_IF_SELECTION_1000BASEX 1
+#define VSC8584_MAC_IF_SELECTION_POS 12
+#define FAR_END_LOOPBACK_MODE_MASK 0x0008
+#define MEDIA_OP_MODE_MASK 0x0700
+#define MEDIA_OP_MODE_COPPER 0
+#define MEDIA_OP_MODE_SERDES 1
+#define MEDIA_OP_MODE_1000BASEX 2
+#define MEDIA_OP_MODE_100BASEFX 3
+#define MEDIA_OP_MODE_AMS_COPPER_SERDES 5
+#define MEDIA_OP_MODE_AMS_COPPER_1000BASEX 6
+#define MEDIA_OP_MODE_AMS_COPPER_100BASEFX 7
+#define MEDIA_OP_MODE_POS 8
+
+#define MSCC_PHY_EXT_PHY_CNTL_2 24
+
+#define MII_VSC85XX_INT_MASK 25
+#define MII_VSC85XX_INT_MASK_MDINT BIT(15)
+#define MII_VSC85XX_INT_MASK_LINK_CHG BIT(13)
+#define MII_VSC85XX_INT_MASK_WOL BIT(6)
+#define MII_VSC85XX_INT_MASK_EXT BIT(5)
+#define MII_VSC85XX_INT_STATUS 26
+
+#define MII_VSC85XX_INT_MASK_MASK (MII_VSC85XX_INT_MASK_MDINT | \
+ MII_VSC85XX_INT_MASK_LINK_CHG | \
+ MII_VSC85XX_INT_MASK_EXT)
+
+#define MSCC_PHY_WOL_MAC_CONTROL 27
+#define EDGE_RATE_CNTL_POS 5
+#define EDGE_RATE_CNTL_MASK 0x00E0
+
+#define MSCC_PHY_DEV_AUX_CNTL 28
+#define HP_AUTO_MDIX_X_OVER_IND_MASK 0x2000
+
+#define MSCC_PHY_LED_MODE_SEL 29
+#define LED_MODE_SEL_POS(x) ((x) * 4)
+#define LED_MODE_SEL_MASK(x) (GENMASK(3, 0) << LED_MODE_SEL_POS(x))
+#define LED_MODE_SEL(x, mode) (((mode) << LED_MODE_SEL_POS(x)) & LED_MODE_SEL_MASK(x))
+
+#define MSCC_EXT_PAGE_CSR_CNTL_17 17
+#define MSCC_EXT_PAGE_CSR_CNTL_18 18
+
+#define MSCC_EXT_PAGE_CSR_CNTL_19 19
+#define MSCC_PHY_CSR_CNTL_19_REG_ADDR(x) (x)
+#define MSCC_PHY_CSR_CNTL_19_TARGET(x) ((x) << 12)
+#define MSCC_PHY_CSR_CNTL_19_READ BIT(14)
+#define MSCC_PHY_CSR_CNTL_19_CMD BIT(15)
+
+#define MSCC_EXT_PAGE_CSR_CNTL_20 20
+#define MSCC_PHY_CSR_CNTL_20_TARGET(x) (x)
+
+#define PHY_MCB_TARGET 0x07
+#define PHY_MCB_S6G_WRITE BIT(31)
+#define PHY_MCB_S6G_READ BIT(30)
+
+#define PHY_S6G_PLL5G_CFG0 0x06
+#define PHY_S6G_LCPLL_CFG 0x11
+#define PHY_S6G_PLL_CFG 0x2b
+#define PHY_S6G_COMMON_CFG 0x2c
+#define PHY_S6G_GPC_CFG 0x2e
+#define PHY_S6G_MISC_CFG 0x3b
+#define PHY_MCB_S6G_CFG 0x3f
+#define PHY_S6G_DFT_CFG2 0x3e
+#define PHY_S6G_PLL_STATUS 0x31
+#define PHY_S6G_IB_STATUS0 0x2f
+
+#define PHY_S6G_SYS_RST_POS 31
+#define PHY_S6G_ENA_LANE_POS 18
+#define PHY_S6G_ENA_LOOP_POS 8
+#define PHY_S6G_QRATE_POS 6
+#define PHY_S6G_IF_MODE_POS 4
+#define PHY_S6G_PLL_ENA_OFFS_POS 21
+#define PHY_S6G_PLL_FSM_CTRL_DATA_POS 8
+#define PHY_S6G_PLL_FSM_ENA_POS 7
+
+#define MSCC_EXT_PAGE_ACCESS 31
+#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
+#define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
+#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */
+#define MSCC_PHY_PAGE_EXTENDED_3 0x0003 /* Extended reg - page 3 */
+#define MSCC_PHY_PAGE_EXTENDED_4 0x0004 /* Extended reg - page 4 */
+#define MSCC_PHY_PAGE_CSR_CNTL MSCC_PHY_PAGE_EXTENDED_4
+#define MSCC_PHY_PAGE_MACSEC MSCC_PHY_PAGE_EXTENDED_4
+/* Extended reg - GPIO; this is a bank of registers that is shared by all
+ * PHYs in the same package.
+ */
+#define MSCC_PHY_PAGE_EXTENDED_GPIO 0x0010 /* Extended reg - GPIO */
+#define MSCC_PHY_PAGE_TEST 0x2a30 /* Test reg */
+#define MSCC_PHY_PAGE_TR 0x52b5 /* Token ring registers */
+
+/* Extended Page 1 Registers */
+#define MSCC_PHY_CU_MEDIA_CRC_VALID_CNT 18
+#define VALID_CRC_CNT_CRC_MASK GENMASK(13, 0)
+
+#define MSCC_PHY_EXT_MODE_CNTL 19
+#define FORCE_MDI_CROSSOVER_MASK 0x000C
+#define FORCE_MDI_CROSSOVER_MDIX 0x000C
+#define FORCE_MDI_CROSSOVER_MDI 0x0008
+
+#define MSCC_PHY_ACTIPHY_CNTL 20
+#define PHY_ADDR_REVERSED 0x0200
+#define DOWNSHIFT_CNTL_MASK 0x001C
+#define DOWNSHIFT_EN 0x0010
+#define DOWNSHIFT_CNTL_POS 2
+
+#define MSCC_PHY_EXT_PHY_CNTL_4 23
+#define PHY_CNTL_4_ADDR_POS 11
+
+#define MSCC_PHY_VERIPHY_CNTL_2 25
+
+#define MSCC_PHY_VERIPHY_CNTL_3 26
+
+/* Extended Page 2 Registers */
+#define MSCC_PHY_CU_PMD_TX_CNTL 16
+
+/* RGMII setting controls at address 18E2, for VSC8572 and similar */
+#define VSC8572_RGMII_CNTL 18
+#define VSC8572_RGMII_RX_DELAY_MASK 0x000E
+#define VSC8572_RGMII_TX_DELAY_MASK 0x0070
+
+/* RGMII controls at address 20E2, for VSC8502 and similar */
+#define VSC8502_RGMII_CNTL 20
+#define VSC8502_RGMII_RX_DELAY_MASK 0x0070
+#define VSC8502_RGMII_TX_DELAY_MASK 0x0007
+
+#define MSCC_PHY_WOL_LOWER_MAC_ADDR 21
+#define MSCC_PHY_WOL_MID_MAC_ADDR 22
+#define MSCC_PHY_WOL_UPPER_MAC_ADDR 23
+#define MSCC_PHY_WOL_LOWER_PASSWD 24
+#define MSCC_PHY_WOL_MID_PASSWD 25
+#define MSCC_PHY_WOL_UPPER_PASSWD 26
+
+#define MSCC_PHY_WOL_MAC_CONTROL 27
+#define SECURE_ON_ENABLE 0x8000
+#define SECURE_ON_PASSWD_LEN_4 0x4000
+
+#define MSCC_PHY_EXTENDED_INT 28
+#define MSCC_PHY_EXTENDED_INT_MS_EGR BIT(9)
+
+/* Extended Page 3 Registers */
+#define MSCC_PHY_SERDES_TX_VALID_CNT 21
+#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22
+#define MSCC_PHY_SERDES_RX_VALID_CNT 28
+#define MSCC_PHY_SERDES_RX_CRC_ERR_CNT 29
+
+/* Extended page GPIO Registers */
+#define MSCC_DW8051_CNTL_STATUS 0
+#define MICRO_NSOFT_RESET 0x8000
+#define RUN_FROM_INT_ROM 0x4000
+#define AUTOINC_ADDR 0x2000
+#define PATCH_RAM_CLK 0x1000
+#define MICRO_PATCH_EN 0x0080
+#define DW8051_CLK_EN 0x0010
+#define MICRO_CLK_EN 0x0008
+#define MICRO_CLK_DIVIDE(x) ((x) >> 1)
+#define MSCC_DW8051_VLD_MASK 0xf1ff
+
+/* x Address in range 1-4 */
+#define MSCC_TRAP_ROM_ADDR(x) ((x) * 2 + 1)
+#define MSCC_PATCH_RAM_ADDR(x) (((x) + 1) * 2)
+#define MSCC_INT_MEM_ADDR 11
+
+#define MSCC_INT_MEM_CNTL 12
+#define READ_SFR 0x6000
+#define READ_PRAM 0x4000
+#define READ_ROM 0x2000
+#define READ_RAM 0x0000
+#define INT_MEM_WRITE_EN 0x1000
+#define EN_PATCH_RAM_TRAP_ADDR(x) (0x0100 << ((x) - 1))
+#define INT_MEM_DATA_M 0x00ff
+#define INT_MEM_DATA(x) (INT_MEM_DATA_M & (x))
+
+#define MSCC_PHY_PROC_CMD 18
+#define PROC_CMD_NCOMPLETED 0x8000
+#define PROC_CMD_FAILED 0x4000
+#define PROC_CMD_SGMII_PORT(x) ((x) << 8)
+#define PROC_CMD_FIBER_PORT(x) (0x0100 << (x) % 4)
+#define PROC_CMD_QSGMII_PORT 0x0c00
+#define PROC_CMD_RST_CONF_PORT 0x0080
+#define PROC_CMD_RECONF_PORT 0x0000
+#define PROC_CMD_READ_MOD_WRITE_PORT 0x0040
+#define PROC_CMD_WRITE 0x0040
+#define PROC_CMD_READ 0x0000
+#define PROC_CMD_FIBER_DISABLE 0x0020
+#define PROC_CMD_FIBER_100BASE_FX 0x0010
+#define PROC_CMD_FIBER_1000BASE_X 0x0000
+#define PROC_CMD_SGMII_MAC 0x0030
+#define PROC_CMD_QSGMII_MAC 0x0020
+#define PROC_CMD_NO_MAC_CONF 0x0000
+#define PROC_CMD_1588_DEFAULT_INIT 0x0010
+#define PROC_CMD_NOP 0x000f
+#define PROC_CMD_PHY_INIT 0x000a
+#define PROC_CMD_CRC16 0x0008
+#define PROC_CMD_FIBER_MEDIA_CONF 0x0001
+#define PROC_CMD_MCB_ACCESS_MAC_CONF 0x0000
+#define PROC_CMD_NCOMPLETED_TIMEOUT_MS 500
+
+#define MSCC_PHY_MAC_CFG_FASTLINK 19
+#define MAC_CFG_MASK 0xc000
+#define MAC_CFG_SGMII 0x0000
+#define MAC_CFG_QSGMII 0x4000
+#define MAC_CFG_RGMII 0x8000
+
+/* Test page Registers */
+#define MSCC_PHY_TEST_PAGE_5 5
+#define MSCC_PHY_TEST_PAGE_8 8
+#define MSCC_PHY_TEST_PAGE_9 9
+#define MSCC_PHY_TEST_PAGE_20 20
+#define MSCC_PHY_TEST_PAGE_24 24
+
+/* Token ring page Registers */
+#define MSCC_PHY_TR_CNTL 16
+#define TR_WRITE 0x8000
+#define TR_ADDR(x) (0x7fff & (x))
+#define MSCC_PHY_TR_LSB 17
+#define MSCC_PHY_TR_MSB 18
+
+/* Microsemi PHY IDs
+ * Code assumes the lowest nibble is 0
+ */
+#define PHY_ID_VSC8502 0x00070630
+#define PHY_ID_VSC8504 0x000704c0
+#define PHY_ID_VSC8514 0x00070670
+#define PHY_ID_VSC8530 0x00070560
+#define PHY_ID_VSC8531 0x00070570
+#define PHY_ID_VSC8540 0x00070760
+#define PHY_ID_VSC8541 0x00070770
+#define PHY_ID_VSC8552 0x000704e0
+#define PHY_ID_VSC856X 0x000707e0
+#define PHY_ID_VSC8572 0x000704d0
+#define PHY_ID_VSC8574 0x000704a0
+#define PHY_ID_VSC8575 0x000707d0
+#define PHY_ID_VSC8582 0x000707b0
+#define PHY_ID_VSC8584 0x000707c0
+
+#define MSCC_VDDMAC_1500 1500
+#define MSCC_VDDMAC_1800 1800
+#define MSCC_VDDMAC_2500 2500
+#define MSCC_VDDMAC_3300 3300
+
+#define DOWNSHIFT_COUNT_MAX 5
+
+#define MAX_LEDS 4
+
+#define VSC8584_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \
+ BIT(VSC8531_LINK_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_100_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_ACTIVITY) | \
+ BIT(VSC8531_LINK_100_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_100_ACTIVITY) | \
+ BIT(VSC8584_LINK_100FX_1000X_ACTIVITY) | \
+ BIT(VSC8531_DUPLEX_COLLISION) | \
+ BIT(VSC8531_COLLISION) | \
+ BIT(VSC8531_ACTIVITY) | \
+ BIT(VSC8584_100FX_1000X_ACTIVITY) | \
+ BIT(VSC8531_AUTONEG_FAULT) | \
+ BIT(VSC8531_SERIAL_MODE) | \
+ BIT(VSC8531_FORCE_LED_OFF) | \
+ BIT(VSC8531_FORCE_LED_ON))
+
+#define VSC85XX_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \
+ BIT(VSC8531_LINK_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_100_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_ACTIVITY) | \
+ BIT(VSC8531_LINK_100_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_1000_ACTIVITY) | \
+ BIT(VSC8531_LINK_10_100_ACTIVITY) | \
+ BIT(VSC8531_DUPLEX_COLLISION) | \
+ BIT(VSC8531_COLLISION) | \
+ BIT(VSC8531_ACTIVITY) | \
+ BIT(VSC8531_AUTONEG_FAULT) | \
+ BIT(VSC8531_SERIAL_MODE) | \
+ BIT(VSC8531_FORCE_LED_OFF) | \
+ BIT(VSC8531_FORCE_LED_ON))
+
+#define MSCC_VSC8584_REVB_INT8051_FW "microchip/mscc_vsc8584_revb_int8051_fb48.bin"
+#define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR 0xe800
+#define MSCC_VSC8584_REVB_INT8051_FW_CRC 0xfb48
+
+#define MSCC_VSC8574_REVB_INT8051_FW "microchip/mscc_vsc8574_revb_int8051_29e8.bin"
+#define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR 0x4000
+#define MSCC_VSC8574_REVB_INT8051_FW_CRC 0x29e8
+
+#define VSC8584_REVB 0x0001
+#define MSCC_DEV_REV_MASK GENMASK(3, 0)
+
+struct reg_val {
+ u16 reg;
+ u32 val;
+};
+
+struct vsc85xx_hw_stat {
+ const char *string;
+ u8 reg;
+ u16 page;
+ u16 mask;
+};
+
+struct vsc8531_private {
+ int rate_magic;
+ u16 supp_led_modes;
+ u32 leds_mode[MAX_LEDS];
+ u8 nleds;
+ const struct vsc85xx_hw_stat *hw_stats;
+ u64 *stats;
+ int nstats;
+ bool pkg_init;
+	/* For multiple-port PHYs: the MDIO address of the base PHY in the
+	 * package.
+	 */
+ unsigned int base_addr;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec fields:
+ * - One SecY per device (enforced at the s/w implementation level)
+ * - macsec_flows: list of h/w flows
+ * - ingr_flows: bitmap of ingress flows
+ * - egr_flows: bitmap of egress flows
+ */
+ struct macsec_secy *secy;
+ struct list_head macsec_flows;
+ unsigned long ingr_flows;
+ unsigned long egr_flows;
+#endif
+};
+
+#ifdef CONFIG_OF_MDIO
+struct vsc8531_edge_rate_table {
+ u32 vddmac;
+ u32 slowdown[8];
+};
+#endif /* CONFIG_OF_MDIO */
+
+#if IS_ENABLED(CONFIG_MACSEC)
+int vsc8584_macsec_init(struct phy_device *phydev);
+void vsc8584_handle_macsec_interrupt(struct phy_device *phydev);
+void vsc8584_config_macsec_intr(struct phy_device *phydev);
+#else
+static inline int vsc8584_macsec_init(struct phy_device *phydev)
+{
+ return 0;
+}
+static inline void vsc8584_handle_macsec_interrupt(struct phy_device *phydev)
+{
+}
+static inline void vsc8584_config_macsec_intr(struct phy_device *phydev)
+{
+}
+#endif
+
+#endif /* _MSCC_PHY_H_ */
diff --git a/drivers/net/phy/mscc_fc_buffer.h b/drivers/net/phy/mscc/mscc_fc_buffer.h
index 7e9c0e877895..3803e826c37d 100644
--- a/drivers/net/phy/mscc_fc_buffer.h
+++ b/drivers/net/phy/mscc/mscc_fc_buffer.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
- * Microsemi Ocelot Switch driver
+ * Driver for Microsemi VSC85xx PHYs
*
* Copyright (C) 2019 Microsemi Corporation
*/
-#ifndef _MSCC_OCELOT_FC_BUFFER_H_
-#define _MSCC_OCELOT_FC_BUFFER_H_
+#ifndef _MSCC_PHY_FC_BUFFER_H_
+#define _MSCC_PHY_FC_BUFFER_H_
#define MSCC_FCBUF_ENA_CFG 0x00
#define MSCC_FCBUF_MODE_CFG 0x01
@@ -61,4 +61,4 @@
#define MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(x) ((x) << 16)
#define MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH_M GENMASK(31, 16)
-#endif
+#endif /* _MSCC_PHY_FC_BUFFER_H_ */
diff --git a/drivers/net/phy/mscc_mac.h b/drivers/net/phy/mscc/mscc_mac.h
index 9420ee5175a6..fcb5ba5e5d03 100644
--- a/drivers/net/phy/mscc_mac.h
+++ b/drivers/net/phy/mscc/mscc_mac.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
- * Microsemi Ocelot Switch driver
+ * Driver for Microsemi VSC85xx PHYs
*
* Copyright (c) 2017 Microsemi Corporation
*/
-#ifndef _MSCC_OCELOT_LINE_MAC_H_
-#define _MSCC_OCELOT_LINE_MAC_H_
+#ifndef _MSCC_PHY_LINE_MAC_H_
+#define _MSCC_PHY_LINE_MAC_H_
#define MSCC_MAC_CFG_ENA_CFG 0x00
#define MSCC_MAC_CFG_MODE_CFG 0x01
@@ -156,4 +156,4 @@
#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x) (x)
#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M GENMASK(2, 0)
-#endif /* _MSCC_OCELOT_LINE_MAC_H_ */
+#endif /* _MSCC_PHY_LINE_MAC_H_ */
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
new file mode 100644
index 000000000000..e99e2cd72a0c
--- /dev/null
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Driver for Microsemi VSC85xx PHYs
+ *
+ * Author: Nagaraju Lakkaraju
+ * License: Dual MIT/GPL
+ * Copyright (c) 2016 Microsemi Corporation
+ */
+
+#include <linux/phy.h>
+#include <dt-bindings/net/mscc-phy-vsc8531.h>
+
+#include <crypto/skcipher.h>
+
+#include <net/macsec.h>
+
+#include "mscc.h"
+#include "mscc_mac.h"
+#include "mscc_macsec.h"
+#include "mscc_fc_buffer.h"
+
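+/* The MACsec CSRs sit behind an indirect access scheme on the MACsec
+ * extended page: registers 17/18 carry the 32-bit data (low/high half),
+ * register 19 carries the command, read flag, target and CSR address, and
+ * register 20 selects the upper target bits. An access is started by
+ * setting the CMD bit in register 19 and is complete once CMD reads back
+ * set, bounded by the PROC_CMD_NCOMPLETED_TIMEOUT_MS deadline.
+ */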
+static u32 vsc8584_macsec_phy_read(struct phy_device *phydev,
+ enum macsec_bank bank, u32 reg)
+{
+ u32 val, val_l = 0, val_h = 0;
+ unsigned long deadline;
+ int rc;
+
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
+ if (rc < 0)
+ goto failed;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
+ MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
+
+ if (bank >> 2 == 0x1)
+ /* non-MACsec access */
+ bank &= 0x3;
+ else
+ bank = 0;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
+ MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_READ |
+ MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
+ MSCC_PHY_MACSEC_19_TARGET(bank));
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
+ } while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
+
+ val_l = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_17);
+ val_h = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_18);
+
+failed:
+ phy_restore_page(phydev, rc, rc);
+
+ return (val_h << 16) | val_l;
+}
+
+static void vsc8584_macsec_phy_write(struct phy_device *phydev,
+ enum macsec_bank bank, u32 reg, u32 val)
+{
+ unsigned long deadline;
+ int rc;
+
+ rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
+ if (rc < 0)
+ goto failed;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
+ MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
+
+ if ((bank >> 2 == 0x1) || (bank >> 2 == 0x3))
+ bank &= 0x3;
+ else
+ /* MACsec access */
+ bank = 0;
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_17, (u16)val);
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_18, (u16)(val >> 16));
+
+ __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
+ MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
+ MSCC_PHY_MACSEC_19_TARGET(bank));
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
+ } while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
+
+failed:
+ phy_restore_page(phydev, rc, rc);
+}
+
+static void vsc8584_macsec_classification(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ /* enable VLAN tag parsing */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_CP_TAG,
+ MSCC_MS_SAM_CP_TAG_PARSE_STAG |
+ MSCC_MS_SAM_CP_TAG_PARSE_QTAG |
+ MSCC_MS_SAM_CP_TAG_PARSE_QINQ);
+}
+
+static void vsc8584_macsec_flow_default_action(struct phy_device *phydev,
+ enum macsec_bank bank,
+ bool block)
+{
+ u32 port = (bank == MACSEC_INGR) ?
+ MSCC_MS_PORT_UNCONTROLLED : MSCC_MS_PORT_COMMON;
+ u32 action = MSCC_MS_FLOW_BYPASS;
+
+ if (block)
+ action = MSCC_MS_FLOW_DROP;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_NCP,
+ /* MACsec untagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DEST_PORT(port) |
+ /* MACsec tagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DEST_PORT(port) |
+ /* Bad tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DEST_PORT(port) |
+ /* Kay tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_DEST_PORT(port));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_CP,
+ /* MACsec untagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DEST_PORT(port) |
+ /* MACsec tagged */
+ MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DEST_PORT(port) |
+ /* Bad tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DEST_PORT(port) |
+ /* Kay tag */
+ MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
+ MSCC_MS_SAM_NM_FLOW_CP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_NM_FLOW_CP_KAY_DEST_PORT(port));
+}
+
+static void vsc8584_macsec_integrity_checks(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+
+ if (bank != MACSEC_INGR)
+ return;
+
+ /* Set default rules to pass unmatched frames */
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MS_PARAMS2_IG_CC_CONTROL);
+ val |= MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_CTRL_ACT |
+ MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_ACT;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CC_CONTROL,
+ val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CP_TAG,
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_STAG |
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QTAG |
+ MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QINQ);
+}
+
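+/* Bring one MACsec block (ingress or egress) into a known state: take it
+ * out of s/w reset, clear and reset the counters, program the MTU checks
+ * and, on the egress side, the flow control thresholds, install the
+ * default classification rules and finally enable MACsec processing.
+ */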
+static void vsc8584_macsec_block_init(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+ int i;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_SW_RST |
+ MSCC_MS_ENA_CFG_MACSEC_BYPASS_ENA);
+
+	/* Take the MACsec block out of s/w reset and enable clocks */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_CLK_ENA);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_STATUS_CONTEXT_CTRL,
+ bank == MACSEC_INGR ? 0xe5880214 : 0xe5880218);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_MISC_CONTROL,
+ MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX(bank == MACSEC_INGR ? 57 : 40) |
+ MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE(bank == MACSEC_INGR ? 1 : 2));
+
+ /* Clear the counters */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
+ val |= MSCC_MS_COUNT_CONTROL_AUTO_CNTR_RESET;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
+
+ /* Enable octet increment mode */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PP_CTRL,
+ MSCC_MS_PP_CTRL_MACSEC_OCTET_INCR_MODE);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_BLOCK_CTX_UPDATE, 0x3);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
+ val |= MSCC_MS_COUNT_CONTROL_RESET_ALL;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
+
+ /* Set the MTU */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_NON_VLAN_MTU_CHECK,
+ MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE(32761) |
+ MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMP_DROP);
+
+ for (i = 0; i < 8; i++)
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_VLAN_MTU_CHECK(i),
+ MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE(32761) |
+ MSCC_MS_VLAN_MTU_CHECK_MTU_COMP_DROP);
+
+ if (bank == MACSEC_EGR) {
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_INTR_CTRL_STATUS);
+ val &= ~MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_INTR_CTRL_STATUS, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_FC_CFG,
+ MSCC_MS_FC_CFG_FCBUF_ENA |
+ MSCC_MS_FC_CFG_LOW_THRESH(0x1) |
+ MSCC_MS_FC_CFG_HIGH_THRESH(0x4) |
+ MSCC_MS_FC_CFG_LOW_BYTES_VAL(0x4) |
+ MSCC_MS_FC_CFG_HIGH_BYTES_VAL(0x6));
+ }
+
+ vsc8584_macsec_classification(phydev, bank);
+ vsc8584_macsec_flow_default_action(phydev, bank, false);
+ vsc8584_macsec_integrity_checks(phydev, bank);
+
+ /* Enable the MACsec block */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
+ MSCC_MS_ENA_CFG_CLK_ENA |
+ MSCC_MS_ENA_CFG_MACSEC_ENA |
+ MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE(0x5));
+}
+
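+/* Configure the host- or line-side MAC that frames traverse around the
+ * MACsec engine: clear its statistics, set up pause-frame handling, FCS
+ * and preamble stripping/insertion, a 10240-byte maximum frame length and
+ * finally enable its RX/TX paths and clocks.
+ */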
+static void vsc8584_macsec_mac_init(struct phy_device *phydev,
+ enum macsec_bank bank)
+{
+ u32 val;
+ int i;
+
+ /* Clear host & line stats */
+ for (i = 0; i < 36; i++)
+ vsc8584_macsec_phy_write(phydev, bank, 0x1c + i, 0);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL);
+ val &= ~MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE_M;
+ val |= MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE(2) |
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE(0xffff);
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2);
+ val |= 0xffff;
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL);
+ if (bank == HOST_MAC)
+ val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_TIMER_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA;
+ else
+ val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_REACT_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_MODE |
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_EARLY_PAUSE_DETECT_ENA;
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_PKTINF_CFG,
+ MSCC_MAC_CFG_PKTINF_CFG_STRIP_FCS_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_INSERT_FCS_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_LPI_RELAY_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA |
+ MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA |
+ (bank == HOST_MAC ?
+ MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING : 0));
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MODE_CFG);
+ val &= ~MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MODE_CFG, val);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG);
+ val &= ~MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_M;
+ val |= MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN(10240);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ADV_CHK_CFG,
+ MSCC_MAC_CFG_ADV_CHK_CFG_SFD_CHK_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_PRM_CHK_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_OOR_ERR_ENA |
+ MSCC_MAC_CFG_ADV_CHK_CFG_INR_ERR_ENA);
+
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_LFS_CFG);
+ val &= ~MSCC_MAC_CFG_LFS_CFG_LFS_MODE_ENA;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_LFS_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ENA_CFG,
+ MSCC_MAC_CFG_ENA_CFG_RX_CLK_ENA |
+ MSCC_MAC_CFG_ENA_CFG_TX_CLK_ENA |
+ MSCC_MAC_CFG_ENA_CFG_RX_ENA |
+ MSCC_MAC_CFG_ENA_CFG_TX_ENA);
+}
+
+/* Must be called with mdio_lock taken */
+static int __vsc8584_macsec_init(struct phy_device *phydev)
+{
+ u32 val;
+
+ vsc8584_macsec_block_init(phydev, MACSEC_INGR);
+ vsc8584_macsec_block_init(phydev, MACSEC_EGR);
+ vsc8584_macsec_mac_init(phydev, HOST_MAC);
+ vsc8584_macsec_mac_init(phydev, LINE_MAC);
+
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER,
+ MSCC_FCBUF_FC_READ_THRESH_CFG,
+ MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH(4) |
+ MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(5));
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG);
+ val |= MSCC_FCBUF_MODE_CFG_PAUSE_GEN_ENA |
+ MSCC_FCBUF_MODE_CFG_RX_PPM_RATE_ADAPT_ENA |
+ MSCC_FCBUF_MODE_CFG_TX_PPM_RATE_ADAPT_ENA;
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG, val);
+
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG,
+ MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH(8) |
+ MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET(9));
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER,
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG);
+ val &= ~(MSCC_FCBUF_TX_DATA_QUEUE_CFG_START_M |
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG_END_M);
+ val |= MSCC_FCBUF_TX_DATA_QUEUE_CFG_START(0) |
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG_END(5119);
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER,
+ MSCC_FCBUF_TX_DATA_QUEUE_CFG, val);
+
+ val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG);
+ val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA;
+ vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val);
+
+ val = vsc8584_macsec_phy_read(phydev, IP_1588,
+ MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL);
+ val &= ~MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M;
+ val |= MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4);
+ vsc8584_macsec_phy_write(phydev, IP_1588,
+ MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL, val);
+
+ return 0;
+}
+
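+/* Program one SA match entry: what the classifier matches on (tag state,
+ * association number, SCI, ethertype, priority) and the resulting flow
+ * control word (drop, bypass or MACsec processing, destination port and,
+ * for non-bypass flows, the SecY validation/protection policy).
+ */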
+static void vsc8584_macsec_flow(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ enum macsec_bank bank = flow->bank;
+ u32 val, match = 0, mask = 0, action = 0, idx = flow->index;
+
+ if (flow->match.tagged)
+ match |= MSCC_MS_SAM_MISC_MATCH_TAGGED;
+ if (flow->match.untagged)
+ match |= MSCC_MS_SAM_MISC_MATCH_UNTAGGED;
+
+ if (bank == MACSEC_INGR && flow->assoc_num >= 0) {
+ match |= MSCC_MS_SAM_MISC_MATCH_AN(flow->assoc_num);
+ mask |= MSCC_MS_SAM_MASK_AN_MASK(0x3);
+ }
+
+ if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) {
+ match |= MSCC_MS_SAM_MISC_MATCH_TCI(BIT(3));
+ mask |= MSCC_MS_SAM_MASK_TCI_MASK(BIT(3)) |
+ MSCC_MS_SAM_MASK_SCI_MASK;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_LO(idx),
+ lower_32_bits(flow->rx_sa->sc->sci));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_HI(idx),
+ upper_32_bits(flow->rx_sa->sc->sci));
+ }
+
+ if (flow->match.etype) {
+ mask |= MSCC_MS_SAM_MASK_MAC_ETYPE_MASK;
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MAC_SA_MATCH_HI(idx),
+ MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE(htons(flow->etype)));
+ }
+
+ match |= MSCC_MS_SAM_MISC_MATCH_PRIORITY(flow->priority);
+
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MISC_MATCH(idx), match);
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MASK(idx), mask);
+
+ /* Action for matching packets */
+ if (flow->action.drop)
+ action = MSCC_MS_FLOW_DROP;
+ else if (flow->action.bypass || flow->port == MSCC_MS_PORT_UNCONTROLLED)
+ action = MSCC_MS_FLOW_BYPASS;
+ else
+ action = (bank == MACSEC_INGR) ?
+ MSCC_MS_FLOW_INGRESS : MSCC_MS_FLOW_EGRESS;
+
+ val = MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE(action) |
+ MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION(MSCC_MS_ACTION_DROP) |
+ MSCC_MS_SAM_FLOW_CTRL_DEST_PORT(flow->port);
+
+ if (action == MSCC_MS_FLOW_BYPASS)
+ goto write_ctrl;
+
+ if (bank == MACSEC_INGR) {
+ if (priv->secy->replay_protect)
+ val |= MSCC_MS_SAM_FLOW_CTRL_REPLAY_PROTECT;
+ if (priv->secy->validate_frames == MACSEC_VALIDATE_STRICT)
+ val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_STRICT);
+ else if (priv->secy->validate_frames == MACSEC_VALIDATE_CHECK)
+ val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_CHECK);
+ } else if (bank == MACSEC_EGR) {
+ if (priv->secy->protect_frames)
+ val |= MSCC_MS_SAM_FLOW_CTRL_PROTECT_FRAME;
+ if (priv->secy->tx_sc.encrypt)
+ val |= MSCC_MS_SAM_FLOW_CTRL_CONF_PROTECT;
+ if (priv->secy->tx_sc.send_sci)
+ val |= MSCC_MS_SAM_FLOW_CTRL_INCLUDE_SCI;
+ }
+
+write_ctrl:
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
+
+static struct macsec_flow *vsc8584_macsec_find_flow(struct macsec_context *ctx,
+ enum macsec_bank bank)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, &priv->macsec_flows, list)
+ if (pos->assoc_num == ctx->sa.assoc_num && pos->bank == bank)
+ return pos;
+
+ return ERR_PTR(-ENOENT);
+}
+
+static void vsc8584_macsec_flow_enable(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ enum macsec_bank bank = flow->bank;
+ u32 val, idx = flow->index;
+
+ if ((flow->bank == MACSEC_INGR && flow->rx_sa && !flow->rx_sa->active) ||
+ (flow->bank == MACSEC_EGR && flow->tx_sa && !flow->tx_sa->active))
+ return;
+
+ /* Enable */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_SET1, BIT(idx));
+
+ /* Set in-use */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
+ val |= MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
+
+static void vsc8584_macsec_flow_disable(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ enum macsec_bank bank = flow->bank;
+ u32 val, idx = flow->index;
+
+ /* Disable */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_CLEAR1, BIT(idx));
+
+ /* Clear in-use */
+ val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
+ val &= ~MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
+}
+
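+/* Context IDs must be unique across both banks, so ingress flows are
+ * offset by MSCC_MS_MAX_FLOWS while egress flows use their index as-is.
+ */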
+static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
+{
+ if (flow->bank == MACSEC_INGR)
+ return flow->index + MSCC_MS_MAX_FLOWS;
+
+ return flow->index;
+}
+
+/* Derive the key used for the hash authentication from the AES key */
+static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
+ u16 key_len, u8 hkey[16])
+{
+ struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+ struct skcipher_request *req = NULL;
+ struct scatterlist src, dst;
+ DECLARE_CRYPTO_WAIT(wait);
+ u32 input[4] = {0};
+ int ret;
+
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
+ &wait);
+ ret = crypto_skcipher_setkey(tfm, key, key_len);
+ if (ret < 0)
+ goto out;
+
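+	/* Per the GCM specification, the hash subkey is the encryption of an
+	 * all-zero block under the cipher key; 'input' is zero-initialized
+	 * above, so the ECB encryption below yields exactly that subkey.
+	 */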
+ sg_init_one(&src, input, 16);
+ sg_init_one(&dst, hkey, 16);
+ skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
+
+ ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+
+out:
+ skcipher_request_free(req);
+ crypto_free_skcipher(tfm);
+ return ret;
+}
+
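+/* Write the transform record of a flow. The record layout is: control
+ * word, context ID, cipher key, derived hash key, initial sequence number,
+ * replay window mask (ingress only) and the SCI, zero-padded up to 20
+ * 32-bit words.
+ */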
+static int vsc8584_macsec_transformation(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ enum macsec_bank bank = flow->bank;
+ int i, ret, index = flow->index;
+ u32 rec = 0, control = 0;
+ u8 hkey[16];
+ sci_t sci;
+
+ ret = vsc8584_macsec_derive_key(flow->key, priv->secy->key_len, hkey);
+ if (ret)
+ return ret;
+
+ switch (priv->secy->key_len) {
+ case 16:
+ control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_128);
+ break;
+ case 32:
+ control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_256);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ control |= (bank == MACSEC_EGR) ?
+ (CONTROL_TYPE_EGRESS | CONTROL_AN(priv->secy->tx_sc.encoding_sa)) :
+ (CONTROL_TYPE_INGRESS | CONTROL_SEQ_MASK);
+
+ control |= CONTROL_UPDATE_SEQ | CONTROL_ENCRYPT_AUTH | CONTROL_KEY_IN_CTX |
+ CONTROL_IV0 | CONTROL_IV1 | CONTROL_IV_IN_SEQ |
+ CONTROL_DIGEST_TYPE(0x2) | CONTROL_SEQ_TYPE(0x1) |
+ CONTROL_AUTH_ALG(AUTH_ALG_AES_GHAS) | CONTROL_CONTEXT_ID;
+
+ /* Set the control word */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ control);
+
+ /* Set the context ID. Must be unique. */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ vsc8584_macsec_flow_context_id(flow));
+
+ /* Set the encryption/decryption key */
+ for (i = 0; i < priv->secy->key_len / sizeof(u32); i++)
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ ((u32 *)flow->key)[i]);
+
+ /* Set the authentication key */
+ for (i = 0; i < 4; i++)
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ ((u32 *)hkey)[i]);
+
+ /* Initial sequence number */
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ bank == MACSEC_INGR ?
+ flow->rx_sa->next_pn : flow->tx_sa->next_pn);
+
+ if (bank == MACSEC_INGR)
+ /* Set the mask (replay window size) */
+ vsc8584_macsec_phy_write(phydev, bank,
+ MSCC_MS_XFORM_REC(index, rec++),
+ priv->secy->replay_window);
+
+ /* Set the input vectors */
+ sci = bank == MACSEC_INGR ? flow->rx_sa->sc->sci : priv->secy->sci;
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ lower_32_bits(sci));
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ upper_32_bits(sci));
+
+ while (rec < 20)
+ vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
+ 0);
+
+ flow->has_transformation = true;
+ return 0;
+}
+
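+/* Allocate a free flow entry in the given bank. Entries in use are tracked
+ * in a per-bank bitmap, and the new flow is added to the device flow list
+ * so it can later be enabled, updated or torn down.
+ */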
+static struct macsec_flow *vsc8584_macsec_alloc_flow(struct vsc8531_private *priv,
+ enum macsec_bank bank)
+{
+ unsigned long *bitmap = bank == MACSEC_INGR ?
+ &priv->ingr_flows : &priv->egr_flows;
+ struct macsec_flow *flow;
+ int index;
+
+ index = find_first_zero_bit(bitmap, MSCC_MS_MAX_FLOWS);
+
+ if (index == MSCC_MS_MAX_FLOWS)
+ return ERR_PTR(-ENOMEM);
+
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ set_bit(index, bitmap);
+ flow->index = index;
+ flow->bank = bank;
+ flow->priority = 8;
+ flow->assoc_num = -1;
+
+ list_add_tail(&flow->list, &priv->macsec_flows);
+ return flow;
+}
+
+static void vsc8584_macsec_free_flow(struct vsc8531_private *priv,
+ struct macsec_flow *flow)
+{
+ unsigned long *bitmap = flow->bank == MACSEC_INGR ?
+ &priv->ingr_flows : &priv->egr_flows;
+
+ list_del(&flow->list);
+ clear_bit(flow->index, bitmap);
+ kfree(flow);
+}
+
+static int vsc8584_macsec_add_flow(struct phy_device *phydev,
+ struct macsec_flow *flow, bool update)
+{
+ int ret;
+
+ flow->port = MSCC_MS_PORT_CONTROLLED;
+ vsc8584_macsec_flow(phydev, flow);
+
+ if (update)
+ return 0;
+
+ ret = vsc8584_macsec_transformation(phydev, flow);
+ if (ret) {
+ vsc8584_macsec_free_flow(phydev->priv, flow);
+ return ret;
+ }
+
+ return 0;
+}
+
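+/* MKA frames are carried over EAPOL (ethertype ETH_P_PAE) and must bypass
+ * MACsec processing in both directions, so a high-priority bypass rule is
+ * installed in each bank.
+ */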
+static int vsc8584_macsec_default_flows(struct phy_device *phydev)
+{
+ struct macsec_flow *flow;
+
+ /* Add a rule to let the MKA traffic go through, ingress */
+ flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ flow->priority = 15;
+ flow->port = MSCC_MS_PORT_UNCONTROLLED;
+ flow->match.tagged = 1;
+ flow->match.untagged = 1;
+ flow->match.etype = 1;
+ flow->etype = ETH_P_PAE;
+ flow->action.bypass = 1;
+
+ vsc8584_macsec_flow(phydev, flow);
+ vsc8584_macsec_flow_enable(phydev, flow);
+
+ /* Add a rule to let the MKA traffic go through, egress */
+ flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ flow->priority = 15;
+ flow->port = MSCC_MS_PORT_COMMON;
+ flow->match.untagged = 1;
+ flow->match.etype = 1;
+ flow->etype = ETH_P_PAE;
+ flow->action.bypass = 1;
+
+ vsc8584_macsec_flow(phydev, flow);
+ vsc8584_macsec_flow_enable(phydev, flow);
+
+ return 0;
+}
+
+static void vsc8584_macsec_del_flow(struct phy_device *phydev,
+ struct macsec_flow *flow)
+{
+ vsc8584_macsec_flow_disable(phydev, flow);
+ vsc8584_macsec_free_flow(phydev->priv, flow);
+}
+
+static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
+ struct macsec_flow *flow, bool update)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (!flow) {
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+ }
+
+ flow->assoc_num = ctx->sa.assoc_num;
+ flow->rx_sa = ctx->sa.rx_sa;
+
+ /* Always match tagged packets on ingress */
+ flow->match.tagged = 1;
+ flow->match.sci = 1;
+
+ if (priv->secy->validate_frames != MACSEC_VALIDATE_DISABLED)
+ flow->match.untagged = 1;
+
+ return vsc8584_macsec_add_flow(phydev, flow, update);
+}
+
+static int __vsc8584_macsec_add_txsa(struct macsec_context *ctx,
+ struct macsec_flow *flow, bool update)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+
+ if (!flow) {
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+ }
+
+ flow->assoc_num = ctx->sa.assoc_num;
+ flow->tx_sa = ctx->sa.tx_sa;
+
+ /* Always match untagged packets on egress */
+ flow->match.untagged = 1;
+
+ return vsc8584_macsec_add_flow(phydev, flow, update);
+}
+
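+/* The MACsec offload ops below are called in two phases: once with
+ * ctx->prepare set, where resources can be reserved and errors returned,
+ * and once to commit the change. Operations that cannot fail only act on
+ * the commit step.
+ */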
+static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+
+ return 0;
+}
+
+static int vsc8584_macsec_dev_stop(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return 0;
+}
+
+static int vsc8584_macsec_add_secy(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_secy *secy = ctx->secy;
+
+ if (ctx->prepare) {
+ if (priv->secy)
+ return -EEXIST;
+
+ return 0;
+ }
+
+ priv->secy = secy;
+
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR,
+ secy->validate_frames != MACSEC_VALIDATE_DISABLED);
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR,
+ secy->validate_frames != MACSEC_VALIDATE_DISABLED);
+
+ return vsc8584_macsec_default_flows(ctx->phydev);
+}
+
+static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR, false);
+ vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR, false);
+
+ priv->secy = NULL;
+ return 0;
+}
+
+static int vsc8584_macsec_upd_secy(struct macsec_context *ctx)
+{
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_secy(ctx);
+ return vsc8584_macsec_add_secy(ctx);
+}
+
+static int vsc8584_macsec_add_rxsc(struct macsec_context *ctx)
+{
+ /* Nothing to do */
+ return 0;
+}
+
+static int vsc8584_macsec_upd_rxsc(struct macsec_context *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
+{
+ struct vsc8531_private *priv = ctx->phydev->priv;
+ struct macsec_flow *flow, *tmp;
+
+ /* No operation to perform before the commit step */
+ if (ctx->prepare)
+ return 0;
+
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
+ if (flow->bank == MACSEC_INGR && flow->rx_sa &&
+ flow->rx_sa->sc->sci == ctx->rx_sc->sci)
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ }
+
+ return 0;
+}
+
+static int vsc8584_macsec_add_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow = NULL;
+
+ if (ctx->prepare)
+ return __vsc8584_macsec_add_rxsa(ctx, flow, false);
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (ctx->prepare) {
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return __vsc8584_macsec_add_rxsa(ctx, flow, true);
+ }
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_add_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow = NULL;
+
+ if (ctx->prepare)
+ return __vsc8584_macsec_add_txsa(ctx, flow, false);
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (ctx->prepare) {
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
+
+ return __vsc8584_macsec_add_txsa(ctx, flow, true);
+ }
+
+ vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ return 0;
+}
+
+static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
+{
+ struct macsec_flow *flow;
+
+ flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+ if (ctx->prepare)
+ return 0;
+
+ vsc8584_macsec_del_flow(ctx->phydev, flow);
+ return 0;
+}
+
+static struct macsec_ops vsc8584_macsec_ops = {
+ .mdo_dev_open = vsc8584_macsec_dev_open,
+ .mdo_dev_stop = vsc8584_macsec_dev_stop,
+ .mdo_add_secy = vsc8584_macsec_add_secy,
+ .mdo_upd_secy = vsc8584_macsec_upd_secy,
+ .mdo_del_secy = vsc8584_macsec_del_secy,
+ .mdo_add_rxsc = vsc8584_macsec_add_rxsc,
+ .mdo_upd_rxsc = vsc8584_macsec_upd_rxsc,
+ .mdo_del_rxsc = vsc8584_macsec_del_rxsc,
+ .mdo_add_rxsa = vsc8584_macsec_add_rxsa,
+ .mdo_upd_rxsa = vsc8584_macsec_upd_rxsa,
+ .mdo_del_rxsa = vsc8584_macsec_del_rxsa,
+ .mdo_add_txsa = vsc8584_macsec_add_txsa,
+ .mdo_upd_txsa = vsc8584_macsec_upd_txsa,
+ .mdo_del_txsa = vsc8584_macsec_del_txsa,
+};
+
+int vsc8584_macsec_init(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531 = phydev->priv;
+
+ switch (phydev->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_VSC856X:
+ case PHY_ID_VSC8575:
+ case PHY_ID_VSC8582:
+ case PHY_ID_VSC8584:
+ INIT_LIST_HEAD(&vsc8531->macsec_flows);
+ vsc8531->secy = NULL;
+
+ phydev->macsec_ops = &vsc8584_macsec_ops;
+
+ return __vsc8584_macsec_init(phydev);
+ }
+
+ return 0;
+}
+
+void vsc8584_handle_macsec_interrupt(struct phy_device *phydev)
+{
+ struct vsc8531_private *priv = phydev->priv;
+ struct macsec_flow *flow, *tmp;
+ u32 cause, rec;
+
+ /* Check MACsec PN rollover */
+ cause = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
+ MSCC_MS_INTR_CTRL_STATUS);
+ cause &= MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS_M;
+ if (!(cause & MACSEC_INTR_CTRL_STATUS_ROLLOVER))
+ return;
+
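+	/* Offset of the sequence number within a transform record: the
+	 * control word, context ID and four hash-key words account for six
+	 * 32-bit words, plus the cipher key itself. A value of 0xffffffff
+	 * read back here means the packet number is about to roll over.
+	 */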
+ rec = 6 + priv->secy->key_len / sizeof(u32);
+ list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
+ u32 val;
+
+ if (flow->bank != MACSEC_EGR || !flow->has_transformation)
+ continue;
+
+ val = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
+ MSCC_MS_XFORM_REC(flow->index, rec));
+ if (val == 0xffffffff) {
+ vsc8584_macsec_flow_disable(phydev, flow);
+ macsec_pn_wrapped(priv->secy, flow->tx_sa);
+ return;
+ }
+ }
+}
+
+void vsc8584_config_macsec_intr(struct phy_device *phydev)
+{
+ phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2);
+ phy_write(phydev, MSCC_PHY_EXTENDED_INT, MSCC_PHY_EXTENDED_INT_MS_EGR);
+ phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ vsc8584_macsec_phy_write(phydev, MACSEC_EGR, MSCC_MS_AIC_CTRL, 0xf);
+ vsc8584_macsec_phy_write(phydev, MACSEC_EGR, MSCC_MS_INTR_CTRL_STATUS,
+ MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE(MACSEC_INTR_CTRL_STATUS_ROLLOVER));
+}
diff --git a/drivers/net/phy/mscc_macsec.h b/drivers/net/phy/mscc/mscc_macsec.h
index d9ab6aba7482..d0783944d106 100644
--- a/drivers/net/phy/mscc_macsec.h
+++ b/drivers/net/phy/mscc/mscc_macsec.h
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
- * Microsemi Ocelot Switch driver
+ * Driver for Microsemi VSC85xx PHYs
*
* Copyright (c) 2018 Microsemi Corporation
*/
-#ifndef _MSCC_OCELOT_MACSEC_H_
-#define _MSCC_OCELOT_MACSEC_H_
+#ifndef _MSCC_PHY_MACSEC_H_
+#define _MSCC_PHY_MACSEC_H_
+
+#include <net/macsec.h>
#define MSCC_MS_MAX_FLOWS 16
@@ -58,6 +60,62 @@ enum mscc_macsec_validate_levels {
MSCC_MS_VALIDATE_STRICT = 2,
};
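+/* Target selectors for the indirect CSR access: the upper bits (bank >> 2)
+ * are written to the target-select register; for non-MACsec targets the
+ * low two bits select the block within the target.
+ */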
+enum macsec_bank {
+ FC_BUFFER = 0x04,
+ HOST_MAC = 0x05,
+ LINE_MAC = 0x06,
+ IP_1588 = 0x0e,
+ MACSEC_INGR = 0x38,
+ MACSEC_EGR = 0x3c,
+};
+
+struct macsec_flow {
+ struct list_head list;
+ enum mscc_macsec_destination_ports port;
+ enum macsec_bank bank;
+ u32 index;
+ int assoc_num;
+ bool has_transformation;
+
+ /* Highest takes precedence [0..15] */
+ u8 priority;
+
+ u8 key[MACSEC_KEYID_LEN];
+
+ union {
+ struct macsec_rx_sa *rx_sa;
+ struct macsec_tx_sa *tx_sa;
+ };
+
+ /* Matching */
+ struct {
+ u8 sci:1;
+ u8 tagged:1;
+ u8 untagged:1;
+ u8 etype:1;
+ } match;
+
+ u16 etype;
+
+ /* Action */
+ struct {
+ u8 bypass:1;
+ u8 drop:1;
+ } action;
+};
+
+#define MSCC_EXT_PAGE_MACSEC_17 17
+#define MSCC_EXT_PAGE_MACSEC_18 18
+
+#define MSCC_EXT_PAGE_MACSEC_19 19
+#define MSCC_PHY_MACSEC_19_REG_ADDR(x) (x)
+#define MSCC_PHY_MACSEC_19_TARGET(x) ((x) << 12)
+#define MSCC_PHY_MACSEC_19_READ BIT(14)
+#define MSCC_PHY_MACSEC_19_CMD BIT(15)
+
+#define MSCC_EXT_PAGE_MACSEC_20 20
+#define MSCC_PHY_MACSEC_20_TARGET(x) (x)
+
#define MSCC_MS_XFORM_REC(x, y) (((x) << 5) + (y))
#define MSCC_MS_ENA_CFG 0x800
#define MSCC_MS_FC_CFG 0x804
@@ -263,4 +321,4 @@ enum mscc_macsec_validate_levels {
#define MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M GENMASK(31, 16)
#define MACSEC_INTR_CTRL_STATUS_ROLLOVER BIT(5)
-#endif
+#endif /* _MSCC_PHY_MACSEC_H_ */
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc/mscc_main.c
index f686f40f6bdc..acddef79f4e8 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -18,355 +18,7 @@
#include <linux/netdevice.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>
-#include <linux/scatterlist.h>
-#include <crypto/skcipher.h>
-
-#if IS_ENABLED(CONFIG_MACSEC)
-#include <net/macsec.h>
-#endif
-
-#include "mscc_macsec.h"
-#include "mscc_mac.h"
-#include "mscc_fc_buffer.h"
-
-enum rgmii_rx_clock_delay {
- RGMII_RX_CLK_DELAY_0_2_NS = 0,
- RGMII_RX_CLK_DELAY_0_8_NS = 1,
- RGMII_RX_CLK_DELAY_1_1_NS = 2,
- RGMII_RX_CLK_DELAY_1_7_NS = 3,
- RGMII_RX_CLK_DELAY_2_0_NS = 4,
- RGMII_RX_CLK_DELAY_2_3_NS = 5,
- RGMII_RX_CLK_DELAY_2_6_NS = 6,
- RGMII_RX_CLK_DELAY_3_4_NS = 7
-};
-
-/* Microsemi VSC85xx PHY registers */
-/* IEEE 802. Std Registers */
-#define MSCC_PHY_BYPASS_CONTROL 18
-#define DISABLE_HP_AUTO_MDIX_MASK 0x0080
-#define DISABLE_PAIR_SWAP_CORR_MASK 0x0020
-#define DISABLE_POLARITY_CORR_MASK 0x0010
-#define PARALLEL_DET_IGNORE_ADVERTISED 0x0008
-
-#define MSCC_PHY_EXT_CNTL_STATUS 22
-#define SMI_BROADCAST_WR_EN 0x0001
-
-#define MSCC_PHY_ERR_RX_CNT 19
-#define MSCC_PHY_ERR_FALSE_CARRIER_CNT 20
-#define MSCC_PHY_ERR_LINK_DISCONNECT_CNT 21
-#define ERR_CNT_MASK GENMASK(7, 0)
-
-#define MSCC_PHY_EXT_PHY_CNTL_1 23
-#define MAC_IF_SELECTION_MASK 0x1800
-#define MAC_IF_SELECTION_GMII 0
-#define MAC_IF_SELECTION_RMII 1
-#define MAC_IF_SELECTION_RGMII 2
-#define MAC_IF_SELECTION_POS 11
-#define VSC8584_MAC_IF_SELECTION_MASK 0x1000
-#define VSC8584_MAC_IF_SELECTION_SGMII 0
-#define VSC8584_MAC_IF_SELECTION_1000BASEX 1
-#define VSC8584_MAC_IF_SELECTION_POS 12
-#define FAR_END_LOOPBACK_MODE_MASK 0x0008
-#define MEDIA_OP_MODE_MASK 0x0700
-#define MEDIA_OP_MODE_COPPER 0
-#define MEDIA_OP_MODE_SERDES 1
-#define MEDIA_OP_MODE_1000BASEX 2
-#define MEDIA_OP_MODE_100BASEFX 3
-#define MEDIA_OP_MODE_AMS_COPPER_SERDES 5
-#define MEDIA_OP_MODE_AMS_COPPER_1000BASEX 6
-#define MEDIA_OP_MODE_AMS_COPPER_100BASEFX 7
-#define MEDIA_OP_MODE_POS 8
-
-#define MSCC_PHY_EXT_PHY_CNTL_2 24
-
-#define MII_VSC85XX_INT_MASK 25
-#define MII_VSC85XX_INT_MASK_MASK 0xa020
-#define MII_VSC85XX_INT_MASK_WOL 0x0040
-#define MII_VSC85XX_INT_STATUS 26
-
-#define MSCC_PHY_WOL_MAC_CONTROL 27
-#define EDGE_RATE_CNTL_POS 5
-#define EDGE_RATE_CNTL_MASK 0x00E0
-
-#define MSCC_PHY_DEV_AUX_CNTL 28
-#define HP_AUTO_MDIX_X_OVER_IND_MASK 0x2000
-
-#define MSCC_PHY_LED_MODE_SEL 29
-#define LED_MODE_SEL_POS(x) ((x) * 4)
-#define LED_MODE_SEL_MASK(x) (GENMASK(3, 0) << LED_MODE_SEL_POS(x))
-#define LED_MODE_SEL(x, mode) (((mode) << LED_MODE_SEL_POS(x)) & LED_MODE_SEL_MASK(x))
-
-#define MSCC_EXT_PAGE_CSR_CNTL_17 17
-#define MSCC_EXT_PAGE_CSR_CNTL_18 18
-
-#define MSCC_EXT_PAGE_CSR_CNTL_19 19
-#define MSCC_PHY_CSR_CNTL_19_REG_ADDR(x) (x)
-#define MSCC_PHY_CSR_CNTL_19_TARGET(x) ((x) << 12)
-#define MSCC_PHY_CSR_CNTL_19_READ BIT(14)
-#define MSCC_PHY_CSR_CNTL_19_CMD BIT(15)
-
-#define MSCC_EXT_PAGE_CSR_CNTL_20 20
-#define MSCC_PHY_CSR_CNTL_20_TARGET(x) (x)
-
-#define PHY_MCB_TARGET 0x07
-#define PHY_MCB_S6G_WRITE BIT(31)
-#define PHY_MCB_S6G_READ BIT(30)
-
-#define PHY_S6G_PLL5G_CFG0 0x06
-#define PHY_S6G_LCPLL_CFG 0x11
-#define PHY_S6G_PLL_CFG 0x2b
-#define PHY_S6G_COMMON_CFG 0x2c
-#define PHY_S6G_GPC_CFG 0x2e
-#define PHY_S6G_MISC_CFG 0x3b
-#define PHY_MCB_S6G_CFG 0x3f
-#define PHY_S6G_DFT_CFG2 0x3e
-#define PHY_S6G_PLL_STATUS 0x31
-#define PHY_S6G_IB_STATUS0 0x2f
-
-#define PHY_S6G_SYS_RST_POS 31
-#define PHY_S6G_ENA_LANE_POS 18
-#define PHY_S6G_ENA_LOOP_POS 8
-#define PHY_S6G_QRATE_POS 6
-#define PHY_S6G_IF_MODE_POS 4
-#define PHY_S6G_PLL_ENA_OFFS_POS 21
-#define PHY_S6G_PLL_FSM_CTRL_DATA_POS 8
-#define PHY_S6G_PLL_FSM_ENA_POS 7
-
-#define MSCC_EXT_PAGE_MACSEC_17 17
-#define MSCC_EXT_PAGE_MACSEC_18 18
-
-#define MSCC_EXT_PAGE_MACSEC_19 19
-#define MSCC_PHY_MACSEC_19_REG_ADDR(x) (x)
-#define MSCC_PHY_MACSEC_19_TARGET(x) ((x) << 12)
-#define MSCC_PHY_MACSEC_19_READ BIT(14)
-#define MSCC_PHY_MACSEC_19_CMD BIT(15)
-
-#define MSCC_EXT_PAGE_MACSEC_20 20
-#define MSCC_PHY_MACSEC_20_TARGET(x) (x)
-enum macsec_bank {
- FC_BUFFER = 0x04,
- HOST_MAC = 0x05,
- LINE_MAC = 0x06,
- IP_1588 = 0x0e,
- MACSEC_INGR = 0x38,
- MACSEC_EGR = 0x3c,
-};
-
-#define MSCC_EXT_PAGE_ACCESS 31
-#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
-#define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
-#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */
-#define MSCC_PHY_PAGE_EXTENDED_3 0x0003 /* Extended reg - page 3 */
-#define MSCC_PHY_PAGE_EXTENDED_4 0x0004 /* Extended reg - page 4 */
-#define MSCC_PHY_PAGE_CSR_CNTL MSCC_PHY_PAGE_EXTENDED_4
-#define MSCC_PHY_PAGE_MACSEC MSCC_PHY_PAGE_EXTENDED_4
-/* Extended reg - GPIO; this is a bank of registers that are shared for all PHYs
- * in the same package.
- */
-#define MSCC_PHY_PAGE_EXTENDED_GPIO 0x0010 /* Extended reg - GPIO */
-#define MSCC_PHY_PAGE_TEST 0x2a30 /* Test reg */
-#define MSCC_PHY_PAGE_TR 0x52b5 /* Token ring registers */
-
-/* Extended Page 1 Registers */
-#define MSCC_PHY_CU_MEDIA_CRC_VALID_CNT 18
-#define VALID_CRC_CNT_CRC_MASK GENMASK(13, 0)
-
-#define MSCC_PHY_EXT_MODE_CNTL 19
-#define FORCE_MDI_CROSSOVER_MASK 0x000C
-#define FORCE_MDI_CROSSOVER_MDIX 0x000C
-#define FORCE_MDI_CROSSOVER_MDI 0x0008
-
-#define MSCC_PHY_ACTIPHY_CNTL 20
-#define PHY_ADDR_REVERSED 0x0200
-#define DOWNSHIFT_CNTL_MASK 0x001C
-#define DOWNSHIFT_EN 0x0010
-#define DOWNSHIFT_CNTL_POS 2
-
-#define MSCC_PHY_EXT_PHY_CNTL_4 23
-#define PHY_CNTL_4_ADDR_POS 11
-
-#define MSCC_PHY_VERIPHY_CNTL_2 25
-
-#define MSCC_PHY_VERIPHY_CNTL_3 26
-
-/* Extended Page 2 Registers */
-#define MSCC_PHY_CU_PMD_TX_CNTL 16
-
-#define MSCC_PHY_RGMII_CNTL 20
-#define RGMII_RX_CLK_DELAY_MASK 0x0070
-#define RGMII_RX_CLK_DELAY_POS 4
-
-#define MSCC_PHY_WOL_LOWER_MAC_ADDR 21
-#define MSCC_PHY_WOL_MID_MAC_ADDR 22
-#define MSCC_PHY_WOL_UPPER_MAC_ADDR 23
-#define MSCC_PHY_WOL_LOWER_PASSWD 24
-#define MSCC_PHY_WOL_MID_PASSWD 25
-#define MSCC_PHY_WOL_UPPER_PASSWD 26
-
-#define MSCC_PHY_WOL_MAC_CONTROL 27
-#define SECURE_ON_ENABLE 0x8000
-#define SECURE_ON_PASSWD_LEN_4 0x4000
-
-#define MSCC_PHY_EXTENDED_INT 28
-#define MSCC_PHY_EXTENDED_INT_MS_EGR BIT(9)
-
-/* Extended Page 3 Registers */
-#define MSCC_PHY_SERDES_TX_VALID_CNT 21
-#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22
-#define MSCC_PHY_SERDES_RX_VALID_CNT 28
-#define MSCC_PHY_SERDES_RX_CRC_ERR_CNT 29
-
-/* Extended page GPIO Registers */
-#define MSCC_DW8051_CNTL_STATUS 0
-#define MICRO_NSOFT_RESET 0x8000
-#define RUN_FROM_INT_ROM 0x4000
-#define AUTOINC_ADDR 0x2000
-#define PATCH_RAM_CLK 0x1000
-#define MICRO_PATCH_EN 0x0080
-#define DW8051_CLK_EN 0x0010
-#define MICRO_CLK_EN 0x0008
-#define MICRO_CLK_DIVIDE(x) ((x) >> 1)
-#define MSCC_DW8051_VLD_MASK 0xf1ff
-
-/* x Address in range 1-4 */
-#define MSCC_TRAP_ROM_ADDR(x) ((x) * 2 + 1)
-#define MSCC_PATCH_RAM_ADDR(x) (((x) + 1) * 2)
-#define MSCC_INT_MEM_ADDR 11
-
-#define MSCC_INT_MEM_CNTL 12
-#define READ_SFR 0x6000
-#define READ_PRAM 0x4000
-#define READ_ROM 0x2000
-#define READ_RAM 0x0000
-#define INT_MEM_WRITE_EN 0x1000
-#define EN_PATCH_RAM_TRAP_ADDR(x) (0x0100 << ((x) - 1))
-#define INT_MEM_DATA_M 0x00ff
-#define INT_MEM_DATA(x) (INT_MEM_DATA_M & (x))
-
-#define MSCC_PHY_PROC_CMD 18
-#define PROC_CMD_NCOMPLETED 0x8000
-#define PROC_CMD_FAILED 0x4000
-#define PROC_CMD_SGMII_PORT(x) ((x) << 8)
-#define PROC_CMD_FIBER_PORT(x) (0x0100 << (x) % 4)
-#define PROC_CMD_QSGMII_PORT 0x0c00
-#define PROC_CMD_RST_CONF_PORT 0x0080
-#define PROC_CMD_RECONF_PORT 0x0000
-#define PROC_CMD_READ_MOD_WRITE_PORT 0x0040
-#define PROC_CMD_WRITE 0x0040
-#define PROC_CMD_READ 0x0000
-#define PROC_CMD_FIBER_DISABLE 0x0020
-#define PROC_CMD_FIBER_100BASE_FX 0x0010
-#define PROC_CMD_FIBER_1000BASE_X 0x0000
-#define PROC_CMD_SGMII_MAC 0x0030
-#define PROC_CMD_QSGMII_MAC 0x0020
-#define PROC_CMD_NO_MAC_CONF 0x0000
-#define PROC_CMD_1588_DEFAULT_INIT 0x0010
-#define PROC_CMD_NOP 0x000f
-#define PROC_CMD_PHY_INIT 0x000a
-#define PROC_CMD_CRC16 0x0008
-#define PROC_CMD_FIBER_MEDIA_CONF 0x0001
-#define PROC_CMD_MCB_ACCESS_MAC_CONF 0x0000
-#define PROC_CMD_NCOMPLETED_TIMEOUT_MS 500
-
-#define MSCC_PHY_MAC_CFG_FASTLINK 19
-#define MAC_CFG_MASK 0xc000
-#define MAC_CFG_SGMII 0x0000
-#define MAC_CFG_QSGMII 0x4000
-
-/* Test page Registers */
-#define MSCC_PHY_TEST_PAGE_5 5
-#define MSCC_PHY_TEST_PAGE_8 8
-#define MSCC_PHY_TEST_PAGE_9 9
-#define MSCC_PHY_TEST_PAGE_20 20
-#define MSCC_PHY_TEST_PAGE_24 24
-
-/* Token ring page Registers */
-#define MSCC_PHY_TR_CNTL 16
-#define TR_WRITE 0x8000
-#define TR_ADDR(x) (0x7fff & (x))
-#define MSCC_PHY_TR_LSB 17
-#define MSCC_PHY_TR_MSB 18
-
-/* Microsemi PHY ID's
- * Code assumes lowest nibble is 0
- */
-#define PHY_ID_VSC8504 0x000704c0
-#define PHY_ID_VSC8514 0x00070670
-#define PHY_ID_VSC8530 0x00070560
-#define PHY_ID_VSC8531 0x00070570
-#define PHY_ID_VSC8540 0x00070760
-#define PHY_ID_VSC8541 0x00070770
-#define PHY_ID_VSC8552 0x000704e0
-#define PHY_ID_VSC856X 0x000707e0
-#define PHY_ID_VSC8572 0x000704d0
-#define PHY_ID_VSC8574 0x000704a0
-#define PHY_ID_VSC8575 0x000707d0
-#define PHY_ID_VSC8582 0x000707b0
-#define PHY_ID_VSC8584 0x000707c0
-
-#define MSCC_VDDMAC_1500 1500
-#define MSCC_VDDMAC_1800 1800
-#define MSCC_VDDMAC_2500 2500
-#define MSCC_VDDMAC_3300 3300
-
-#define DOWNSHIFT_COUNT_MAX 5
-
-#define MAX_LEDS 4
-
-#define VSC8584_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \
- BIT(VSC8531_LINK_1000_ACTIVITY) | \
- BIT(VSC8531_LINK_100_ACTIVITY) | \
- BIT(VSC8531_LINK_10_ACTIVITY) | \
- BIT(VSC8531_LINK_100_1000_ACTIVITY) | \
- BIT(VSC8531_LINK_10_1000_ACTIVITY) | \
- BIT(VSC8531_LINK_10_100_ACTIVITY) | \
- BIT(VSC8584_LINK_100FX_1000X_ACTIVITY) | \
- BIT(VSC8531_DUPLEX_COLLISION) | \
- BIT(VSC8531_COLLISION) | \
- BIT(VSC8531_ACTIVITY) | \
- BIT(VSC8584_100FX_1000X_ACTIVITY) | \
- BIT(VSC8531_AUTONEG_FAULT) | \
- BIT(VSC8531_SERIAL_MODE) | \
- BIT(VSC8531_FORCE_LED_OFF) | \
- BIT(VSC8531_FORCE_LED_ON))
-
-#define VSC85XX_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \
- BIT(VSC8531_LINK_1000_ACTIVITY) | \
- BIT(VSC8531_LINK_100_ACTIVITY) | \
- BIT(VSC8531_LINK_10_ACTIVITY) | \
- BIT(VSC8531_LINK_100_1000_ACTIVITY) | \
- BIT(VSC8531_LINK_10_1000_ACTIVITY) | \
- BIT(VSC8531_LINK_10_100_ACTIVITY) | \
- BIT(VSC8531_DUPLEX_COLLISION) | \
- BIT(VSC8531_COLLISION) | \
- BIT(VSC8531_ACTIVITY) | \
- BIT(VSC8531_AUTONEG_FAULT) | \
- BIT(VSC8531_SERIAL_MODE) | \
- BIT(VSC8531_FORCE_LED_OFF) | \
- BIT(VSC8531_FORCE_LED_ON))
-
-#define MSCC_VSC8584_REVB_INT8051_FW "microchip/mscc_vsc8584_revb_int8051_fb48.bin"
-#define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR 0xe800
-#define MSCC_VSC8584_REVB_INT8051_FW_CRC 0xfb48
-
-#define MSCC_VSC8574_REVB_INT8051_FW "microchip/mscc_vsc8574_revb_int8051_29e8.bin"
-#define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR 0x4000
-#define MSCC_VSC8574_REVB_INT8051_FW_CRC 0x29e8
-
-#define VSC8584_REVB 0x0001
-#define MSCC_DEV_REV_MASK GENMASK(3, 0)
-
-struct reg_val {
- u16 reg;
- u32 val;
-};
-
-struct vsc85xx_hw_stat {
- const char *string;
- u8 reg;
- u16 page;
- u16 mask;
-};
+#include "mscc.h"
static const struct vsc85xx_hw_stat vsc85xx_hw_stats[] = {
{
@@ -446,85 +98,14 @@ static const struct vsc85xx_hw_stat vsc8584_hw_stats[] = {
},
};
-#if IS_ENABLED(CONFIG_MACSEC)
-struct macsec_flow {
- struct list_head list;
- enum mscc_macsec_destination_ports port;
- enum macsec_bank bank;
- u32 index;
- int assoc_num;
- bool has_transformation;
-
- /* Highest takes precedence [0..15] */
- u8 priority;
-
- u8 key[MACSEC_KEYID_LEN];
-
- union {
- struct macsec_rx_sa *rx_sa;
- struct macsec_tx_sa *tx_sa;
- };
-
- /* Matching */
- struct {
- u8 sci:1;
- u8 tagged:1;
- u8 untagged:1;
- u8 etype:1;
- } match;
-
- u16 etype;
-
- /* Action */
- struct {
- u8 bypass:1;
- u8 drop:1;
- } action;
-
-};
-#endif
-
-struct vsc8531_private {
- int rate_magic;
- u16 supp_led_modes;
- u32 leds_mode[MAX_LEDS];
- u8 nleds;
- const struct vsc85xx_hw_stat *hw_stats;
- u64 *stats;
- int nstats;
- bool pkg_init;
- /* For multiple port PHYs; the MDIO address of the base PHY in the
- * package.
- */
- unsigned int base_addr;
-
-#if IS_ENABLED(CONFIG_MACSEC)
- /* MACsec fields:
- * - One SecY per device (enforced at the s/w implementation level)
- * - macsec_flows: list of h/w flows
- * - ingr_flows: bitmap of ingress flows
- * - egr_flows: bitmap of egress flows
- */
- struct macsec_secy *secy;
- struct list_head macsec_flows;
- unsigned long ingr_flows;
- unsigned long egr_flows;
-#endif
-};
-
#ifdef CONFIG_OF_MDIO
-struct vsc8531_edge_rate_table {
- u32 vddmac;
- u32 slowdown[8];
-};
-
static const struct vsc8531_edge_rate_table edge_table[] = {
{MSCC_VDDMAC_3300, { 0, 2, 4, 7, 10, 17, 29, 53} },
{MSCC_VDDMAC_2500, { 0, 3, 6, 10, 14, 23, 37, 63} },
{MSCC_VDDMAC_1800, { 0, 5, 9, 16, 23, 35, 52, 76} },
{MSCC_VDDMAC_1500, { 0, 6, 14, 21, 29, 42, 58, 77} },
};
-#endif /* CONFIG_OF_MDIO */
+#endif
static int vsc85xx_phy_read_page(struct phy_device *phydev)
{
@@ -910,6 +491,9 @@ static int vsc85xx_mac_if_set(struct phy_device *phydev,
reg_val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
reg_val &= ~(MAC_IF_SELECTION_MASK);
switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
reg_val |= (MAC_IF_SELECTION_RGMII << MAC_IF_SELECTION_POS);
break;
@@ -936,18 +520,34 @@ out_unlock:
return rc;
}
-static int vsc85xx_default_config(struct phy_device *phydev)
+/* Set the RGMII RX and TX clock skews individually, according to the PHY
+ * interface type, to:
+ * * 0.2 ns (their default, and lowest, hardware value) if delays should
+ * not be enabled
+ * * 2.0 ns (which causes the data to be sampled exactly halfway between
+ * clock transitions at 1000 Mbps) if delays should be enabled
+ */
+static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
+ u16 rgmii_rx_delay_mask,
+ u16 rgmii_tx_delay_mask)
{
+ u16 rgmii_rx_delay_pos = ffs(rgmii_rx_delay_mask) - 1;
+ u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1;
+ u16 reg_val = 0;
int rc;
- u16 reg_val;
- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
mutex_lock(&phydev->lock);
- reg_val = RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ reg_val |= RGMII_CLK_DELAY_2_0_NS << rgmii_rx_delay_pos;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ reg_val |= RGMII_CLK_DELAY_2_0_NS << rgmii_tx_delay_pos;
rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
- MSCC_PHY_RGMII_CNTL, RGMII_RX_CLK_DELAY_MASK,
+ rgmii_cntl,
+ rgmii_rx_delay_mask | rgmii_tx_delay_mask,
reg_val);
mutex_unlock(&phydev->lock);
@@ -955,6 +555,23 @@ static int vsc85xx_default_config(struct phy_device *phydev)
return rc;
}
+static int vsc85xx_default_config(struct phy_device *phydev)
+{
+ int rc;
+
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ if (phy_interface_mode_is_rgmii(phydev->interface)) {
+ rc = vsc85xx_rgmii_set_skews(phydev, VSC8502_RGMII_CNTL,
+ VSC8502_RGMII_RX_DELAY_MASK,
+ VSC8502_RGMII_TX_DELAY_MASK);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
static int vsc85xx_get_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna, void *data)
{
@@ -1670,978 +1287,6 @@ out:
return ret;
}
-#if IS_ENABLED(CONFIG_MACSEC)
-static u32 vsc8584_macsec_phy_read(struct phy_device *phydev,
- enum macsec_bank bank, u32 reg)
-{
- u32 val, val_l = 0, val_h = 0;
- unsigned long deadline;
- int rc;
-
- rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
- if (rc < 0)
- goto failed;
-
- __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
- MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
-
- if (bank >> 2 == 0x1)
- /* non-MACsec access */
- bank &= 0x3;
- else
- bank = 0;
-
- __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
- MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_READ |
- MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
- MSCC_PHY_MACSEC_19_TARGET(bank));
-
- deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
- do {
- val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
- } while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
-
- val_l = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_17);
- val_h = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_18);
-
-failed:
- phy_restore_page(phydev, rc, rc);
-
- return (val_h << 16) | val_l;
-}
-
-static void vsc8584_macsec_phy_write(struct phy_device *phydev,
- enum macsec_bank bank, u32 reg, u32 val)
-{
- unsigned long deadline;
- int rc;
-
- rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
- if (rc < 0)
- goto failed;
-
- __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
- MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
-
- if ((bank >> 2 == 0x1) || (bank >> 2 == 0x3))
- bank &= 0x3;
- else
- /* MACsec access */
- bank = 0;
-
- __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_17, (u16)val);
- __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_18, (u16)(val >> 16));
-
- __phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
- MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
- MSCC_PHY_MACSEC_19_TARGET(bank));
-
- deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
- do {
- val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
- } while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
-
-failed:
- phy_restore_page(phydev, rc, rc);
-}
-
-static void vsc8584_macsec_classification(struct phy_device *phydev,
- enum macsec_bank bank)
-{
- /* enable VLAN tag parsing */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_CP_TAG,
- MSCC_MS_SAM_CP_TAG_PARSE_STAG |
- MSCC_MS_SAM_CP_TAG_PARSE_QTAG |
- MSCC_MS_SAM_CP_TAG_PARSE_QINQ);
-}
-
-static void vsc8584_macsec_flow_default_action(struct phy_device *phydev,
- enum macsec_bank bank,
- bool block)
-{
- u32 port = (bank == MACSEC_INGR) ?
- MSCC_MS_PORT_UNCONTROLLED : MSCC_MS_PORT_COMMON;
- u32 action = MSCC_MS_FLOW_BYPASS;
-
- if (block)
- action = MSCC_MS_FLOW_DROP;
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_NCP,
- /* MACsec untagged */
- MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DEST_PORT(port) |
- /* MACsec tagged */
- MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DEST_PORT(port) |
- /* Bad tag */
- MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DEST_PORT(port) |
- /* Kay tag */
- MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_NCP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_NCP_KAY_DEST_PORT(port));
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_CP,
- /* MACsec untagged */
- MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DEST_PORT(port) |
- /* MACsec tagged */
- MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DEST_PORT(port) |
- /* Bad tag */
- MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DEST_PORT(port) |
- /* Kay tag */
- MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
- MSCC_MS_SAM_NM_FLOW_CP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_NM_FLOW_CP_KAY_DEST_PORT(port));
-}
-
-static void vsc8584_macsec_integrity_checks(struct phy_device *phydev,
- enum macsec_bank bank)
-{
- u32 val;
-
- if (bank != MACSEC_INGR)
- return;
-
- /* Set default rules to pass unmatched frames */
- val = vsc8584_macsec_phy_read(phydev, bank,
- MSCC_MS_PARAMS2_IG_CC_CONTROL);
- val |= MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_CTRL_ACT |
- MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_ACT;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CC_CONTROL,
- val);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CP_TAG,
- MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_STAG |
- MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QTAG |
- MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QINQ);
-}
-
-static void vsc8584_macsec_block_init(struct phy_device *phydev,
- enum macsec_bank bank)
-{
- u32 val;
- int i;
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
- MSCC_MS_ENA_CFG_SW_RST |
- MSCC_MS_ENA_CFG_MACSEC_BYPASS_ENA);
-
- /* Set the MACsec block out of s/w reset and enable clocks */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
- MSCC_MS_ENA_CFG_CLK_ENA);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_STATUS_CONTEXT_CTRL,
- bank == MACSEC_INGR ? 0xe5880214 : 0xe5880218);
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_MISC_CONTROL,
- MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX(bank == MACSEC_INGR ? 57 : 40) |
- MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE(bank == MACSEC_INGR ? 1 : 2));
-
- /* Clear the counters */
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
- val |= MSCC_MS_COUNT_CONTROL_AUTO_CNTR_RESET;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
-
- /* Enable octet increment mode */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PP_CTRL,
- MSCC_MS_PP_CTRL_MACSEC_OCTET_INCR_MODE);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_BLOCK_CTX_UPDATE, 0x3);
-
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
- val |= MSCC_MS_COUNT_CONTROL_RESET_ALL;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
-
- /* Set the MTU */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_NON_VLAN_MTU_CHECK,
- MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE(32761) |
- MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMP_DROP);
-
- for (i = 0; i < 8; i++)
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_VLAN_MTU_CHECK(i),
- MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE(32761) |
- MSCC_MS_VLAN_MTU_CHECK_MTU_COMP_DROP);
-
- if (bank == MACSEC_EGR) {
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_INTR_CTRL_STATUS);
- val &= ~MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_INTR_CTRL_STATUS, val);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_FC_CFG,
- MSCC_MS_FC_CFG_FCBUF_ENA |
- MSCC_MS_FC_CFG_LOW_THRESH(0x1) |
- MSCC_MS_FC_CFG_HIGH_THRESH(0x4) |
- MSCC_MS_FC_CFG_LOW_BYTES_VAL(0x4) |
- MSCC_MS_FC_CFG_HIGH_BYTES_VAL(0x6));
- }
-
- vsc8584_macsec_classification(phydev, bank);
- vsc8584_macsec_flow_default_action(phydev, bank, false);
- vsc8584_macsec_integrity_checks(phydev, bank);
-
- /* Enable the MACsec block */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
- MSCC_MS_ENA_CFG_CLK_ENA |
- MSCC_MS_ENA_CFG_MACSEC_ENA |
- MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE(0x5));
-}
-
-static void vsc8584_macsec_mac_init(struct phy_device *phydev,
- enum macsec_bank bank)
-{
- u32 val;
- int i;
-
- /* Clear host & line stats */
- for (i = 0; i < 36; i++)
- vsc8584_macsec_phy_write(phydev, bank, 0x1c + i, 0);
-
- val = vsc8584_macsec_phy_read(phydev, bank,
- MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL);
- val &= ~MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE_M;
- val |= MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE(2) |
- MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE(0xffff);
- vsc8584_macsec_phy_write(phydev, bank,
- MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL, val);
-
- val = vsc8584_macsec_phy_read(phydev, bank,
- MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2);
- val |= 0xffff;
- vsc8584_macsec_phy_write(phydev, bank,
- MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2, val);
-
- val = vsc8584_macsec_phy_read(phydev, bank,
- MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL);
- if (bank == HOST_MAC)
- val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_TIMER_ENA |
- MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA;
- else
- val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_REACT_ENA |
- MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA |
- MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_MODE |
- MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_EARLY_PAUSE_DETECT_ENA;
- vsc8584_macsec_phy_write(phydev, bank,
- MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL, val);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_PKTINF_CFG,
- MSCC_MAC_CFG_PKTINF_CFG_STRIP_FCS_ENA |
- MSCC_MAC_CFG_PKTINF_CFG_INSERT_FCS_ENA |
- MSCC_MAC_CFG_PKTINF_CFG_LPI_RELAY_ENA |
- MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA |
- MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA |
- (bank == HOST_MAC ?
- MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING : 0));
-
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MODE_CFG);
- val &= ~MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MODE_CFG, val);
-
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG);
- val &= ~MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_M;
- val |= MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN(10240);
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG, val);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ADV_CHK_CFG,
- MSCC_MAC_CFG_ADV_CHK_CFG_SFD_CHK_ENA |
- MSCC_MAC_CFG_ADV_CHK_CFG_PRM_CHK_ENA |
- MSCC_MAC_CFG_ADV_CHK_CFG_OOR_ERR_ENA |
- MSCC_MAC_CFG_ADV_CHK_CFG_INR_ERR_ENA);
-
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_LFS_CFG);
- val &= ~MSCC_MAC_CFG_LFS_CFG_LFS_MODE_ENA;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_LFS_CFG, val);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ENA_CFG,
- MSCC_MAC_CFG_ENA_CFG_RX_CLK_ENA |
- MSCC_MAC_CFG_ENA_CFG_TX_CLK_ENA |
- MSCC_MAC_CFG_ENA_CFG_RX_ENA |
- MSCC_MAC_CFG_ENA_CFG_TX_ENA);
-}
-
-/* Must be called with mdio_lock taken */
-static int vsc8584_macsec_init(struct phy_device *phydev)
-{
- u32 val;
-
- vsc8584_macsec_block_init(phydev, MACSEC_INGR);
- vsc8584_macsec_block_init(phydev, MACSEC_EGR);
- vsc8584_macsec_mac_init(phydev, HOST_MAC);
- vsc8584_macsec_mac_init(phydev, LINE_MAC);
-
- vsc8584_macsec_phy_write(phydev, FC_BUFFER,
- MSCC_FCBUF_FC_READ_THRESH_CFG,
- MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH(4) |
- MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(5));
-
- val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG);
- val |= MSCC_FCBUF_MODE_CFG_PAUSE_GEN_ENA |
- MSCC_FCBUF_MODE_CFG_RX_PPM_RATE_ADAPT_ENA |
- MSCC_FCBUF_MODE_CFG_TX_PPM_RATE_ADAPT_ENA;
- vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG, val);
-
- vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG,
- MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH(8) |
- MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET(9));
-
- val = vsc8584_macsec_phy_read(phydev, FC_BUFFER,
- MSCC_FCBUF_TX_DATA_QUEUE_CFG);
- val &= ~(MSCC_FCBUF_TX_DATA_QUEUE_CFG_START_M |
- MSCC_FCBUF_TX_DATA_QUEUE_CFG_END_M);
- val |= MSCC_FCBUF_TX_DATA_QUEUE_CFG_START(0) |
- MSCC_FCBUF_TX_DATA_QUEUE_CFG_END(5119);
- vsc8584_macsec_phy_write(phydev, FC_BUFFER,
- MSCC_FCBUF_TX_DATA_QUEUE_CFG, val);
-
- val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG);
- val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA;
- vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val);
-
- val = vsc8584_macsec_phy_read(phydev, IP_1588,
- MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL);
- val &= ~MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M;
- val |= MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4);
- vsc8584_macsec_phy_write(phydev, IP_1588,
- MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL, val);
-
- return 0;
-}
-
-static void vsc8584_macsec_flow(struct phy_device *phydev,
- struct macsec_flow *flow)
-{
- struct vsc8531_private *priv = phydev->priv;
- enum macsec_bank bank = flow->bank;
- u32 val, match = 0, mask = 0, action = 0, idx = flow->index;
-
- if (flow->match.tagged)
- match |= MSCC_MS_SAM_MISC_MATCH_TAGGED;
- if (flow->match.untagged)
- match |= MSCC_MS_SAM_MISC_MATCH_UNTAGGED;
-
- if (bank == MACSEC_INGR && flow->assoc_num >= 0) {
- match |= MSCC_MS_SAM_MISC_MATCH_AN(flow->assoc_num);
- mask |= MSCC_MS_SAM_MASK_AN_MASK(0x3);
- }
-
- if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) {
- match |= MSCC_MS_SAM_MISC_MATCH_TCI(BIT(3));
- mask |= MSCC_MS_SAM_MASK_TCI_MASK(BIT(3)) |
- MSCC_MS_SAM_MASK_SCI_MASK;
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_LO(idx),
- lower_32_bits(flow->rx_sa->sc->sci));
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_HI(idx),
- upper_32_bits(flow->rx_sa->sc->sci));
- }
-
- if (flow->match.etype) {
- mask |= MSCC_MS_SAM_MASK_MAC_ETYPE_MASK;
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MAC_SA_MATCH_HI(idx),
- MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE(htons(flow->etype)));
- }
-
- match |= MSCC_MS_SAM_MISC_MATCH_PRIORITY(flow->priority);
-
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MISC_MATCH(idx), match);
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MASK(idx), mask);
-
- /* Action for matching packets */
- if (flow->action.drop)
- action = MSCC_MS_FLOW_DROP;
- else if (flow->action.bypass || flow->port == MSCC_MS_PORT_UNCONTROLLED)
- action = MSCC_MS_FLOW_BYPASS;
- else
- action = (bank == MACSEC_INGR) ?
- MSCC_MS_FLOW_INGRESS : MSCC_MS_FLOW_EGRESS;
-
- val = MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE(action) |
- MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION(MSCC_MS_ACTION_DROP) |
- MSCC_MS_SAM_FLOW_CTRL_DEST_PORT(flow->port);
-
- if (action == MSCC_MS_FLOW_BYPASS)
- goto write_ctrl;
-
- if (bank == MACSEC_INGR) {
- if (priv->secy->replay_protect)
- val |= MSCC_MS_SAM_FLOW_CTRL_REPLAY_PROTECT;
- if (priv->secy->validate_frames == MACSEC_VALIDATE_STRICT)
- val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_STRICT);
- else if (priv->secy->validate_frames == MACSEC_VALIDATE_CHECK)
- val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_CHECK);
- } else if (bank == MACSEC_EGR) {
- if (priv->secy->protect_frames)
- val |= MSCC_MS_SAM_FLOW_CTRL_PROTECT_FRAME;
- if (priv->secy->tx_sc.encrypt)
- val |= MSCC_MS_SAM_FLOW_CTRL_CONF_PROTECT;
- if (priv->secy->tx_sc.send_sci)
- val |= MSCC_MS_SAM_FLOW_CTRL_INCLUDE_SCI;
- }
-
-write_ctrl:
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
-}
-
-static struct macsec_flow *vsc8584_macsec_find_flow(struct macsec_context *ctx,
- enum macsec_bank bank)
-{
- struct vsc8531_private *priv = ctx->phydev->priv;
- struct macsec_flow *pos, *tmp;
-
- list_for_each_entry_safe(pos, tmp, &priv->macsec_flows, list)
- if (pos->assoc_num == ctx->sa.assoc_num && pos->bank == bank)
- return pos;
-
- return ERR_PTR(-ENOENT);
-}
-
-static void vsc8584_macsec_flow_enable(struct phy_device *phydev,
- struct macsec_flow *flow)
-{
- enum macsec_bank bank = flow->bank;
- u32 val, idx = flow->index;
-
- if ((flow->bank == MACSEC_INGR && flow->rx_sa && !flow->rx_sa->active) ||
- (flow->bank == MACSEC_EGR && flow->tx_sa && !flow->tx_sa->active))
- return;
-
- /* Enable */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_SET1, BIT(idx));
-
- /* Set in-use */
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
- val |= MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
-}
-
-static void vsc8584_macsec_flow_disable(struct phy_device *phydev,
- struct macsec_flow *flow)
-{
- enum macsec_bank bank = flow->bank;
- u32 val, idx = flow->index;
-
- /* Disable */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_CLEAR1, BIT(idx));
-
- /* Clear in-use */
- val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
- val &= ~MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
-}
-
-static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
-{
- if (flow->bank == MACSEC_INGR)
- return flow->index + MSCC_MS_MAX_FLOWS;
-
- return flow->index;
-}
-
-/* Derive the AES key to get a key for the hash authentication */
-static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
- u16 key_len, u8 hkey[16])
-{
- struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
- struct skcipher_request *req = NULL;
- struct scatterlist src, dst;
- DECLARE_CRYPTO_WAIT(wait);
- u32 input[4] = {0};
- int ret;
-
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- req = skcipher_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- ret = -ENOMEM;
- goto out;
- }
-
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
- &wait);
- ret = crypto_skcipher_setkey(tfm, key, key_len);
- if (ret < 0)
- goto out;
-
- sg_init_one(&src, input, 16);
- sg_init_one(&dst, hkey, 16);
- skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
-
- ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
-
-out:
- skcipher_request_free(req);
- crypto_free_skcipher(tfm);
- return ret;
-}
-
-static int vsc8584_macsec_transformation(struct phy_device *phydev,
- struct macsec_flow *flow)
-{
- struct vsc8531_private *priv = phydev->priv;
- enum macsec_bank bank = flow->bank;
- int i, ret, index = flow->index;
- u32 rec = 0, control = 0;
- u8 hkey[16];
- sci_t sci;
-
- ret = vsc8584_macsec_derive_key(flow->key, priv->secy->key_len, hkey);
- if (ret)
- return ret;
-
- switch (priv->secy->key_len) {
- case 16:
- control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_128);
- break;
- case 32:
- control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_256);
- break;
- default:
- return -EINVAL;
- }
-
- control |= (bank == MACSEC_EGR) ?
- (CONTROL_TYPE_EGRESS | CONTROL_AN(priv->secy->tx_sc.encoding_sa)) :
- (CONTROL_TYPE_INGRESS | CONTROL_SEQ_MASK);
-
- control |= CONTROL_UPDATE_SEQ | CONTROL_ENCRYPT_AUTH | CONTROL_KEY_IN_CTX |
- CONTROL_IV0 | CONTROL_IV1 | CONTROL_IV_IN_SEQ |
- CONTROL_DIGEST_TYPE(0x2) | CONTROL_SEQ_TYPE(0x1) |
- CONTROL_AUTH_ALG(AUTH_ALG_AES_GHAS) | CONTROL_CONTEXT_ID;
-
- /* Set the control word */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
- control);
-
- /* Set the context ID. Must be unique. */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
- vsc8584_macsec_flow_context_id(flow));
-
- /* Set the encryption/decryption key */
- for (i = 0; i < priv->secy->key_len / sizeof(u32); i++)
- vsc8584_macsec_phy_write(phydev, bank,
- MSCC_MS_XFORM_REC(index, rec++),
- ((u32 *)flow->key)[i]);
-
- /* Set the authentication key */
- for (i = 0; i < 4; i++)
- vsc8584_macsec_phy_write(phydev, bank,
- MSCC_MS_XFORM_REC(index, rec++),
- ((u32 *)hkey)[i]);
-
- /* Initial sequence number */
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
- bank == MACSEC_INGR ?
- flow->rx_sa->next_pn : flow->tx_sa->next_pn);
-
- if (bank == MACSEC_INGR)
- /* Set the mask (replay window size) */
- vsc8584_macsec_phy_write(phydev, bank,
- MSCC_MS_XFORM_REC(index, rec++),
- priv->secy->replay_window);
-
- /* Set the input vectors */
- sci = bank == MACSEC_INGR ? flow->rx_sa->sc->sci : priv->secy->sci;
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
- lower_32_bits(sci));
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
- upper_32_bits(sci));
-
- while (rec < 20)
- vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
- 0);
-
- flow->has_transformation = true;
- return 0;
-}
-
-static struct macsec_flow *vsc8584_macsec_alloc_flow(struct vsc8531_private *priv,
- enum macsec_bank bank)
-{
- unsigned long *bitmap = bank == MACSEC_INGR ?
- &priv->ingr_flows : &priv->egr_flows;
- struct macsec_flow *flow;
- int index;
-
- index = find_first_zero_bit(bitmap, MSCC_MS_MAX_FLOWS);
-
- if (index == MSCC_MS_MAX_FLOWS)
- return ERR_PTR(-ENOMEM);
-
- flow = kzalloc(sizeof(*flow), GFP_KERNEL);
- if (!flow)
- return ERR_PTR(-ENOMEM);
-
- set_bit(index, bitmap);
- flow->index = index;
- flow->bank = bank;
- flow->priority = 8;
- flow->assoc_num = -1;
-
- list_add_tail(&flow->list, &priv->macsec_flows);
- return flow;
-}
-
-static void vsc8584_macsec_free_flow(struct vsc8531_private *priv,
- struct macsec_flow *flow)
-{
- unsigned long *bitmap = flow->bank == MACSEC_INGR ?
- &priv->ingr_flows : &priv->egr_flows;
-
- list_del(&flow->list);
- clear_bit(flow->index, bitmap);
- kfree(flow);
-}
-
-static int vsc8584_macsec_add_flow(struct phy_device *phydev,
- struct macsec_flow *flow, bool update)
-{
- int ret;
-
- flow->port = MSCC_MS_PORT_CONTROLLED;
- vsc8584_macsec_flow(phydev, flow);
-
- if (update)
- return 0;
-
- ret = vsc8584_macsec_transformation(phydev, flow);
- if (ret) {
- vsc8584_macsec_free_flow(phydev->priv, flow);
- return ret;
- }
-
- return 0;
-}
-
-static int vsc8584_macsec_default_flows(struct phy_device *phydev)
-{
- struct macsec_flow *flow;
-
- /* Add a rule to let the MKA traffic go through, ingress */
- flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_INGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- flow->priority = 15;
- flow->port = MSCC_MS_PORT_UNCONTROLLED;
- flow->match.tagged = 1;
- flow->match.untagged = 1;
- flow->match.etype = 1;
- flow->etype = ETH_P_PAE;
- flow->action.bypass = 1;
-
- vsc8584_macsec_flow(phydev, flow);
- vsc8584_macsec_flow_enable(phydev, flow);
-
- /* Add a rule to let the MKA traffic go through, egress */
- flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_EGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- flow->priority = 15;
- flow->port = MSCC_MS_PORT_COMMON;
- flow->match.untagged = 1;
- flow->match.etype = 1;
- flow->etype = ETH_P_PAE;
- flow->action.bypass = 1;
-
- vsc8584_macsec_flow(phydev, flow);
- vsc8584_macsec_flow_enable(phydev, flow);
-
- return 0;
-}
-
-static void vsc8584_macsec_del_flow(struct phy_device *phydev,
- struct macsec_flow *flow)
-{
- vsc8584_macsec_flow_disable(phydev, flow);
- vsc8584_macsec_free_flow(phydev->priv, flow);
-}
-
-static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
- struct macsec_flow *flow, bool update)
-{
- struct phy_device *phydev = ctx->phydev;
- struct vsc8531_private *priv = phydev->priv;
-
- if (!flow) {
- flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
- }
-
- flow->assoc_num = ctx->sa.assoc_num;
- flow->rx_sa = ctx->sa.rx_sa;
-
- /* Always match tagged packets on ingress */
- flow->match.tagged = 1;
- flow->match.sci = 1;
-
- if (priv->secy->validate_frames != MACSEC_VALIDATE_DISABLED)
- flow->match.untagged = 1;
-
- return vsc8584_macsec_add_flow(phydev, flow, update);
-}
-
-static int __vsc8584_macsec_add_txsa(struct macsec_context *ctx,
- struct macsec_flow *flow, bool update)
-{
- struct phy_device *phydev = ctx->phydev;
- struct vsc8531_private *priv = phydev->priv;
-
- if (!flow) {
- flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
- }
-
- flow->assoc_num = ctx->sa.assoc_num;
- flow->tx_sa = ctx->sa.tx_sa;
-
- /* Always match untagged packets on egress */
- flow->match.untagged = 1;
-
- return vsc8584_macsec_add_flow(phydev, flow, update);
-}
-
-static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
-{
- struct vsc8531_private *priv = ctx->phydev->priv;
- struct macsec_flow *flow, *tmp;
-
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
- list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
-
- return 0;
-}
-
-static int vsc8584_macsec_dev_stop(struct macsec_context *ctx)
-{
- struct vsc8531_private *priv = ctx->phydev->priv;
- struct macsec_flow *flow, *tmp;
-
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
- list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
- vsc8584_macsec_flow_disable(ctx->phydev, flow);
-
- return 0;
-}
-
-static int vsc8584_macsec_add_secy(struct macsec_context *ctx)
-{
- struct vsc8531_private *priv = ctx->phydev->priv;
- struct macsec_secy *secy = ctx->secy;
-
- if (ctx->prepare) {
- if (priv->secy)
- return -EEXIST;
-
- return 0;
- }
-
- priv->secy = secy;
-
- vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR,
- secy->validate_frames != MACSEC_VALIDATE_DISABLED);
- vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR,
- secy->validate_frames != MACSEC_VALIDATE_DISABLED);
-
- return vsc8584_macsec_default_flows(ctx->phydev);
-}
-
-static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
-{
- struct vsc8531_private *priv = ctx->phydev->priv;
- struct macsec_flow *flow, *tmp;
-
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
- list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
- vsc8584_macsec_del_flow(ctx->phydev, flow);
-
- vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR, false);
- vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR, false);
-
- priv->secy = NULL;
- return 0;
-}
-
-static int vsc8584_macsec_upd_secy(struct macsec_context *ctx)
-{
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
- vsc8584_macsec_del_secy(ctx);
- return vsc8584_macsec_add_secy(ctx);
-}
-
-static int vsc8584_macsec_add_rxsc(struct macsec_context *ctx)
-{
- /* Nothing to do */
- return 0;
-}
-
-static int vsc8584_macsec_upd_rxsc(struct macsec_context *ctx)
-{
- return -EOPNOTSUPP;
-}
-
-static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
-{
- struct vsc8531_private *priv = ctx->phydev->priv;
- struct macsec_flow *flow, *tmp;
-
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
- list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
- if (flow->bank == MACSEC_INGR && flow->rx_sa &&
- flow->rx_sa->sc->sci == ctx->rx_sc->sci)
- vsc8584_macsec_del_flow(ctx->phydev, flow);
- }
-
- return 0;
-}
-
-static int vsc8584_macsec_add_rxsa(struct macsec_context *ctx)
-{
- struct macsec_flow *flow = NULL;
-
- if (ctx->prepare)
- return __vsc8584_macsec_add_rxsa(ctx, flow, false);
-
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
- return 0;
-}
-
-static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
-{
- struct macsec_flow *flow;
-
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- if (ctx->prepare) {
- /* Make sure the flow is disabled before updating it */
- vsc8584_macsec_flow_disable(ctx->phydev, flow);
-
- return __vsc8584_macsec_add_rxsa(ctx, flow, true);
- }
-
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
- return 0;
-}
-
-static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
-{
- struct macsec_flow *flow;
-
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
-
- if (IS_ERR(flow))
- return PTR_ERR(flow);
- if (ctx->prepare)
- return 0;
-
- vsc8584_macsec_del_flow(ctx->phydev, flow);
- return 0;
-}
-
-static int vsc8584_macsec_add_txsa(struct macsec_context *ctx)
-{
- struct macsec_flow *flow = NULL;
-
- if (ctx->prepare)
- return __vsc8584_macsec_add_txsa(ctx, flow, false);
-
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
- return 0;
-}
-
-static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
-{
- struct macsec_flow *flow;
-
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- if (ctx->prepare) {
- /* Make sure the flow is disabled before updating it */
- vsc8584_macsec_flow_disable(ctx->phydev, flow);
-
- return __vsc8584_macsec_add_txsa(ctx, flow, true);
- }
-
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
- return 0;
-}
-
-static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
-{
- struct macsec_flow *flow;
-
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
-
- if (IS_ERR(flow))
- return PTR_ERR(flow);
- if (ctx->prepare)
- return 0;
-
- vsc8584_macsec_del_flow(ctx->phydev, flow);
- return 0;
-}
-
-static struct macsec_ops vsc8584_macsec_ops = {
- .mdo_dev_open = vsc8584_macsec_dev_open,
- .mdo_dev_stop = vsc8584_macsec_dev_stop,
- .mdo_add_secy = vsc8584_macsec_add_secy,
- .mdo_upd_secy = vsc8584_macsec_upd_secy,
- .mdo_del_secy = vsc8584_macsec_del_secy,
- .mdo_add_rxsc = vsc8584_macsec_add_rxsc,
- .mdo_upd_rxsc = vsc8584_macsec_upd_rxsc,
- .mdo_del_rxsc = vsc8584_macsec_del_rxsc,
- .mdo_add_rxsa = vsc8584_macsec_add_rxsa,
- .mdo_upd_rxsa = vsc8584_macsec_upd_rxsa,
- .mdo_del_rxsa = vsc8584_macsec_del_rxsa,
- .mdo_add_txsa = vsc8584_macsec_add_txsa,
- .mdo_upd_txsa = vsc8584_macsec_upd_txsa,
- .mdo_del_txsa = vsc8584_macsec_del_txsa,
-};
-#endif /* CONFIG_MACSEC */
-
/* Check if one PHY has already done the init of the parts common to all PHYs
* in the Quad PHY package.
*/
@@ -2751,27 +1396,35 @@ static int vsc8584_config_init(struct phy_device *phydev)
val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
val &= ~MAC_CFG_MASK;
- if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
+ if (phydev->interface == PHY_INTERFACE_MODE_QSGMII) {
val |= MAC_CFG_QSGMII;
- else
+ } else if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
val |= MAC_CFG_SGMII;
+ } else if (phy_interface_is_rgmii(phydev)) {
+ val |= MAC_CFG_RGMII;
+ } else {
+ ret = -EINVAL;
+ goto err;
+ }
ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
if (ret)
goto err;
- val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
- PROC_CMD_READ_MOD_WRITE_PORT;
- if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
- val |= PROC_CMD_QSGMII_MAC;
- else
- val |= PROC_CMD_SGMII_MAC;
+ if (!phy_interface_is_rgmii(phydev)) {
+ val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
+ PROC_CMD_READ_MOD_WRITE_PORT;
+ if (phydev->interface == PHY_INTERFACE_MODE_QSGMII)
+ val |= PROC_CMD_QSGMII_MAC;
+ else
+ val |= PROC_CMD_SGMII_MAC;
- ret = vsc8584_cmd(phydev, val);
- if (ret)
- goto err;
+ ret = vsc8584_cmd(phydev, val);
+ if (ret)
+ goto err;
- usleep_range(10000, 20000);
+ usleep_range(10000, 20000);
+ }
/* Disable SerDes for 100Base-FX */
ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
@@ -2791,31 +1444,27 @@ static int vsc8584_config_init(struct phy_device *phydev)
mutex_unlock(&phydev->mdio.bus->mdio_lock);
-#if IS_ENABLED(CONFIG_MACSEC)
- /* MACsec */
- switch (phydev->phy_id & phydev->drv->phy_id_mask) {
- case PHY_ID_VSC856X:
- case PHY_ID_VSC8575:
- case PHY_ID_VSC8582:
- case PHY_ID_VSC8584:
- INIT_LIST_HEAD(&vsc8531->macsec_flows);
- vsc8531->secy = NULL;
-
- phydev->macsec_ops = &vsc8584_macsec_ops;
-
- ret = vsc8584_macsec_init(phydev);
- if (ret)
- goto err;
- }
-#endif
+ ret = vsc8584_macsec_init(phydev);
+ if (ret)
+ return ret;
phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
val &= ~(MEDIA_OP_MODE_MASK | VSC8584_MAC_IF_SELECTION_MASK);
- val |= MEDIA_OP_MODE_COPPER | (VSC8584_MAC_IF_SELECTION_SGMII <<
- VSC8584_MAC_IF_SELECTION_POS);
+ val |= (MEDIA_OP_MODE_COPPER << MEDIA_OP_MODE_POS) |
+ (VSC8584_MAC_IF_SELECTION_SGMII << VSC8584_MAC_IF_SELECTION_POS);
ret = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, val);
+ if (ret)
+ return ret;
+
+ if (phy_interface_is_rgmii(phydev)) {
+ ret = vsc85xx_rgmii_set_skews(phydev, VSC8572_RGMII_CNTL,
+ VSC8572_RGMII_RX_DELAY_MASK,
+ VSC8572_RGMII_TX_DELAY_MASK);
+ if (ret)
+ return ret;
+ }
ret = genphy_soft_reset(phydev);
if (ret)
@@ -2834,41 +1483,21 @@ err:
return ret;
}
-static int vsc8584_handle_interrupt(struct phy_device *phydev)
+static irqreturn_t vsc8584_handle_interrupt(struct phy_device *phydev)
{
-#if IS_ENABLED(CONFIG_MACSEC)
- struct vsc8531_private *priv = phydev->priv;
- struct macsec_flow *flow, *tmp;
- u32 cause, rec;
+ int irq_status;
- /* Check MACsec PN rollover */
- cause = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
- MSCC_MS_INTR_CTRL_STATUS);
- cause &= MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS_M;
- if (!(cause & MACSEC_INTR_CTRL_STATUS_ROLLOVER))
- goto skip_rollover;
+ irq_status = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+ if (irq_status < 0 || !(irq_status & MII_VSC85XX_INT_MASK_MASK))
+ return IRQ_NONE;
- rec = 6 + priv->secy->key_len / sizeof(u32);
- list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
- u32 val;
+ if (irq_status & MII_VSC85XX_INT_MASK_EXT)
+ vsc8584_handle_macsec_interrupt(phydev);
- if (flow->bank != MACSEC_EGR || !flow->has_transformation)
- continue;
-
- val = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
- MSCC_MS_XFORM_REC(flow->index, rec));
- if (val == 0xffffffff) {
- vsc8584_macsec_flow_disable(phydev, flow);
- macsec_pn_wrapped(priv->secy, flow->tx_sa);
- break;
- }
- }
-
-skip_rollover:
-#endif
+ if (irq_status & MII_VSC85XX_INT_MASK_LINK_CHG)
+ phy_mac_interrupt(phydev);
- phy_mac_interrupt(phydev);
- return 0;
+ return IRQ_HANDLED;
}
static int vsc85xx_config_init(struct phy_device *phydev)
@@ -3276,7 +1905,7 @@ static int vsc8514_config_init(struct phy_device *phydev)
return ret;
ret = phy_modify(phydev, MSCC_PHY_EXT_PHY_CNTL_1, MEDIA_OP_MODE_MASK,
- MEDIA_OP_MODE_COPPER);
+ MEDIA_OP_MODE_COPPER << MEDIA_OP_MODE_POS);
if (ret)
return ret;
@@ -3314,20 +1943,8 @@ static int vsc85xx_config_intr(struct phy_device *phydev)
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
-#if IS_ENABLED(CONFIG_MACSEC)
- phy_write(phydev, MSCC_EXT_PAGE_ACCESS,
- MSCC_PHY_PAGE_EXTENDED_2);
- phy_write(phydev, MSCC_PHY_EXTENDED_INT,
- MSCC_PHY_EXTENDED_INT_MS_EGR);
- phy_write(phydev, MSCC_EXT_PAGE_ACCESS,
- MSCC_PHY_PAGE_STANDARD);
-
- vsc8584_macsec_phy_write(phydev, MACSEC_EGR,
- MSCC_MS_AIC_CTRL, 0xf);
- vsc8584_macsec_phy_write(phydev, MACSEC_EGR,
- MSCC_MS_INTR_CTRL_STATUS,
- MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE(MACSEC_INTR_CTRL_STATUS_ROLLOVER));
-#endif
+ vsc8584_config_macsec_intr(phydev);
+
rc = phy_write(phydev, MII_VSC85XX_INT_MASK,
MII_VSC85XX_INT_MASK_MASK);
} else {
@@ -3475,6 +2092,30 @@ static int vsc85xx_probe(struct phy_device *phydev)
/* Microsemi VSC85xx PHYs */
static struct phy_driver vsc85xx_driver[] = {
{
+ .phy_id = PHY_ID_VSC8502,
+ .name = "Microsemi GE VSC8502 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_BASIC_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc85xx_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc85xx_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8504,
.name = "Microsemi GE VSC8504 SyncE",
.phy_id_mask = 0xfffffff0,
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index b705d0bd798b..47caae770ffc 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -72,20 +72,10 @@ static struct tja11xx_phy_stats tja11xx_hw_stats[] = {
static int tja11xx_check(struct phy_device *phydev, u8 reg, u16 mask, u16 set)
{
- int i, ret;
-
- for (i = 0; i < 200; i++) {
- ret = phy_read(phydev, reg);
- if (ret < 0)
- return ret;
-
- if ((ret & mask) == set)
- return 0;
-
- usleep_range(100, 150);
- }
+ int val;
- return -ETIMEDOUT;
+ return phy_read_poll_timeout(phydev, reg, val, (val & mask) == set,
+ 150, 30000, false);
}
static int phy_modify_check(struct phy_device *phydev, u8 reg,
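
For reference, a minimal sketch (not part of this patch) of how the phy_read_poll_timeout() helper used in the conversion above is typically called from a PHY driver; MY_PHY_STAT and MY_PHY_STAT_READY are hypothetical placeholders, not registers defined by this series:

static int my_phy_wait_ready(struct phy_device *phydev)
{
	int val;

	/* Poll MY_PHY_STAT every 150us until MY_PHY_STAT_READY is set,
	 * timing out after 30ms; the final 'false' skips the sleep
	 * before the first read, as in the tja11xx conversion above.
	 */
	return phy_read_poll_timeout(phydev, MY_PHY_STAT, val,
				     val & MY_PHY_STAT_READY,
				     150, 30000, false);
}
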
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index dd2e23fb67c0..67ba47ae5284 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -239,9 +239,10 @@ int genphy_c45_read_link(struct phy_device *phydev)
/* The link state is latched low so that momentary link
* drops can be detected. Do not double-read the status
- * in polling mode to detect such short link drops.
+ * in polling mode to detect such short link drops except
+ * when the link was already down.
*/
- if (!phy_polling_mode(phydev)) {
+ if (!phy_polling_mode(phydev) || !phydev->link) {
val = phy_read_mmd(phydev, devad, MDIO_STAT1);
if (val < 0)
return val;
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index a4d2d59fceca..66b8c61ca74c 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -8,7 +8,7 @@
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 74,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 75,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -329,6 +329,44 @@ void phy_resolve_aneg_linkmode(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
+/**
+ * phy_check_downshift - check whether downshift occurred
+ * @phydev: The phy_device struct
+ *
+ * Check whether a downshift to a lower speed occurred. If so, warn the user.
+ * Prerequisite for detecting a downshift is that the PHY driver implements
+ * the read_status callback and sets phydev->speed to the actual link speed.
+ */
+void phy_check_downshift(struct phy_device *phydev)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
+ int i, speed = SPEED_UNKNOWN;
+
+ phydev->downshifted_rate = 0;
+
+ if (phydev->autoneg == AUTONEG_DISABLE ||
+ phydev->speed == SPEED_UNKNOWN)
+ return;
+
+ linkmode_and(common, phydev->lp_advertising, phydev->advertising);
+
+ for (i = 0; i < ARRAY_SIZE(settings); i++)
+ if (test_bit(settings[i].bit, common)) {
+ speed = settings[i].speed;
+ break;
+ }
+
+ if (speed == SPEED_UNKNOWN || phydev->speed >= speed)
+ return;
+
+ phydev_warn(phydev, "Downshift occurred from negotiated speed %s to actual speed %s, check cabling!\n",
+ phy_speed_to_str(speed), phy_speed_to_str(phydev->speed));
+
+ phydev->downshifted_rate = 1;
+}
+EXPORT_SYMBOL_GPL(phy_check_downshift);
+
static int phy_resolve_min_speed(struct phy_device *phydev, bool fdx_only)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
@@ -489,37 +527,6 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
EXPORT_SYMBOL(phy_write_mmd);
/**
- * __phy_modify_changed() - Convenience function for modifying a PHY register
- * @phydev: a pointer to a &struct phy_device
- * @regnum: register number
- * @mask: bit mask of bits to clear
- * @set: bit mask of bits to set
- *
- * Unlocked helper function which allows a PHY register to be modified as
- * new register value = (old register value & ~mask) | set
- *
- * Returns negative errno, 0 if there was no change, and 1 in case of change
- */
-int __phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask,
- u16 set)
-{
- int new, ret;
-
- ret = __phy_read(phydev, regnum);
- if (ret < 0)
- return ret;
-
- new = (ret & ~mask) | set;
- if (new == ret)
- return 0;
-
- ret = __phy_write(phydev, regnum, new);
-
- return ret < 0 ? ret : 1;
-}
-EXPORT_SYMBOL_GPL(__phy_modify_changed);
-
-/**
* phy_modify_changed - Function for modifying a PHY register
* @phydev: the phy_device struct
* @regnum: register number to modify
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 355bfdef48d2..72c69a9c8a98 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -96,9 +96,10 @@ void phy_print_status(struct phy_device *phydev)
{
if (phydev->link) {
netdev_info(phydev->attached_dev,
- "Link is Up - %s/%s - flow control %s\n",
+ "Link is Up - %s/%s %s- flow control %s\n",
phy_speed_to_str(phydev->speed),
phy_duplex_to_str(phydev->duplex),
+ phydev->downshifted_rate ? "(downshifted) " : "",
phy_pause_str(phydev));
} else {
netdev_info(phydev->attached_dev, "Link is Down\n");
@@ -507,6 +508,7 @@ static int phy_check_link_status(struct phy_device *phydev)
return err;
if (phydev->link && phydev->state != PHY_RUNNING) {
+ phy_check_downshift(phydev);
phydev->state = PHY_RUNNING;
phy_link_up(phydev);
} else if (!phydev->link && phydev->state != PHY_NOLINK) {
@@ -715,26 +717,24 @@ static int phy_disable_interrupts(struct phy_device *phydev)
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
struct phy_device *phydev = phy_dat;
+ struct phy_driver *drv = phydev->drv;
- if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev))
+ if (drv->handle_interrupt)
+ return drv->handle_interrupt(phydev);
+
+ if (drv->did_interrupt && !drv->did_interrupt(phydev))
return IRQ_NONE;
- if (phydev->drv->handle_interrupt) {
- if (phydev->drv->handle_interrupt(phydev))
- goto phy_err;
- } else {
- /* reschedule state queue work to run as soon as possible */
- phy_trigger_machine(phydev);
- }
+ /* reschedule state queue work to run as soon as possible */
+ phy_trigger_machine(phydev);
/* did_interrupt() may have cleared the interrupt already */
- if (!phydev->drv->did_interrupt && phy_clear_interrupt(phydev))
- goto phy_err;
- return IRQ_HANDLED;
+ if (!drv->did_interrupt && phy_clear_interrupt(phydev)) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
-phy_err:
- phy_error(phydev);
- return IRQ_NONE;
+ return IRQ_HANDLED;
}
/**
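
For orientation, a hedged sketch of the handle_interrupt() shape that the reworked phy_interrupt() above now dispatches to first, modelled on the vsc8584 conversion earlier in this patch; the MY_PHY_INT_STATUS register and MY_PHY_INT_MASK names are illustrative assumptions:

static irqreturn_t my_phy_handle_interrupt(struct phy_device *phydev)
{
	int irq_status;

	/* Reading the status register also acknowledges the interrupt */
	irq_status = phy_read(phydev, MY_PHY_INT_STATUS);
	if (irq_status < 0 || !(irq_status & MY_PHY_INT_MASK))
		return IRQ_NONE;

	/* Report the link change to phylib */
	phy_mac_interrupt(phydev);

	return IRQ_HANDLED;
}
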
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 28e3c5c0e3c3..ac2784192472 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1059,18 +1059,12 @@ EXPORT_SYMBOL(phy_disconnect);
static int phy_poll_reset(struct phy_device *phydev)
{
/* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
- unsigned int retries = 12;
- int ret;
-
- do {
- msleep(50);
- ret = phy_read(phydev, MII_BMCR);
- if (ret < 0)
- return ret;
- } while (ret & BMCR_RESET && --retries);
- if (ret & BMCR_RESET)
- return -ETIMEDOUT;
+ int ret, val;
+ ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+ 50000, 600000, true);
+ if (ret)
+ return ret;
/* Some chips (smsc911x) may still need up to another 1ms after the
* BMCR_RESET bit is cleared before they are usable.
*/
@@ -1525,23 +1519,22 @@ EXPORT_SYMBOL(phy_detach);
int phy_suspend(struct phy_device *phydev)
{
- struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
- struct net_device *netdev = phydev->attached_dev;
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
- int ret = 0;
+ struct net_device *netdev = phydev->attached_dev;
+ struct phy_driver *phydrv = phydev->drv;
+ int ret;
/* If the device has WOL enabled, we cannot suspend the PHY */
phy_ethtool_get_wol(phydev, &wol);
if (wol.wolopts || (netdev && netdev->wol_enabled))
return -EBUSY;
- if (phydev->drv && phydrv->suspend)
- ret = phydrv->suspend(phydev);
-
- if (ret)
- return ret;
+ if (!phydrv || !phydrv->suspend)
+ return 0;
- phydev->suspended = true;
+ ret = phydrv->suspend(phydev);
+ if (!ret)
+ phydev->suspended = true;
return ret;
}
@@ -1549,18 +1542,17 @@ EXPORT_SYMBOL(phy_suspend);
int __phy_resume(struct phy_device *phydev)
{
- struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
- int ret = 0;
+ struct phy_driver *phydrv = phydev->drv;
+ int ret;
WARN_ON(!mutex_is_locked(&phydev->lock));
- if (phydev->drv && phydrv->resume)
- ret = phydrv->resume(phydev);
-
- if (ret)
- return ret;
+ if (!phydrv || !phydrv->resume)
+ return 0;
- phydev->suspended = false;
+ ret = phydrv->resume(phydev);
+ if (!ret)
+ phydev->suspended = false;
return ret;
}
@@ -1935,9 +1927,10 @@ int genphy_update_link(struct phy_device *phydev)
/* The link state is latched low so that momentary link
* drops can be detected. Do not double-read the status
- * in polling mode to detect such short link drops.
+ * in polling mode to detect such short link drops except
+ * when the link was already down.
*/
- if (!phy_polling_mode(phydev)) {
+ if (!phy_polling_mode(phydev) || !phydev->link) {
status = phy_read(phydev, MII_BMSR);
if (status < 0)
return status;
@@ -2366,22 +2359,7 @@ void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx)
__ETHTOOL_DECLARE_LINK_MODE_MASK(oldadv);
linkmode_copy(oldadv, phydev->advertising);
-
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->advertising);
-
- if (rx) {
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- phydev->advertising);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->advertising);
- }
-
- if (tx)
- linkmode_change_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->advertising);
+ linkmode_set_pause(phydev->advertising, tx, rx);
if (!linkmode_equal(oldadv, phydev->advertising) &&
phydev->autoneg)
@@ -2414,6 +2392,32 @@ bool phy_validate_pause(struct phy_device *phydev,
}
EXPORT_SYMBOL(phy_validate_pause);
+/**
+ * phy_get_pause - resolve negotiated pause modes
+ * @phydev: phy_device struct
+ * @tx_pause: pointer to bool to indicate whether transmit pause should be
+ * enabled.
+ * @rx_pause: pointer to bool to indicate whether receive pause should be
+ * enabled.
+ *
+ * Resolve and return the flow control modes according to the negotiation
+ * result. This includes checking that we are operating in full duplex mode.
+ * See linkmode_resolve_pause() for further details.
+ */
+void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause)
+{
+ if (phydev->duplex != DUPLEX_FULL) {
+ *tx_pause = false;
+ *rx_pause = false;
+ return;
+ }
+
+ return linkmode_resolve_pause(phydev->advertising,
+ phydev->lp_advertising,
+ tx_pause, rx_pause);
+}
+EXPORT_SYMBOL(phy_get_pause);
+
static bool phy_drv_supports_irq(struct phy_driver *phydrv)
{
return phydrv->config_intr && phydrv->ack_interrupt;
@@ -2571,6 +2575,7 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
new_driver->mdiodrv.driver.probe = phy_probe;
new_driver->mdiodrv.driver.remove = phy_remove;
new_driver->mdiodrv.driver.owner = owner;
+ new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
retval = driver_register(&new_driver->mdiodrv.driver);
if (retval) {
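
A hedged usage sketch for the phy_get_pause() helper added above, as a MAC driver's link-change handler might consume it (phylink_phy_change() in the next diff does exactly this); my_mac_set_flow_control() is a hypothetical driver function standing in for the MAC's own register programming:

static void my_mac_adjust_link(struct net_device *ndev)
{
	struct phy_device *phydev = ndev->phydev;
	bool tx_pause, rx_pause;

	if (!phydev->link)
		return;

	/* Both flags come back false unless the link resolved to
	 * full duplex; see linkmode_resolve_pause() for the rules.
	 */
	phy_get_pause(phydev, &tx_pause, &rx_pause);

	my_mac_set_flow_control(ndev, tx_pause, rx_pause);
}
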
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 6e66b8e77ec7..34ca12aec61b 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -40,7 +40,8 @@ enum {
struct phylink {
/* private: */
struct net_device *netdev;
- const struct phylink_mac_ops *ops;
+ const struct phylink_mac_ops *mac_ops;
+ const struct phylink_pcs_ops *pcs_ops;
struct phylink_config *config;
struct device *dev;
unsigned int old_link_state:1;
@@ -154,7 +155,7 @@ static const char *phylink_an_mode_str(unsigned int mode)
static int phylink_validate(struct phylink *pl, unsigned long *supported,
struct phylink_link_state *state)
{
- pl->ops->validate(pl->config, supported, state);
+ pl->mac_ops->validate(pl->config, supported, state);
return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
}
@@ -181,9 +182,11 @@ static int phylink_parse_fixedlink(struct phylink *pl,
/* We treat the "pause" and "asym-pause" terminology as
* defining the link partner's ability. */
if (fwnode_property_read_bool(fixed_node, "pause"))
- pl->link_config.pause |= MLO_PAUSE_SYM;
+ __set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ pl->link_config.lp_advertising);
if (fwnode_property_read_bool(fixed_node, "asym-pause"))
- pl->link_config.pause |= MLO_PAUSE_ASYM;
+ __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ pl->link_config.lp_advertising);
if (ret == 0) {
desc = fwnode_gpiod_get_index(fixed_node, "link", 0,
@@ -215,9 +218,11 @@ static int phylink_parse_fixedlink(struct phylink *pl,
DUPLEX_FULL : DUPLEX_HALF;
pl->link_config.speed = prop[2];
if (prop[3])
- pl->link_config.pause |= MLO_PAUSE_SYM;
+ __set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ pl->link_config.lp_advertising);
if (prop[4])
- pl->link_config.pause |= MLO_PAUSE_ASYM;
+ __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ pl->link_config.lp_advertising);
}
}
@@ -308,11 +313,13 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 1000baseT_Half);
phylink_set(pl->supported, 1000baseT_Full);
phylink_set(pl->supported, 1000baseX_Full);
+ phylink_set(pl->supported, 1000baseKX_Full);
phylink_set(pl->supported, 2500baseT_Full);
phylink_set(pl->supported, 2500baseX_Full);
phylink_set(pl->supported, 5000baseT_Full);
phylink_set(pl->supported, 10000baseT_Full);
phylink_set(pl->supported, 10000baseKR_Full);
+ phylink_set(pl->supported, 10000baseKX4_Full);
phylink_set(pl->supported, 10000baseCR_Full);
phylink_set(pl->supported, 10000baseSR_Full);
phylink_set(pl->supported, 10000baseLR_Full);
@@ -320,6 +327,33 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 10000baseER_Full);
break;
+ case PHY_INTERFACE_MODE_XLGMII:
+ phylink_set(pl->supported, 25000baseCR_Full);
+ phylink_set(pl->supported, 25000baseKR_Full);
+ phylink_set(pl->supported, 25000baseSR_Full);
+ phylink_set(pl->supported, 40000baseKR4_Full);
+ phylink_set(pl->supported, 40000baseCR4_Full);
+ phylink_set(pl->supported, 40000baseSR4_Full);
+ phylink_set(pl->supported, 40000baseLR4_Full);
+ phylink_set(pl->supported, 50000baseCR2_Full);
+ phylink_set(pl->supported, 50000baseKR2_Full);
+ phylink_set(pl->supported, 50000baseSR2_Full);
+ phylink_set(pl->supported, 50000baseKR_Full);
+ phylink_set(pl->supported, 50000baseSR_Full);
+ phylink_set(pl->supported, 50000baseCR_Full);
+ phylink_set(pl->supported, 50000baseLR_ER_FR_Full);
+ phylink_set(pl->supported, 50000baseDR_Full);
+ phylink_set(pl->supported, 100000baseKR4_Full);
+ phylink_set(pl->supported, 100000baseSR4_Full);
+ phylink_set(pl->supported, 100000baseCR4_Full);
+ phylink_set(pl->supported, 100000baseLR4_ER4_Full);
+ phylink_set(pl->supported, 100000baseKR2_Full);
+ phylink_set(pl->supported, 100000baseSR2_Full);
+ phylink_set(pl->supported, 100000baseCR2_Full);
+ phylink_set(pl->supported, 100000baseLR2_ER2_FR2_Full);
+ phylink_set(pl->supported, 100000baseDR2_Full);
+ break;
+
default:
phylink_err(pl,
"incorrect link mode %s for in-band status\n",
@@ -334,11 +368,42 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
"failed to validate link configuration for in-band status\n");
return -EINVAL;
}
+
+ /* Check if MAC/PCS also supports Autoneg. */
+ pl->link_config.an_enabled = phylink_test(pl->supported, Autoneg);
}
return 0;
}
+static void phylink_apply_manual_flow(struct phylink *pl,
+ struct phylink_link_state *state)
+{
+ /* If autoneg is disabled, pause AN is also disabled */
+ if (!state->an_enabled)
+ state->pause &= ~MLO_PAUSE_AN;
+
+ /* Manual configuration of pause modes */
+ if (!(pl->link_config.pause & MLO_PAUSE_AN))
+ state->pause = pl->link_config.pause;
+}
+
+static void phylink_resolve_flow(struct phylink_link_state *state)
+{
+ bool tx_pause, rx_pause;
+
+ state->pause = MLO_PAUSE_NONE;
+ if (state->duplex == DUPLEX_FULL) {
+ linkmode_resolve_pause(state->advertising,
+ state->lp_advertising,
+ &tx_pause, &rx_pause);
+ if (tx_pause)
+ state->pause |= MLO_PAUSE_TX;
+ if (rx_pause)
+ state->pause |= MLO_PAUSE_RX;
+ }
+}
+
static void phylink_mac_config(struct phylink *pl,
const struct phylink_link_state *state)
{
@@ -351,7 +416,7 @@ static void phylink_mac_config(struct phylink *pl,
__ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
state->pause, state->link, state->an_enabled);
- pl->ops->mac_config(pl->config, pl->cur_link_an_mode, state);
+ pl->mac_ops->mac_config(pl->config, pl->cur_link_an_mode, state);
}
static void phylink_mac_config_up(struct phylink *pl,
@@ -361,11 +426,32 @@ static void phylink_mac_config_up(struct phylink *pl,
phylink_mac_config(pl, state);
}
-static void phylink_mac_an_restart(struct phylink *pl)
+static void phylink_mac_pcs_an_restart(struct phylink *pl)
{
if (pl->link_config.an_enabled &&
- phy_interface_mode_is_8023z(pl->link_config.interface))
- pl->ops->mac_an_restart(pl->config);
+ phy_interface_mode_is_8023z(pl->link_config.interface)) {
+ if (pl->pcs_ops)
+ pl->pcs_ops->pcs_an_restart(pl->config);
+ else
+ pl->mac_ops->mac_an_restart(pl->config);
+ }
+}
+
+static void phylink_pcs_config(struct phylink *pl, bool force_restart,
+ const struct phylink_link_state *state)
+{
+ bool restart = force_restart;
+
+ if (pl->pcs_ops && pl->pcs_ops->pcs_config(pl->config,
+ pl->cur_link_an_mode,
+ state->interface,
+ state->advertising))
+ restart = true;
+
+ phylink_mac_config(pl, state);
+
+ if (restart)
+ phylink_mac_pcs_an_restart(pl);
}
static void phylink_mac_pcs_get_state(struct phylink *pl,
@@ -381,55 +467,54 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
state->an_complete = 0;
state->link = 1;
- pl->ops->mac_pcs_get_state(pl->config, state);
+ if (pl->pcs_ops)
+ pl->pcs_ops->pcs_get_state(pl->config, state);
+ else
+ pl->mac_ops->mac_pcs_get_state(pl->config, state);
}
/* The fixed state is... fixed except for the link state,
* which may be determined by a GPIO or a callback.
*/
-static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state)
+static void phylink_get_fixed_state(struct phylink *pl,
+ struct phylink_link_state *state)
{
*state = pl->link_config;
if (pl->get_fixed_state)
pl->get_fixed_state(pl->netdev, state);
else if (pl->link_gpio)
state->link = !!gpiod_get_value_cansleep(pl->link_gpio);
+
+ phylink_resolve_flow(state);
}
-/* Flow control is resolved according to our and the link partners
- * advertisements using the following drawn from the 802.3 specs:
- * Local device Link partner
- * Pause AsymDir Pause AsymDir Result
- * 1 X 1 X TX+RX
- * 0 1 1 1 TX
- * 1 1 0 1 RX
- */
-static void phylink_resolve_flow(struct phylink *pl,
- struct phylink_link_state *state)
+static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
{
- int new_pause = 0;
+ struct phylink_link_state link_state;
- if (pl->link_config.pause & MLO_PAUSE_AN) {
- int pause = 0;
+ switch (pl->cur_link_an_mode) {
+ case MLO_AN_PHY:
+ link_state = pl->phy_state;
+ break;
- if (phylink_test(pl->link_config.advertising, Pause))
- pause |= MLO_PAUSE_SYM;
- if (phylink_test(pl->link_config.advertising, Asym_Pause))
- pause |= MLO_PAUSE_ASYM;
+ case MLO_AN_FIXED:
+ phylink_get_fixed_state(pl, &link_state);
+ break;
- pause &= state->pause;
+ case MLO_AN_INBAND:
+ link_state = pl->link_config;
+ if (link_state.interface == PHY_INTERFACE_MODE_SGMII)
+ link_state.pause = MLO_PAUSE_NONE;
+ break;
- if (pause & MLO_PAUSE_SYM)
- new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
- else if (pause & MLO_PAUSE_ASYM)
- new_pause = state->pause & MLO_PAUSE_SYM ?
- MLO_PAUSE_TX : MLO_PAUSE_RX;
- } else {
- new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
+ default: /* can't happen */
+ return;
}
- state->pause &= ~MLO_PAUSE_TXRX_MASK;
- state->pause |= new_pause;
+ link_state.link = false;
+
+ phylink_apply_manual_flow(pl, &link_state);
+ phylink_pcs_config(pl, force_restart, &link_state);
}
static const char *phylink_pause_to_str(int pause)
@@ -446,14 +531,23 @@ static const char *phylink_pause_to_str(int pause)
}
}
-static void phylink_mac_link_up(struct phylink *pl,
- struct phylink_link_state link_state)
+static void phylink_link_up(struct phylink *pl,
+ struct phylink_link_state link_state)
{
struct net_device *ndev = pl->netdev;
pl->cur_interface = link_state.interface;
- pl->ops->mac_link_up(pl->config, pl->cur_link_an_mode,
- pl->cur_interface, pl->phydev);
+
+ if (pl->pcs_ops && pl->pcs_ops->pcs_link_up)
+ pl->pcs_ops->pcs_link_up(pl->config, pl->cur_link_an_mode,
+ pl->cur_interface,
+ link_state.speed, link_state.duplex);
+
+ pl->mac_ops->mac_link_up(pl->config, pl->phydev,
+ pl->cur_link_an_mode, pl->cur_interface,
+ link_state.speed, link_state.duplex,
+ !!(link_state.pause & MLO_PAUSE_TX),
+ !!(link_state.pause & MLO_PAUSE_RX));
if (ndev)
netif_carrier_on(ndev);
@@ -465,14 +559,14 @@ static void phylink_mac_link_up(struct phylink *pl,
phylink_pause_to_str(link_state.pause));
}
-static void phylink_mac_link_down(struct phylink *pl)
+static void phylink_link_down(struct phylink *pl)
{
struct net_device *ndev = pl->netdev;
if (ndev)
netif_carrier_off(ndev);
- pl->ops->mac_link_down(pl->config, pl->cur_link_an_mode,
- pl->cur_interface);
+ pl->mac_ops->mac_link_down(pl->config, pl->cur_link_an_mode,
+ pl->cur_interface);
phylink_info(pl, "Link is Down\n");
}
@@ -493,7 +587,7 @@ static void phylink_resolve(struct work_struct *w)
switch (pl->cur_link_an_mode) {
case MLO_AN_PHY:
link_state = pl->phy_state;
- phylink_resolve_flow(pl, &link_state);
+ phylink_apply_manual_flow(pl, &link_state);
phylink_mac_config_up(pl, &link_state);
break;
@@ -515,10 +609,12 @@ static void phylink_resolve(struct work_struct *w)
link_state.interface = pl->phy_state.interface;
/* If we have a PHY, we need to update with
- * the pause mode bits. */
- link_state.pause |= pl->phy_state.pause;
- phylink_resolve_flow(pl, &link_state);
+ * the PHY flow control bits. */
+ link_state.pause = pl->phy_state.pause;
+ phylink_apply_manual_flow(pl, &link_state);
phylink_mac_config(pl, &link_state);
+ } else {
+ phylink_apply_manual_flow(pl, &link_state);
}
break;
}
@@ -532,9 +628,9 @@ static void phylink_resolve(struct work_struct *w)
if (link_changed) {
pl->old_link_state = link_state.link;
if (!link_state.link)
- phylink_mac_link_down(pl);
+ phylink_link_down(pl);
else
- phylink_mac_link_up(pl, link_state);
+ phylink_link_up(pl, link_state);
}
if (!link_state.link && pl->mac_link_dropped) {
pl->mac_link_dropped = false;
@@ -601,7 +697,7 @@ static int phylink_register_sfp(struct phylink *pl,
* @fwnode: a pointer to a &struct fwnode_handle describing the network
* interface
* @iface: the desired link mode defined by &typedef phy_interface_t
- * @ops: a pointer to a &struct phylink_mac_ops for the MAC.
+ * @mac_ops: a pointer to a &struct phylink_mac_ops for the MAC.
*
* Create a new phylink instance, and parse the link parameters found in @np.
* This will parse in-band modes, fixed-link or SFP configuration.
@@ -614,7 +710,7 @@ static int phylink_register_sfp(struct phylink *pl,
struct phylink *phylink_create(struct phylink_config *config,
struct fwnode_handle *fwnode,
phy_interface_t iface,
- const struct phylink_mac_ops *ops)
+ const struct phylink_mac_ops *mac_ops)
{
struct phylink *pl;
int ret;
@@ -647,7 +743,7 @@ struct phylink *phylink_create(struct phylink_config *config,
pl->link_config.speed = SPEED_UNKNOWN;
pl->link_config.duplex = DUPLEX_UNKNOWN;
pl->link_config.an_enabled = true;
- pl->ops = ops;
+ pl->mac_ops = mac_ops;
__set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
timer_setup(&pl->link_poll, phylink_fixed_poll, 0);
@@ -681,6 +777,12 @@ struct phylink *phylink_create(struct phylink_config *config,
}
EXPORT_SYMBOL_GPL(phylink_create);
+void phylink_add_pcs(struct phylink *pl, const struct phylink_pcs_ops *ops)
+{
+ pl->pcs_ops = ops;
+}
+EXPORT_SYMBOL_GPL(phylink_add_pcs);
+
/**
* phylink_destroy() - cleanup and destroy the phylink instance
* @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -705,15 +807,18 @@ static void phylink_phy_change(struct phy_device *phydev, bool up,
bool do_carrier)
{
struct phylink *pl = phydev->phylink;
+ bool tx_pause, rx_pause;
+
+ phy_get_pause(phydev, &tx_pause, &rx_pause);
mutex_lock(&pl->state_mutex);
pl->phy_state.speed = phydev->speed;
pl->phy_state.duplex = phydev->duplex;
pl->phy_state.pause = MLO_PAUSE_NONE;
- if (phydev->pause)
- pl->phy_state.pause |= MLO_PAUSE_SYM;
- if (phydev->asym_pause)
- pl->phy_state.pause |= MLO_PAUSE_ASYM;
+ if (tx_pause)
+ pl->phy_state.pause |= MLO_PAUSE_TX;
+ if (rx_pause)
+ pl->phy_state.pause |= MLO_PAUSE_RX;
pl->phy_state.interface = phydev->interface;
pl->phy_state.link = up;
mutex_unlock(&pl->state_mutex);
@@ -783,6 +888,9 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
mutex_lock(&pl->state_mutex);
pl->phydev = phy;
pl->phy_state.interface = interface;
+ pl->phy_state.pause = MLO_PAUSE_NONE;
+ pl->phy_state.speed = SPEED_UNKNOWN;
+ pl->phy_state.duplex = DUPLEX_UNKNOWN;
linkmode_copy(pl->supported, supported);
linkmode_copy(pl->link_config.advertising, config.advertising);
@@ -1011,15 +1119,12 @@ void phylink_start(struct phylink *pl)
/* Apply the link configuration to the MAC when starting. This allows
* a fixed-link to start with the correct parameters, and also
* ensures that we set the appropriate advertisement for Serdes links.
- */
- phylink_resolve_flow(pl, &pl->link_config);
- phylink_mac_config(pl, &pl->link_config);
-
- /* Restart autonegotiation if using 802.3z to ensure that the link
+ *
+ * Restart autonegotiation if using 802.3z to ensure that the link
* parameters are properly negotiated. This is necessary for DSA
* switches using 802.3z negotiation to ensure they see our modes.
*/
- phylink_mac_an_restart(pl);
+ phylink_mac_initial_config(pl, true);
clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
phylink_run_resolve(pl);
@@ -1316,8 +1421,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* advertisement; the only thing we have is the pause
* modes which can only come from a PHY.
*/
- phylink_mac_config(pl, &pl->link_config);
- phylink_mac_an_restart(pl);
+ phylink_pcs_config(pl, true, &pl->link_config);
}
mutex_unlock(&pl->state_mutex);
}
@@ -1345,7 +1449,7 @@ int phylink_ethtool_nway_reset(struct phylink *pl)
if (pl->phydev)
ret = phy_restart_aneg(pl->phydev);
- phylink_mac_an_restart(pl);
+ phylink_mac_pcs_an_restart(pl);
return ret;
}
@@ -1379,6 +1483,9 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
ASSERT_RTNL();
+ if (pl->cur_link_an_mode == MLO_AN_FIXED)
+ return -EOPNOTSUPP;
+
if (!phylink_test(pl->supported, Pause) &&
!phylink_test(pl->supported, Asym_Pause))
return -EOPNOTSUPP;
@@ -1387,8 +1494,8 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
!pause->autoneg && pause->rx_pause != pause->tx_pause)
return -EINVAL;
- config->pause &= ~(MLO_PAUSE_AN | MLO_PAUSE_TXRX_MASK);
-
+ mutex_lock(&pl->state_mutex);
+ config->pause = 0;
if (pause->autoneg)
config->pause |= MLO_PAUSE_AN;
if (pause->rx_pause)
@@ -1396,6 +1503,22 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
if (pause->tx_pause)
config->pause |= MLO_PAUSE_TX;
+ /*
+ * See the comments for linkmode_set_pause(), wrt the deficiencies
+ * with the current implementation. A solution to this issue would
+ * be:
+ * ethtool Local device
+ * rx tx Pause AsymDir
+ * 0 0 0 0
+ * 1 0 1 1
+ * 0 1 0 1
+ * 1 1 1 1
+ * and then use the ethtool rx/tx enablement status to mask the
+ * rx/tx pause resolution.
+ */
+ linkmode_set_pause(config->advertising, pause->tx_pause,
+ pause->rx_pause);
+
/* If we have a PHY, phylib will call our link state function if the
* mode has changed, which will trigger a resolve and update the MAC
* configuration.
@@ -1405,19 +1528,9 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
pause->tx_pause);
} else if (!test_bit(PHYLINK_DISABLE_STOPPED,
&pl->phylink_disable_state)) {
- switch (pl->cur_link_an_mode) {
- case MLO_AN_FIXED:
- /* Should we allow fixed links to change against the config? */
- phylink_resolve_flow(pl, config);
- phylink_mac_config(pl, config);
- break;
-
- case MLO_AN_INBAND:
- phylink_mac_config(pl, config);
- phylink_mac_an_restart(pl);
- break;
- }
+ phylink_pcs_config(pl, true, &pl->link_config);
}
+ mutex_unlock(&pl->state_mutex);
return 0;
}
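For reference, the linkmode_set_pause() behaviour the comment above is contrasting against maps the ethtool rx/tx bits as Pause = rx and AsymDir = rx ^ tx. A sketch of the helper as it stands in include/linux/linkmode.h; note the rx=1/tx=1 row is where it diverges from the proposed table:

	static inline void linkmode_set_pause(unsigned long *advertising,
					      bool tx, bool rx)
	{
		linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising, rx);
		linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising,
				 rx ^ tx);
	}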
@@ -1509,13 +1622,14 @@ static int phylink_mii_emul_read(unsigned int reg,
struct phylink_link_state *state)
{
struct fixed_phy_status fs;
+ unsigned long *lpa = state->lp_advertising;
int val;
fs.link = state->link;
fs.speed = state->speed;
fs.duplex = state->duplex;
- fs.pause = state->pause & MLO_PAUSE_SYM;
- fs.asym_pause = state->pause & MLO_PAUSE_ASYM;
+ fs.pause = test_bit(ETHTOOL_LINK_MODE_Pause_BIT, lpa);
+ fs.asym_pause = test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, lpa);
val = swphy_read_reg(reg, &fs);
if (reg == MII_BMSR) {
@@ -1820,7 +1934,7 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
if (changed && !test_bit(PHYLINK_DISABLE_STOPPED,
&pl->phylink_disable_state))
- phylink_mac_config(pl, &pl->link_config);
+ phylink_mac_initial_config(pl, false);
return ret;
}
@@ -1987,4 +2101,242 @@ void phylink_helper_basex_speed(struct phylink_link_state *state)
}
EXPORT_SYMBOL_GPL(phylink_helper_basex_speed);
+static void phylink_decode_c37_word(struct phylink_link_state *state,
+ uint16_t config_reg, int speed)
+{
+ bool tx_pause, rx_pause;
+ int fd_bit;
+
+ if (speed == SPEED_2500)
+ fd_bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT;
+ else
+ fd_bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT;
+
+ mii_lpa_mod_linkmode_x(state->lp_advertising, config_reg, fd_bit);
+
+ if (linkmode_test_bit(fd_bit, state->advertising) &&
+ linkmode_test_bit(fd_bit, state->lp_advertising)) {
+ state->speed = speed;
+ state->duplex = DUPLEX_FULL;
+ } else {
+ /* negotiation failure */
+ state->link = false;
+ }
+
+ linkmode_resolve_pause(state->advertising, state->lp_advertising,
+ &tx_pause, &rx_pause);
+
+ if (tx_pause)
+ state->pause |= MLO_PAUSE_TX;
+ if (rx_pause)
+ state->pause |= MLO_PAUSE_RX;
+}
+
+static void phylink_decode_sgmii_word(struct phylink_link_state *state,
+ uint16_t config_reg)
+{
+ if (!(config_reg & LPA_SGMII_LINK)) {
+ state->link = false;
+ return;
+ }
+
+ switch (config_reg & LPA_SGMII_SPD_MASK) {
+ case LPA_SGMII_10:
+ state->speed = SPEED_10;
+ break;
+ case LPA_SGMII_100:
+ state->speed = SPEED_100;
+ break;
+ case LPA_SGMII_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->link = false;
+ return;
+ }
+ if (config_reg & LPA_SGMII_FULL_DUPLEX)
+ state->duplex = DUPLEX_FULL;
+ else
+ state->duplex = DUPLEX_HALF;
+}
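For readers without the Cisco SGMII spec to hand, the link-partner word decoded above carries, roughly, the following fields, matching the LPA_SGMII_* masks in mii.h:

	/* SGMII link partner word (sketch):
	 *   bit 15      - link up (LPA_SGMII_LINK)
	 *   bit 12      - full duplex (LPA_SGMII_FULL_DUPLEX)
	 *   bits 11..10 - speed: 00 = 10M, 01 = 100M, 10 = 1000M,
	 *                 11 = reserved (treated as link down above)
	 */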
+
+/**
+ * phylink_mii_c22_pcs_get_state() - read the MAC PCS state
+ * @pcs: a pointer to a &struct mdio_device.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Helper for MAC PCS supporting the 802.3 clause 22 register set for
+ * clause 37 negotiation and/or SGMII control.
+ *
+ * Read the MAC PCS state from the MII device @pcs and
+ * parse the Clause 37 or Cisco SGMII link partner negotiation word into
+ * the phylink @state structure. This is suitable to be directly plugged
+ * into the mac_pcs_get_state() member of the struct phylink_mac_ops
+ * structure.
+ */
+void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
+ struct phylink_link_state *state)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ int bmsr, lpa;
+
+ bmsr = mdiobus_read(bus, addr, MII_BMSR);
+ lpa = mdiobus_read(bus, addr, MII_LPA);
+ if (bmsr < 0 || lpa < 0) {
+ state->link = false;
+ return;
+ }
+
+ state->link = !!(bmsr & BMSR_LSTATUS);
+ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
+ if (!state->link)
+ return;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ phylink_decode_c37_word(state, lpa, SPEED_1000);
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ phylink_decode_c37_word(state, lpa, SPEED_2500);
+ break;
+
+ case PHY_INTERFACE_MODE_SGMII:
+ phylink_decode_sgmii_word(state, lpa);
+ break;
+
+ default:
+ state->link = false;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_get_state);
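As the kerneldoc notes, this can be plugged straight into a MAC driver's get-state hook. A minimal wrapper, assuming a hypothetical foo_priv holding the PCS's struct mdio_device:

	static void foo_mac_pcs_get_state(struct phylink_config *config,
					  struct phylink_link_state *state)
	{
		struct foo_priv *priv = netdev_priv(to_net_dev(config->dev));

		/* BMSR/LPA read and clause 37/SGMII decode done by phylink */
		phylink_mii_c22_pcs_get_state(priv->pcs_mdiodev, state);
	}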
+
+/**
+ * phylink_mii_c22_pcs_set_advertisement() - configure the clause 37 PCS
+ * advertisement
+ * @pcs: a pointer to a &struct mdio_device.
+ * @interface: the PHY interface mode being configured
+ * @advertising: the ethtool advertisement mask
+ *
+ * Helper for MAC PCS supporting the 802.3 clause 22 register set for
+ * clause 37 negotiation and/or SGMII control.
+ *
+ * Configure the clause 37 PCS advertisement as specified by @advertising. This
+ * does not trigger a renegotiation; phylink will do that via the
+ * mac_an_restart() method of the struct phylink_mac_ops structure.
+ *
+ * Returns negative error code on failure to configure the advertisement,
+ * zero if no change has been made, or one if the advertisement has changed.
+ */
+int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
+ phy_interface_t interface,
+ const unsigned long *advertising)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ int val, ret;
+ u16 adv;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ adv = ADVERTISE_1000XFULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ advertising))
+ adv |= ADVERTISE_1000XPAUSE;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ advertising))
+ adv |= ADVERTISE_1000XPSE_ASYM;
+
+ val = mdiobus_read(bus, addr, MII_ADVERTISE);
+ if (val < 0)
+ return val;
+
+ if (val == adv)
+ return 0;
+
+ ret = mdiobus_write(bus, addr, MII_ADVERTISE, adv);
+ if (ret < 0)
+ return ret;
+
+ return 1;
+
+ case PHY_INTERFACE_MODE_SGMII:
+ val = mdiobus_read(bus, addr, MII_ADVERTISE);
+ if (val < 0)
+ return val;
+
+ if (val == 0x0001)
+ return 0;
+
+ ret = mdiobus_write(bus, addr, MII_ADVERTISE, 0x0001);
+ if (ret < 0)
+ return ret;
+
+ return 1;
+
+ default:
+ /* Nothing to do for other modes */
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_set_advertisement);
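The tri-state return is what lets a caller tell an MDIO failure apart from "changed" and "no change". A hedged usage sketch from inside a hypothetical foo_mac_config():

	ret = phylink_mii_c22_pcs_set_advertisement(priv->pcs_mdiodev,
						    state->interface,
						    state->advertising);
	if (ret < 0)
		netdev_err(priv->ndev, "failed to set PCS advertisement\n");
	/* ret == 1 (advertisement changed) needs no action here: phylink
	 * follows up through the mac_an_restart() method, per the kerneldoc.
	 */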
+
+/**
+ * phylink_mii_c22_pcs_an_restart() - restart 802.3z autonegotiation
+ * @pcs: a pointer to a &struct mdio_device.
+ *
+ * Helper for MAC PCS supporting the 802.3 clause 22 register set for
+ * clause 37 negotiation.
+ *
+ * Restart the clause 37 negotiation with the link partner. This is
+ * suitable to be directly plugged into the mac_an_restart() member
+ * of the struct phylink_mac_ops structure.
+ */
+void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs)
+{
+ struct mii_bus *bus = pcs->bus;
+ int val, addr = pcs->addr;
+
+ val = mdiobus_read(bus, addr, MII_BMCR);
+ if (val >= 0) {
+ val |= BMCR_ANRESTART;
+
+ mdiobus_write(bus, addr, MII_BMCR, val);
+ }
+}
+EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_an_restart);
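Putting the three clause 22 helpers together, a hypothetical MAC driver's ops table could delegate all PCS handling to them (foo_* names are illustrative, not part of this patch):

	static void foo_mac_an_restart(struct phylink_config *config)
	{
		struct foo_priv *priv = netdev_priv(to_net_dev(config->dev));

		phylink_mii_c22_pcs_an_restart(priv->pcs_mdiodev);
	}

	static const struct phylink_mac_ops foo_mac_ops = {
		.validate		= foo_validate,
		.mac_pcs_get_state	= foo_mac_pcs_get_state,
		.mac_an_restart		= foo_mac_an_restart,
		.mac_config		= foo_mac_config,
		.mac_link_down		= foo_mac_link_down,
		.mac_link_up		= foo_mac_link_up,
	};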
+
+#define C45_ADDR(d,a) (MII_ADDR_C45 | (d) << 16 | (a))
+void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
+ struct phylink_link_state *state)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ int stat;
+
+ stat = mdiobus_read(bus, addr, C45_ADDR(MDIO_MMD_PCS, MDIO_STAT1));
+ if (stat < 0) {
+ state->link = false;
+ return;
+ }
+
+ state->link = !!(stat & MDIO_STAT1_LSTATUS);
+ if (!state->link)
+ return;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_10GBASER:
+ state->speed = SPEED_10000;
+ state->duplex = DUPLEX_FULL;
+ break;
+
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(phylink_mii_c45_pcs_get_state);
+
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index f5fa2fff3ddc..2d99e9de6ee1 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -49,6 +49,8 @@
#define RTL_LPADV_5000FULL BIT(6)
#define RTL_LPADV_2500FULL BIT(5)
+#define RTLGEN_SPEED_MASK 0x0630
+
#define RTL_GENERIC_PHYID 0x001cc800
MODULE_DESCRIPTION("Realtek PHY driver");
@@ -309,6 +311,55 @@ static int rtl8366rb_config_init(struct phy_device *phydev)
return ret;
}
+/* get actual speed to cover the downshift case */
+static int rtlgen_get_speed(struct phy_device *phydev)
+{
+ int val;
+
+ if (!phydev->link)
+ return 0;
+
+ val = phy_read_paged(phydev, 0xa43, 0x12);
+ if (val < 0)
+ return val;
+
+ switch (val & RTLGEN_SPEED_MASK) {
+ case 0x0000:
+ phydev->speed = SPEED_10;
+ break;
+ case 0x0010:
+ phydev->speed = SPEED_100;
+ break;
+ case 0x0020:
+ phydev->speed = SPEED_1000;
+ break;
+ case 0x0200:
+ phydev->speed = SPEED_10000;
+ break;
+ case 0x0210:
+ phydev->speed = SPEED_2500;
+ break;
+ case 0x0220:
+ phydev->speed = SPEED_5000;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int rtlgen_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ return rtlgen_get_speed(phydev);
+}
+
static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
{
int ret;
@@ -429,6 +480,8 @@ static int rtl8125_config_aneg(struct phy_device *phydev)
static int rtl8125_read_status(struct phy_device *phydev)
{
+ int ret;
+
if (phydev->autoneg == AUTONEG_ENABLE) {
int lpadv = phy_read_paged(phydev, 0xa5d, 0x13);
@@ -443,7 +496,11 @@ static int rtl8125_read_status(struct phy_device *phydev)
phydev->lp_advertising, lpadv & RTL_LPADV_2500FULL);
}
- return genphy_read_status(phydev);
+ ret = genphy_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ return rtlgen_get_speed(phydev);
}
static bool rtlgen_supports_2_5gbps(struct phy_device *phydev)
@@ -550,6 +607,7 @@ static struct phy_driver realtek_drvs[] = {
}, {
.name = "Generic FE-GE Realtek PHY",
.match_phy_device = rtlgen_match_phy_device,
+ .read_status = rtlgen_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index b73298250793..93da7d3d0954 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -112,8 +112,6 @@ static int lan87xx_read_status(struct phy_device *phydev)
int err = genphy_read_status(phydev);
if (!phydev->link && priv->energy_enable) {
- int i;
-
/* Disable EDPD to wake up PHY */
int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0)
@@ -125,15 +123,11 @@ static int lan87xx_read_status(struct phy_device *phydev)
return rc;
/* Wait max 640 ms to detect energy */
- for (i = 0; i < 64; i++) {
- /* Sleep to allow link test pulses to be sent */
- msleep(10);
- rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
- if (rc < 0)
- return rc;
- if (rc & MII_LAN83C185_ENERGYON)
- break;
- }
+ phy_read_poll_timeout(phydev, MII_LAN83C185_CTRL_STATUS, rc,
+ rc & MII_LAN83C185_ENERGYON, 10000,
+ 640000, true);
+ if (rc < 0)
+ return rc;
/* Re-enable EDPD */
rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
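phy_read_poll_timeout() is built on read_poll_timeout() from iopoll.h; the call above is roughly equivalent to the loop it replaces. A sketch only; the helper additionally logs and returns -ETIMEDOUT when the condition never becomes true:

	unsigned long timeout = jiffies + usecs_to_jiffies(640000);

	for (;;) {
		usleep_range(10000, 15000);	/* sleep_before_read == true */
		rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
		if (rc < 0 || (rc & MII_LAN83C185_ENERGYON))
			break;
		if (time_after(jiffies, timeout))
			break;
	}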
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index babb01888b78..f81fb0b13a94 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -456,11 +456,8 @@ static void slip_write_wakeup(struct tty_struct *tty)
rcu_read_lock();
sl = rcu_dereference(tty->disc_data);
- if (!sl)
- goto out;
-
- schedule_work(&sl->tx_work);
-out:
+ if (sl)
+ schedule_work(&sl->tx_work);
rcu_read_unlock();
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 650c937ed56b..228fe449dc6d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -75,35 +75,6 @@
static void tun_default_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd);
-/* Uncomment to enable debugging */
-/* #define TUN_DEBUG 1 */
-
-#ifdef TUN_DEBUG
-static int debug;
-
-#define tun_debug(level, tun, fmt, args...) \
-do { \
- if (tun->debug) \
- netdev_printk(level, tun->dev, fmt, ##args); \
-} while (0)
-#define DBG1(level, fmt, args...) \
-do { \
- if (debug == 2) \
- printk(level fmt, ##args); \
-} while (0)
-#else
-#define tun_debug(level, tun, fmt, args...) \
-do { \
- if (0) \
- netdev_printk(level, tun->dev, fmt, ##args); \
-} while (0)
-#define DBG1(level, fmt, args...) \
-do { \
- if (0) \
- printk(level fmt, ##args); \
-} while (0)
-#endif
-
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
/* TUN device flags */
@@ -225,9 +196,7 @@ struct tun_struct {
struct sock_fprog fprog;
/* protected by rtnl lock */
bool filter_attached;
-#ifdef TUN_DEBUG
- int debug;
-#endif
+ u32 msg_enable;
spinlock_t lock;
struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
struct timer_list flow_gc_timer;
@@ -423,8 +392,9 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
if (e) {
- tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
- rxhash, queue_index);
+ netif_info(tun, tx_queued, tun->dev,
+ "create flow: hash %u index %u\n",
+ rxhash, queue_index);
e->updated = jiffies;
e->rxhash = rxhash;
e->rps_rxhash = 0;
@@ -438,8 +408,8 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
- tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
- e->rxhash, e->queue_index);
+ netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
+ e->rxhash, e->queue_index);
hlist_del_rcu(&e->hash_link);
kfree_rcu(e, rcu);
--tun->flow_count;
@@ -485,8 +455,6 @@ static void tun_flow_cleanup(struct timer_list *t)
unsigned long count = 0;
int i;
- tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
-
spin_lock(&tun->lock);
for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
struct tun_flow_entry *e;
@@ -546,8 +514,7 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
rcu_read_unlock();
}
-/**
- * Save the hash received in the stack receive path and update the
+/* Save the hash received in the stack receive path and update the
* flow_hash table accordingly.
*/
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
@@ -1076,9 +1043,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
if (!rcu_dereference(tun->steering_prog))
tun_automq_xmit(tun, skb);
- tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
-
- BUG_ON(!tfile);
+ netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);
/* Drop if the filter does not like it.
* This is a noop if the filter is disabled.
@@ -1435,8 +1400,6 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
sk = tfile->socket.sk;
- tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
-
poll_wait(file, sk_sleep(sk), wait);
if (!ptr_ring_empty(&tfile->tx_ring))
@@ -2207,8 +2170,6 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
ssize_t ret;
int err;
- tun_debug(KERN_INFO, tun, "tun_do_read\n");
-
if (!iov_iter_count(to)) {
tun_ptr_free(ptr);
return 0;
@@ -2853,8 +2814,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
netif_carrier_on(tun->dev);
- tun_debug(KERN_INFO, tun, "tun_set_iff\n");
-
/* Make sure persistent devices do not get stuck in
* xoff state.
*/
@@ -2885,8 +2844,6 @@ err_free_dev:
static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
{
- tun_debug(KERN_INFO, tun, "tun_get_iff\n");
-
strcpy(ifr->ifr_name, tun->dev->name);
ifr->ifr_flags = tun_flags(tun);
@@ -3110,7 +3067,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
if (!tun)
goto unlock;
- tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
+ netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
net = dev_net(tun->dev);
ret = 0;
@@ -3131,8 +3088,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
/* Disable/Enable checksum */
/* [unimplemented] */
- tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
- arg ? "disabled" : "enabled");
+ netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
+ arg ? "disabled" : "enabled");
break;
case TUNSETPERSIST:
@@ -3150,8 +3107,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
do_notify = true;
}
- tun_debug(KERN_INFO, tun, "persist %s\n",
- arg ? "enabled" : "disabled");
+ netif_info(tun, drv, tun->dev, "persist %s\n",
+ arg ? "enabled" : "disabled");
break;
case TUNSETOWNER:
@@ -3163,8 +3120,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
}
tun->owner = owner;
do_notify = true;
- tun_debug(KERN_INFO, tun, "owner set to %u\n",
- from_kuid(&init_user_ns, tun->owner));
+ netif_info(tun, drv, tun->dev, "owner set to %u\n",
+ from_kuid(&init_user_ns, tun->owner));
break;
case TUNSETGROUP:
@@ -3176,29 +3133,28 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
}
tun->group = group;
do_notify = true;
- tun_debug(KERN_INFO, tun, "group set to %u\n",
- from_kgid(&init_user_ns, tun->group));
+ netif_info(tun, drv, tun->dev, "group set to %u\n",
+ from_kgid(&init_user_ns, tun->group));
break;
case TUNSETLINK:
/* Only allow setting the type when the interface is down */
if (tun->dev->flags & IFF_UP) {
- tun_debug(KERN_INFO, tun,
- "Linktype set failed because interface is up\n");
+ netif_info(tun, drv, tun->dev,
+ "Linktype set failed because interface is up\n");
ret = -EBUSY;
} else {
tun->dev->type = (int) arg;
- tun_debug(KERN_INFO, tun, "linktype set to %d\n",
- tun->dev->type);
+ netif_info(tun, drv, tun->dev, "linktype set to %d\n",
+ tun->dev->type);
ret = 0;
}
break;
-#ifdef TUN_DEBUG
case TUNSETDEBUG:
- tun->debug = arg;
+ tun->msg_enable = (u32)arg;
break;
-#endif
+
case TUNSETOFFLOAD:
ret = set_offload(tun, arg);
break;
@@ -3221,9 +3177,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case SIOCSIFHWADDR:
/* Set hw address */
- tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
- ifr.ifr_hwaddr.sa_data);
-
ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
break;
@@ -3418,8 +3371,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
struct net *net = current->nsproxy->net_ns;
struct tun_file *tfile;
- DBG1(KERN_INFO, "tunX: tun_chr_open\n");
-
tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
&tun_proto, 0);
if (!tfile)
@@ -3559,20 +3510,16 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
static u32 tun_get_msglevel(struct net_device *dev)
{
-#ifdef TUN_DEBUG
struct tun_struct *tun = netdev_priv(dev);
- return tun->debug;
-#else
- return -EOPNOTSUPP;
-#endif
+
+ return tun->msg_enable;
}
static void tun_set_msglevel(struct net_device *dev, u32 value)
{
-#ifdef TUN_DEBUG
struct tun_struct *tun = netdev_priv(dev);
- tun->debug = value;
-#endif
+
+ tun->msg_enable = value;
}
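Both accessors now operate on the standard NETIF_MSG_* bitmap, so the netif_info() conversions above are gated per message class at runtime. For example, the TUNSETPERSIST message only fires when the drv class is enabled; a sketch of what the macro boils down to:

	if (tun->msg_enable & NETIF_MSG_DRV)
		netdev_info(tun->dev, "persist %s\n",
			    arg ? "enabled" : "disabled");

This means "ethtool -s <dev> msglvl drv on" restores the old TUN_DEBUG-style output at runtime, without a rebuild.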
static int tun_get_coalesce(struct net_device *dev,
@@ -3599,6 +3546,7 @@ static int tun_set_coalesce(struct net_device *dev,
}
static const struct ethtool_ops tun_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_drvinfo = tun_get_drvinfo,
.get_msglevel = tun_get_msglevel,
.set_msglevel = tun_set_msglevel,
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index bcabd39d136a..9bdbd7b472a0 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -36,7 +36,7 @@ struct usbpn_dev {
spinlock_t rx_lock;
struct sk_buff *rx_skb;
- struct urb *urbs[0];
+ struct urb *urbs[];
};
static void tx_complete(struct urb *req);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index c2c82e6391b4..8929669b5e6d 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -175,7 +175,11 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
u32 val, max, min;
/* clamp new_tx to sane values */
- min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
+ if (ctx->is_ndp16)
+ min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
+ else
+ min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
+
max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
/* some devices set dwNtbOutMaxSize too low for the above default */
@@ -307,10 +311,17 @@ static ssize_t ndp_to_end_store(struct device *d, struct device_attribute *attr
if (enable == (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
return len;
- if (enable && !ctx->delayed_ndp16) {
- ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
- if (!ctx->delayed_ndp16)
- return -ENOMEM;
+ if (enable) {
+ if (ctx->is_ndp16 && !ctx->delayed_ndp16) {
+ ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp16)
+ return -ENOMEM;
+ }
+ if (!ctx->is_ndp16 && !ctx->delayed_ndp32) {
+ ctx->delayed_ndp32 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp32)
+ return -ENOMEM;
+ }
}
/* flush pending data before changing flag */
@@ -512,6 +523,9 @@ static int cdc_ncm_init(struct usbnet *dev)
dev_err(&dev->intf->dev, "SET_CRC_MODE failed\n");
}
+ /* use ndp16 by default */
+ ctx->is_ndp16 = 1;
+
/* set NTB format, if both formats are supported.
*
* "The host shall only send this command while the NCM Data
@@ -519,14 +533,27 @@ static int cdc_ncm_init(struct usbnet *dev)
*/
if (le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported) &
USB_CDC_NCM_NTB32_SUPPORTED) {
- dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
- err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- USB_CDC_NCM_NTB16_FORMAT,
- iface_no, NULL, 0);
- if (err < 0)
+ if (ctx->drvflags & CDC_NCM_FLAG_PREFER_NTB32) {
+ ctx->is_ndp16 = 0;
+ dev_dbg(&dev->intf->dev, "Setting NTB format to 32-bit\n");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB32_FORMAT,
+ iface_no, NULL, 0);
+ } else {
+ ctx->is_ndp16 = 1;
+ dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0);
+ }
+ if (err < 0) {
+ ctx->is_ndp16 = 1;
dev_err(&dev->intf->dev, "SET_NTB_FORMAT failed\n");
+ }
}
/* set initial device values */
@@ -549,7 +576,10 @@ static int cdc_ncm_init(struct usbnet *dev)
ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
/* set up maximum NDP size */
- ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
+ if (ctx->is_ndp16)
+ ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
+ else
+ ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp32) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe32);
/* initial coalescing timer interval */
ctx->timer_interval = CDC_NCM_TIMER_INTERVAL_USEC * NSEC_PER_USEC;
@@ -734,7 +764,10 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
ctx->tx_curr_skb = NULL;
}
- kfree(ctx->delayed_ndp16);
+ if (ctx->is_ndp16)
+ kfree(ctx->delayed_ndp16);
+ else
+ kfree(ctx->delayed_ndp32);
kfree(ctx);
}
@@ -772,10 +805,8 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
u8 *buf;
int len;
int temp;
- int err;
u8 iface_no;
struct usb_cdc_parsed_header hdr;
- __le16 curr_ntb_format;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -879,32 +910,6 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
goto error2;
}
- /*
- * Some Huawei devices have been observed to come out of reset in NDP32 mode.
- * Let's check if this is the case, and set the device to NDP16 mode again if
- * needed.
- */
- if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) {
- err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
- USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
- 0, iface_no, &curr_ntb_format, 2);
- if (err < 0) {
- goto error2;
- }
-
- if (curr_ntb_format == cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT)) {
- dev_info(&intf->dev, "resetting NTB format to 16-bit");
- err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
- USB_TYPE_CLASS | USB_DIR_OUT
- | USB_RECIP_INTERFACE,
- USB_CDC_NCM_NTB16_FORMAT,
- iface_no, NULL, 0);
-
- if (err < 0)
- goto error2;
- }
- }
-
cdc_ncm_find_endpoints(dev, ctx->data);
cdc_ncm_find_endpoints(dev, ctx->control);
if (!dev->in || !dev->out || !dev->status) {
@@ -929,9 +934,15 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
/* Allocate the delayed NDP if needed. */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
- ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
- if (!ctx->delayed_ndp16)
- goto error2;
+ if (ctx->is_ndp16) {
+ ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp16)
+ goto error2;
+ } else {
+ ctx->delayed_ndp32 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
+ if (!ctx->delayed_ndp32)
+ goto error2;
+ }
dev_info(&intf->dev, "NDP will be placed at end of frame for this device.");
}
@@ -1055,7 +1066,7 @@ static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remai
/* return a pointer to a valid struct usb_cdc_ncm_ndp16 of type sign, possibly
* allocating a new one within skb
*/
-static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
+static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
{
struct usb_cdc_ncm_ndp16 *ndp16 = NULL;
struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
@@ -1110,12 +1121,73 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
return ndp16;
}
+static struct usb_cdc_ncm_ndp32 *cdc_ncm_ndp32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
+{
+ struct usb_cdc_ncm_ndp32 *ndp32 = NULL;
+ struct usb_cdc_ncm_nth32 *nth32 = (void *)skb->data;
+ size_t ndpoffset = le32_to_cpu(nth32->dwNdpIndex);
+
+ /* If NDP should be moved to the end of the NCM package, we can't follow the
+	 * NTH32 header as we would normally do. The NDP isn't written to the SKB yet, and
+	 * the dwNdpIndex field in the header is not yet consistent with reality; it is
+	 * fixed up later, when the NDP is appended.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+ if (ctx->delayed_ndp32->dwSignature == sign)
+ return ctx->delayed_ndp32;
+
+ /* We can only push a single NDP to the end. Return
+ * NULL to send what we've already got and queue this
+ * skb for later.
+ */
+ else if (ctx->delayed_ndp32->dwSignature)
+ return NULL;
+ }
+
+ /* follow the chain of NDPs, looking for a match */
+ while (ndpoffset) {
+ ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb->data + ndpoffset);
+ if (ndp32->dwSignature == sign)
+ return ndp32;
+ ndpoffset = le32_to_cpu(ndp32->dwNextNdpIndex);
+ }
+
+ /* align new NDP */
+ if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+ cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
+
+ /* verify that there is room for the NDP and the datagram (reserve) */
+ if ((ctx->tx_curr_size - skb->len - reserve) < ctx->max_ndp_size)
+ return NULL;
+
+ /* link to it */
+ if (ndp32)
+ ndp32->dwNextNdpIndex = cpu_to_le32(skb->len);
+ else
+ nth32->dwNdpIndex = cpu_to_le32(skb->len);
+
+ /* push a new empty NDP */
+ if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
+ ndp32 = skb_put_zero(skb, ctx->max_ndp_size);
+ else
+ ndp32 = ctx->delayed_ndp32;
+
+ ndp32->dwSignature = sign;
+ ndp32->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp32) + sizeof(struct usb_cdc_ncm_dpe32));
+ return ndp32;
+}
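For orientation, the 32-bit NTB assembled here has the same shape as its 16-bit counterpart, only with wider index/length fields. Roughly, for the NDP-to-end layout:

	/*
	 *  +-------+------------+-----+------------+- pad -+-------+
	 *  | NTH32 | datagram 0 | ... | datagram N |       | NDP32 |
	 *  +-------+------------+-----+------------+-------+-------+
	 *     |                                             ^
	 *     +------------- nth32->dwNdpIndex -------------+
	 *
	 * ndp32->dpe32[i].dwDatagramIndex/dwDatagramLength locate each
	 * datagram; nth32->dwBlockLength covers the whole NTB.
	 */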
+
struct sk_buff *
cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- struct usb_cdc_ncm_nth16 *nth16;
- struct usb_cdc_ncm_ndp16 *ndp16;
+ union {
+ struct usb_cdc_ncm_nth16 *nth16;
+ struct usb_cdc_ncm_nth32 *nth32;
+ } nth;
+ union {
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct usb_cdc_ncm_ndp32 *ndp32;
+ } ndp;
struct sk_buff *skb_out;
u16 n = 0, index, ndplen;
u8 ready2send = 0;
@@ -1179,11 +1251,19 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
ctx->tx_low_mem_val--;
}
- /* fill out the initial 16-bit NTB header */
- nth16 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth16));
- nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
- nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
- nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
+ if (ctx->is_ndp16) {
+ /* fill out the initial 16-bit NTB header */
+ nth.nth16 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth16));
+ nth.nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+ nth.nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+ nth.nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
+ } else {
+ /* fill out the initial 32-bit NTB header */
+ nth.nth32 = skb_put_zero(skb_out, sizeof(struct usb_cdc_ncm_nth32));
+ nth.nth32->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH32_SIGN);
+ nth.nth32->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth32));
+ nth.nth32->wSequence = cpu_to_le16(ctx->tx_seq++);
+ }
/* count total number of frames in this NTB */
ctx->tx_curr_frame_num = 0;
@@ -1205,13 +1285,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
/* get the appropriate NDP for this skb */
- ndp16 = cdc_ncm_ndp(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
+ if (ctx->is_ndp16)
+ ndp.ndp16 = cdc_ncm_ndp16(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
+ else
+ ndp.ndp32 = cdc_ncm_ndp32(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
/* align beginning of next frame */
cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_curr_size);
/* check if we had enough room left for both NDP and frame */
- if (!ndp16 || skb_out->len + skb->len + delayed_ndp_size > ctx->tx_curr_size) {
+ if ((ctx->is_ndp16 && !ndp.ndp16) || (!ctx->is_ndp16 && !ndp.ndp32) ||
+ skb_out->len + skb->len + delayed_ndp_size > ctx->tx_curr_size) {
if (n == 0) {
/* won't fit, MTU problem? */
dev_kfree_skb_any(skb);
@@ -1233,13 +1317,22 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
/* calculate frame number within this NDP */
- ndplen = le16_to_cpu(ndp16->wLength);
- index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
+ if (ctx->is_ndp16) {
+ ndplen = le16_to_cpu(ndp.ndp16->wLength);
+ index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
+
+ /* OK, add this skb */
+ ndp.ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
+ ndp.ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
+ ndp.ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
+ } else {
+ ndplen = le16_to_cpu(ndp.ndp32->wLength);
+ index = (ndplen - sizeof(struct usb_cdc_ncm_ndp32)) / sizeof(struct usb_cdc_ncm_dpe32) - 1;
- /* OK, add this skb */
- ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
- ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
- ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
+ ndp.ndp32->dpe32[index].dwDatagramLength = cpu_to_le32(skb->len);
+ ndp.ndp32->dpe32[index].dwDatagramIndex = cpu_to_le32(skb_out->len);
+ ndp.ndp32->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe32));
+ }
skb_put_data(skb_out, skb->data, skb->len);
ctx->tx_curr_frame_payload += skb->len; /* count real tx payload data */
dev_kfree_skb_any(skb);
@@ -1286,13 +1379,22 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* If requested, put NDP at end of frame. */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
- nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
- cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
- nth16->wNdpIndex = cpu_to_le16(skb_out->len);
- skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
+ if (ctx->is_ndp16) {
+ nth.nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
+ nth.nth16->wNdpIndex = cpu_to_le16(skb_out->len);
+ skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
+
+ /* Zero out delayed NDP - signature checking will naturally fail. */
+ ndp.ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
+ } else {
+ nth.nth32 = (struct usb_cdc_ncm_nth32 *)skb_out->data;
+ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
+ nth.nth32->dwNdpIndex = cpu_to_le32(skb_out->len);
+ skb_put_data(skb_out, ctx->delayed_ndp32, ctx->max_ndp_size);
- /* Zero out delayed NDP - signature checking will naturally fail. */
- ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
+ ndp.ndp32 = memset(ctx->delayed_ndp32, 0, ctx->max_ndp_size);
+ }
}
/* If the collected data size is less than or equal to ctx->min_tx_pkt
@@ -1314,8 +1416,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
}
/* set final frame length */
- nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
- nth16->wBlockLength = cpu_to_le16(skb_out->len);
+ if (ctx->is_ndp16) {
+ nth.nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+ nth.nth16->wBlockLength = cpu_to_le16(skb_out->len);
+ } else {
+ nth.nth32 = (struct usb_cdc_ncm_nth32 *)skb_out->data;
+ nth.nth32->dwBlockLength = cpu_to_le32(skb_out->len);
+ }
/* return skb */
ctx->tx_curr_skb = NULL;
@@ -1398,7 +1505,12 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
goto error;
spin_lock_bh(&ctx->mtx);
- skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
+
+ if (ctx->is_ndp16)
+ skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
+ else
+ skb_out = cdc_ncm_fill_tx_frame(dev, skb, cpu_to_le32(USB_CDC_NCM_NDP32_NOCRC_SIGN));
+
spin_unlock_bh(&ctx->mtx);
return skb_out;
@@ -1459,6 +1571,54 @@ error:
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth16);
+int cdc_ncm_rx_verify_nth32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
+{
+ struct usbnet *dev = netdev_priv(skb_in->dev);
+ struct usb_cdc_ncm_nth32 *nth32;
+ int len;
+ int ret = -EINVAL;
+
+ if (ctx == NULL)
+ goto error;
+
+ if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth32) +
+ sizeof(struct usb_cdc_ncm_ndp32))) {
+ netif_dbg(dev, rx_err, dev->net, "frame too short\n");
+ goto error;
+ }
+
+ nth32 = (struct usb_cdc_ncm_nth32 *)skb_in->data;
+
+ if (nth32->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH32_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid NTH32 signature <%#010x>\n",
+ le32_to_cpu(nth32->dwSignature));
+ goto error;
+ }
+
+ len = le32_to_cpu(nth32->dwBlockLength);
+ if (len > ctx->rx_max) {
+ netif_dbg(dev, rx_err, dev->net,
+ "unsupported NTB block length %u/%u\n", len,
+ ctx->rx_max);
+ goto error;
+ }
+
+ if ((ctx->rx_seq + 1) != le16_to_cpu(nth32->wSequence) &&
+ (ctx->rx_seq || le16_to_cpu(nth32->wSequence)) &&
+ !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth32->wSequence))) {
+ netif_dbg(dev, rx_err, dev->net,
+ "sequence number glitch prev=%d curr=%d\n",
+ ctx->rx_seq, le16_to_cpu(nth32->wSequence));
+ }
+ ctx->rx_seq = le16_to_cpu(nth32->wSequence);
+
+ ret = le32_to_cpu(nth32->dwNdpIndex);
+error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth32);
+
/* verify NDP header and return number of datagrams, or negative error */
int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
{
@@ -1495,6 +1655,42 @@ error:
}
EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp16);
+/* verify NDP header and return number of datagrams, or negative error */
+int cdc_ncm_rx_verify_ndp32(struct sk_buff *skb_in, int ndpoffset)
+{
+ struct usbnet *dev = netdev_priv(skb_in->dev);
+ struct usb_cdc_ncm_ndp32 *ndp32;
+ int ret = -EINVAL;
+
+ if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp32)) > skb_in->len) {
+ netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n",
+ ndpoffset);
+ goto error;
+ }
+ ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb_in->data + ndpoffset);
+
+ if (le16_to_cpu(ndp32->wLength) < USB_CDC_NCM_NDP32_LENGTH_MIN) {
+ netif_dbg(dev, rx_err, dev->net, "invalid DPT32 length <%u>\n",
+ le16_to_cpu(ndp32->wLength));
+ goto error;
+ }
+
+ ret = ((le16_to_cpu(ndp32->wLength) -
+ sizeof(struct usb_cdc_ncm_ndp32)) /
+ sizeof(struct usb_cdc_ncm_dpe32));
+ ret--; /* we process NDP entries except for the last one */
+
+ if ((sizeof(struct usb_cdc_ncm_ndp32) +
+ ret * (sizeof(struct usb_cdc_ncm_dpe32))) > skb_in->len) {
+ netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret);
+ ret = -EINVAL;
+ }
+
+error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp32);
+
int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
{
struct sk_buff *skb;
@@ -1503,34 +1699,66 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
int nframes;
int x;
int offset;
- struct usb_cdc_ncm_ndp16 *ndp16;
- struct usb_cdc_ncm_dpe16 *dpe16;
+ union {
+ struct usb_cdc_ncm_ndp16 *ndp16;
+ struct usb_cdc_ncm_ndp32 *ndp32;
+ } ndp;
+ union {
+ struct usb_cdc_ncm_dpe16 *dpe16;
+ struct usb_cdc_ncm_dpe32 *dpe32;
+ } dpe;
+
int ndpoffset;
int loopcount = 50; /* arbitrary max preventing infinite loop */
u32 payload = 0;
- ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+ if (ctx->is_ndp16)
+ ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
+ else
+ ndpoffset = cdc_ncm_rx_verify_nth32(ctx, skb_in);
+
if (ndpoffset < 0)
goto error;
next_ndp:
- nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
- if (nframes < 0)
- goto error;
+ if (ctx->is_ndp16) {
+ nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
+ if (nframes < 0)
+ goto error;
- ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
+ ndp.ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
- if (ndp16->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)) {
- netif_dbg(dev, rx_err, dev->net,
- "invalid DPT16 signature <%#010x>\n",
- le32_to_cpu(ndp16->dwSignature));
- goto err_ndp;
+ if (ndp.ndp16->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid DPT16 signature <%#010x>\n",
+ le32_to_cpu(ndp.ndp16->dwSignature));
+ goto err_ndp;
+ }
+ dpe.dpe16 = ndp.ndp16->dpe16;
+ } else {
+ nframes = cdc_ncm_rx_verify_ndp32(skb_in, ndpoffset);
+ if (nframes < 0)
+ goto error;
+
+ ndp.ndp32 = (struct usb_cdc_ncm_ndp32 *)(skb_in->data + ndpoffset);
+
+ if (ndp.ndp32->dwSignature != cpu_to_le32(USB_CDC_NCM_NDP32_NOCRC_SIGN)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "invalid DPT32 signature <%#010x>\n",
+ le32_to_cpu(ndp.ndp32->dwSignature));
+ goto err_ndp;
+ }
+ dpe.dpe32 = ndp.ndp32->dpe32;
}
- dpe16 = ndp16->dpe16;
- for (x = 0; x < nframes; x++, dpe16++) {
- offset = le16_to_cpu(dpe16->wDatagramIndex);
- len = le16_to_cpu(dpe16->wDatagramLength);
+ for (x = 0; x < nframes; x++) {
+ if (ctx->is_ndp16) {
+ offset = le16_to_cpu(dpe.dpe16->wDatagramIndex);
+ len = le16_to_cpu(dpe.dpe16->wDatagramLength);
+ } else {
+ offset = le32_to_cpu(dpe.dpe32->dwDatagramIndex);
+ len = le32_to_cpu(dpe.dpe32->dwDatagramLength);
+ }
/*
* CDC NCM ch. 3.7
@@ -1561,10 +1789,19 @@ next_ndp:
usbnet_skb_return(dev, skb);
payload += len; /* count payload bytes in this NTB */
}
+
+ if (ctx->is_ndp16)
+ dpe.dpe16++;
+ else
+ dpe.dpe32++;
}
err_ndp:
/* are there more NDPs to process? */
- ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+ if (ctx->is_ndp16)
+ ndpoffset = le16_to_cpu(ndp.ndp16->wNextNdpIndex);
+ else
+ ndpoffset = le32_to_cpu(ndp.ndp32->dwNextNdpIndex);
+
if (ndpoffset && loopcount--)
goto next_ndp;
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index e15a472c6a54..099d84827004 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -77,11 +77,11 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
*/
drvflags |= CDC_NCM_FLAG_NDP_TO_END;
- /* Additionally, it has been reported that some Huawei E3372H devices, with
- * firmware version 21.318.01.00.541, come out of reset in NTB32 format mode, hence
- * needing to be set to the NTB16 one again.
+	/* For many Huawei devices NTB32 mode is the default and the mode they
+	 * work best in. Huawei E5785 and E5885 devices refuse to work in NTB16 mode at all.
*/
- drvflags |= CDC_NCM_FLAG_RESET_NTB16;
+ drvflags |= CDC_NCM_FLAG_PREFER_NTB32;
+
ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
if (ret)
goto err;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 95b19ce96513..8f8d9883d363 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -891,7 +891,7 @@ struct fw_block {
struct fw_header {
u8 checksum[32];
char version[RTL_VER_SIZE];
- struct fw_block blocks[0];
+ struct fw_block blocks[];
} __packed;
/**
@@ -930,7 +930,7 @@ struct fw_mac {
__le32 reserved;
__le16 fw_ver_reg;
u8 fw_ver_data;
- char info[0];
+ char info[];
} __packed;
/**
@@ -982,7 +982,7 @@ struct fw_phy_nc {
__le16 bp_start;
__le16 bp_num;
__le16 bp[4];
- char info[0];
+ char info[];
} __packed;
enum rtl_fw_type {
@@ -1948,29 +1948,6 @@ drop:
}
}
-/* msdn_giant_send_check()
- * According to the document of microsoft, the TCP Pseudo Header excludes the
- * packet length for IPv6 TCP large packets.
- */
-static int msdn_giant_send_check(struct sk_buff *skb)
-{
- const struct ipv6hdr *ipv6h;
- struct tcphdr *th;
- int ret;
-
- ret = skb_cow_head(skb, 0);
- if (ret)
- return ret;
-
- ipv6h = ipv6_hdr(skb);
- th = tcp_hdr(skb);
-
- th->check = 0;
- th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
-
- return ret;
-}
-
static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb)
{
if (skb_vlan_tag_present(skb)) {
@@ -2016,10 +1993,11 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
break;
case htons(ETH_P_IPV6):
- if (msdn_giant_send_check(skb)) {
+ if (skb_cow_head(skb, 0)) {
ret = TX_CSUM_TSO;
goto unavailable;
}
+ tcp_v6_gso_csum_prep(skb);
opts1 |= GTSENDV6;
break;
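tcp_v6_gso_csum_prep() centralizes the pseudo-header preparation that the deleted msdn_giant_send_check() used to open-code. Judging from the removed function, its core is:

	/* per the removed driver-local helper */
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	/* IPv6 TSO: pseudo-header checksum excludes the payload length */
	th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);

with the skb_cow_head() call hoisted into the caller, as the hunk above shows.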
@@ -6375,6 +6353,7 @@ static int rtl8152_set_ringparam(struct net_device *netdev,
}
static const struct ethtool_ops ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = rtl8152_get_drvinfo,
.get_link = ethtool_op_get_link,
.nway_reset = rtl8152_nway_reset,
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index d4cbb9e8c63f..aece0e5eec8c 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -34,16 +34,23 @@
#define VETH_RING_SIZE 256
#define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN)
-/* Separating two types of XDP xmit */
-#define VETH_XDP_TX BIT(0)
-#define VETH_XDP_REDIR BIT(1)
-
#define VETH_XDP_TX_BULK_SIZE 16
+struct veth_stats {
+ u64 rx_drops;
+ /* xdp */
+ u64 xdp_packets;
+ u64 xdp_bytes;
+ u64 xdp_redirect;
+ u64 xdp_drops;
+ u64 xdp_tx;
+ u64 xdp_tx_err;
+ u64 peer_tq_xdp_xmit;
+ u64 peer_tq_xdp_xmit_err;
+};
+
struct veth_rq_stats {
- u64 xdp_packets;
- u64 xdp_bytes;
- u64 xdp_drops;
+ struct veth_stats vs;
struct u64_stats_sync syncp;
};
@@ -80,16 +87,27 @@ struct veth_q_stat_desc {
size_t offset;
};
-#define VETH_RQ_STAT(m) offsetof(struct veth_rq_stats, m)
+#define VETH_RQ_STAT(m) offsetof(struct veth_stats, m)
static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
{ "xdp_packets", VETH_RQ_STAT(xdp_packets) },
{ "xdp_bytes", VETH_RQ_STAT(xdp_bytes) },
+ { "drops", VETH_RQ_STAT(rx_drops) },
+ { "xdp_redirect", VETH_RQ_STAT(xdp_redirect) },
{ "xdp_drops", VETH_RQ_STAT(xdp_drops) },
+ { "xdp_tx", VETH_RQ_STAT(xdp_tx) },
+ { "xdp_tx_errors", VETH_RQ_STAT(xdp_tx_err) },
};
#define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc)
+static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
+ { "xdp_xmit", VETH_RQ_STAT(peer_tq_xdp_xmit) },
+ { "xdp_xmit_errors", VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
+};
+
+#define VETH_TQ_STATS_LEN ARRAY_SIZE(veth_tq_stats_desc)
+
static struct {
const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
@@ -124,11 +142,19 @@ static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
for (i = 0; i < dev->real_num_rx_queues; i++) {
for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
snprintf(p, ETH_GSTRING_LEN,
- "rx_queue_%u_%.11s",
+ "rx_queue_%u_%.18s",
i, veth_rq_stats_desc[j].desc);
p += ETH_GSTRING_LEN;
}
}
+ for (i = 0; i < dev->real_num_tx_queues; i++) {
+ for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "tx_queue_%u_%.18s",
+ i, veth_tq_stats_desc[j].desc);
+ p += ETH_GSTRING_LEN;
+ }
+ }
break;
}
}
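With the descriptors above, a veth with a single queue pair would expose per-queue counters under names like the following in "ethtool -S" output (illustrative):

	rx_queue_0_xdp_packets   rx_queue_0_xdp_bytes   rx_queue_0_drops
	rx_queue_0_xdp_redirect  rx_queue_0_xdp_drops   rx_queue_0_xdp_tx
	rx_queue_0_xdp_tx_errors
	tx_queue_0_xdp_xmit      tx_queue_0_xdp_xmit_errors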
@@ -138,7 +164,8 @@ static int veth_get_sset_count(struct net_device *dev, int sset)
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(ethtool_stats_keys) +
- VETH_RQ_STATS_LEN * dev->real_num_rx_queues;
+ VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
+ VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
default:
return -EOPNOTSUPP;
}
@@ -147,7 +174,7 @@ static int veth_get_sset_count(struct net_device *dev, int sset)
static void veth_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct veth_priv *priv = netdev_priv(dev);
+ struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct net_device *peer = rtnl_dereference(priv->peer);
int i, j, idx;
@@ -155,7 +182,7 @@ static void veth_get_ethtool_stats(struct net_device *dev,
idx = 1;
for (i = 0; i < dev->real_num_rx_queues; i++) {
const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
- const void *stats_base = (void *)rq_stats;
+ const void *stats_base = (void *)&rq_stats->vs;
unsigned int start;
size_t offset;
@@ -168,6 +195,26 @@ static void veth_get_ethtool_stats(struct net_device *dev,
} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
idx += VETH_RQ_STATS_LEN;
}
+
+ if (!peer)
+ return;
+
+ rcv_priv = netdev_priv(peer);
+ for (i = 0; i < peer->real_num_rx_queues; i++) {
+ const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
+ const void *base = (void *)&rq_stats->vs;
+ unsigned int start, tx_idx = idx;
+ size_t offset;
+
+ tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
+ do {
+ start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
+ for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
+ offset = veth_tq_stats_desc[j].offset;
+ data[tx_idx + j] += *(u64 *)(base + offset);
+ }
+ } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
+ }
}
static const struct ethtool_ops veth_ethtool_ops = {
@@ -283,28 +330,34 @@ static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
return atomic64_read(&priv->dropped);
}
-static void veth_stats_rx(struct veth_rq_stats *result, struct net_device *dev)
+static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
int i;
+ result->peer_tq_xdp_xmit_err = 0;
result->xdp_packets = 0;
+ result->xdp_tx_err = 0;
result->xdp_bytes = 0;
- result->xdp_drops = 0;
+ result->rx_drops = 0;
for (i = 0; i < dev->num_rx_queues; i++) {
+ u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
struct veth_rq_stats *stats = &priv->rq[i].stats;
- u64 packets, bytes, drops;
unsigned int start;
do {
start = u64_stats_fetch_begin_irq(&stats->syncp);
- packets = stats->xdp_packets;
- bytes = stats->xdp_bytes;
- drops = stats->xdp_drops;
+ peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
+ xdp_tx_err = stats->vs.xdp_tx_err;
+ packets = stats->vs.xdp_packets;
+ bytes = stats->vs.xdp_bytes;
+ drops = stats->vs.rx_drops;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
+ result->xdp_tx_err += xdp_tx_err;
result->xdp_packets += packets;
result->xdp_bytes += bytes;
- result->xdp_drops += drops;
+ result->rx_drops += drops;
}
}
@@ -313,7 +366,7 @@ static void veth_get_stats64(struct net_device *dev,
{
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer;
- struct veth_rq_stats rx;
+ struct veth_stats rx;
u64 packets, bytes;
tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
@@ -321,7 +374,8 @@ static void veth_get_stats64(struct net_device *dev,
tot->tx_packets = packets;
veth_stats_rx(&rx, dev);
- tot->rx_dropped = rx.xdp_drops;
+ tot->tx_dropped += rx.xdp_tx_err;
+ tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
tot->rx_bytes = rx.xdp_bytes;
tot->rx_packets = rx.xdp_packets;
@@ -333,6 +387,8 @@ static void veth_get_stats64(struct net_device *dev,
tot->rx_packets += packets;
veth_stats_rx(&rx, peer);
+ tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
+ tot->rx_dropped += rx.xdp_tx_err;
tot->tx_bytes += rx.xdp_bytes;
tot->tx_packets += rx.xdp_packets;
}
@@ -369,25 +425,22 @@ static int veth_select_rxq(struct net_device *dev)
}
static int veth_xdp_xmit(struct net_device *dev, int n,
- struct xdp_frame **frames, u32 flags)
+ struct xdp_frame **frames,
+ u32 flags, bool ndo_xmit)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ int i, ret = -ENXIO, drops = 0;
struct net_device *rcv;
- int i, ret, drops = n;
unsigned int max_len;
struct veth_rq *rq;
- rcu_read_lock();
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
- ret = -EINVAL;
- goto drop;
- }
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+ rcu_read_lock();
rcv = rcu_dereference(priv->peer);
- if (unlikely(!rcv)) {
- ret = -ENXIO;
- goto drop;
- }
+ if (unlikely(!rcv))
+ goto out;
rcv_priv = netdev_priv(rcv);
rq = &rcv_priv->rq[veth_select_rxq(rcv)];
@@ -395,12 +448,9 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
* side. This means an XDP program is loaded on the peer and the peer
* device is up.
*/
- if (!rcu_access_pointer(rq->xdp_prog)) {
- ret = -ENXIO;
- goto drop;
- }
+ if (!rcu_access_pointer(rq->xdp_prog))
+ goto out;
- drops = 0;
max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
spin_lock(&rq->xdp_ring.producer_lock);
@@ -419,59 +469,80 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
if (flags & XDP_XMIT_FLUSH)
__veth_xdp_flush(rq);
- if (likely(!drops)) {
- rcu_read_unlock();
- return n;
+ ret = n - drops;
+ if (ndo_xmit) {
+ u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.vs.peer_tq_xdp_xmit += n - drops;
+ rq->stats.vs.peer_tq_xdp_xmit_err += drops;
+ u64_stats_update_end(&rq->stats.syncp);
}
- ret = n - drops;
-drop:
+out:
rcu_read_unlock();
- atomic64_add(drops, &priv->dropped);
return ret;
}
-static void veth_xdp_flush_bq(struct net_device *dev, struct veth_xdp_tx_bq *bq)
+static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ int err;
+
+ err = veth_xdp_xmit(dev, n, frames, flags, true);
+ if (err < 0) {
+ struct veth_priv *priv = netdev_priv(dev);
+
+ atomic64_add(n, &priv->dropped);
+ }
+
+ return err;
+}
+
+static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
int sent, i, err = 0;
- sent = veth_xdp_xmit(dev, bq->count, bq->q, 0);
+ sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
if (sent < 0) {
err = sent;
sent = 0;
for (i = 0; i < bq->count; i++)
xdp_return_frame(bq->q[i]);
}
- trace_xdp_bulk_tx(dev, sent, bq->count - sent, err);
+ trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err);
+
+ u64_stats_update_begin(&rq->stats.syncp);
+ rq->stats.vs.xdp_tx += sent;
+ rq->stats.vs.xdp_tx_err += bq->count - sent;
+ u64_stats_update_end(&rq->stats.syncp);
bq->count = 0;
}
-static void veth_xdp_flush(struct net_device *dev, struct veth_xdp_tx_bq *bq)
+static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
- struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
struct net_device *rcv;
- struct veth_rq *rq;
+ struct veth_rq *rcv_rq;
rcu_read_lock();
- veth_xdp_flush_bq(dev, bq);
+ veth_xdp_flush_bq(rq, bq);
rcv = rcu_dereference(priv->peer);
if (unlikely(!rcv))
goto out;
rcv_priv = netdev_priv(rcv);
- rq = &rcv_priv->rq[veth_select_rxq(rcv)];
+ rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
/* xdp_ring is initialized on receive side? */
- if (unlikely(!rcu_access_pointer(rq->xdp_prog)))
+ if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
goto out;
- __veth_xdp_flush(rq);
+ __veth_xdp_flush(rcv_rq);
out:
rcu_read_unlock();
}
-static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
+static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
struct veth_xdp_tx_bq *bq)
{
struct xdp_frame *frame = convert_to_xdp_frame(xdp);
@@ -480,7 +551,7 @@ static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
return -EOVERFLOW;
if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
- veth_xdp_flush_bq(dev, bq);
+ veth_xdp_flush_bq(rq, bq);
bq->q[bq->count++] = frame;
@@ -489,8 +560,8 @@ static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
struct xdp_frame *frame,
- unsigned int *xdp_xmit,
- struct veth_xdp_tx_bq *bq)
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
{
void *hard_start = frame->data - frame->headroom;
void *head = hard_start - sizeof(struct xdp_frame);
@@ -523,12 +594,13 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
orig_frame = *frame;
xdp.data_hard_start = head;
xdp.rxq->mem = frame->mem;
- if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
+ if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
frame = &orig_frame;
+ stats->rx_drops++;
goto err_xdp;
}
- *xdp_xmit |= VETH_XDP_TX;
+ stats->xdp_tx++;
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
@@ -537,9 +609,10 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
xdp.rxq->mem = frame->mem;
if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
frame = &orig_frame;
+ stats->rx_drops++;
goto err_xdp;
}
- *xdp_xmit |= VETH_XDP_REDIR;
+ stats->xdp_redirect++;
rcu_read_unlock();
goto xdp_xmit;
default:
@@ -549,6 +622,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
trace_xdp_exception(rq->dev, xdp_prog, act);
/* fall through */
case XDP_DROP:
+ stats->xdp_drops++;
goto err_xdp;
}
}
@@ -558,6 +632,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
skb = veth_build_skb(head, headroom, len, 0);
if (!skb) {
xdp_return_frame(frame);
+ stats->rx_drops++;
goto err;
}
@@ -573,9 +648,10 @@ xdp_xmit:
return NULL;
}
-static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
- unsigned int *xdp_xmit,
- struct veth_xdp_tx_bq *bq)
+static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
+ struct sk_buff *skb,
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
{
u32 pktlen, headroom, act, metalen;
void *orig_data, *orig_data_end;
@@ -651,20 +727,23 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
get_page(virt_to_page(xdp.data));
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
- if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
+ if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
+ stats->rx_drops++;
goto err_xdp;
}
- *xdp_xmit |= VETH_XDP_TX;
+ stats->xdp_tx++;
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
get_page(virt_to_page(xdp.data));
consume_skb(skb);
xdp.rxq->mem = rq->xdp_mem;
- if (xdp_do_redirect(rq->dev, &xdp, xdp_prog))
+ if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
+ stats->rx_drops++;
goto err_xdp;
- *xdp_xmit |= VETH_XDP_REDIR;
+ }
+ stats->xdp_redirect++;
rcu_read_unlock();
goto xdp_xmit;
default:
@@ -674,7 +753,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
trace_xdp_exception(rq->dev, xdp_prog, act);
/* fall through */
case XDP_DROP:
- goto drop;
+ stats->xdp_drops++;
+ goto xdp_drop;
}
rcu_read_unlock();
@@ -696,6 +776,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
out:
return skb;
drop:
+ stats->rx_drops++;
+xdp_drop:
rcu_read_unlock();
kfree_skb(skb);
return NULL;
@@ -706,14 +788,14 @@ xdp_xmit:
return NULL;
}
-static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit,
- struct veth_xdp_tx_bq *bq)
+static int veth_xdp_rcv(struct veth_rq *rq, int budget,
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
{
- int i, done = 0, drops = 0, bytes = 0;
+ int i, done = 0;
for (i = 0; i < budget; i++) {
void *ptr = __ptr_ring_consume(&rq->xdp_ring);
- unsigned int xdp_xmit_one = 0;
struct sk_buff *skb;
if (!ptr)
@@ -722,27 +804,26 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit,
if (veth_is_xdp_frame(ptr)) {
struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
- bytes += frame->len;
- skb = veth_xdp_rcv_one(rq, frame, &xdp_xmit_one, bq);
+ stats->xdp_bytes += frame->len;
+ skb = veth_xdp_rcv_one(rq, frame, bq, stats);
} else {
skb = ptr;
- bytes += skb->len;
- skb = veth_xdp_rcv_skb(rq, skb, &xdp_xmit_one, bq);
+ stats->xdp_bytes += skb->len;
+ skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
}
- *xdp_xmit |= xdp_xmit_one;
if (skb)
napi_gro_receive(&rq->xdp_napi, skb);
- else if (!xdp_xmit_one)
- drops++;
done++;
}
u64_stats_update_begin(&rq->stats.syncp);
- rq->stats.xdp_packets += done;
- rq->stats.xdp_bytes += bytes;
- rq->stats.xdp_drops += drops;
+ rq->stats.vs.xdp_redirect += stats->xdp_redirect;
+ rq->stats.vs.xdp_bytes += stats->xdp_bytes;
+ rq->stats.vs.xdp_drops += stats->xdp_drops;
+ rq->stats.vs.rx_drops += stats->rx_drops;
+ rq->stats.vs.xdp_packets += done;
u64_stats_update_end(&rq->stats.syncp);
return done;
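
The per-ring counters above follow the kernel's u64_stats pattern: the NAPI writer brackets updates with u64_stats_update_begin()/end(), and a reader must loop on the fetch/retry pair so 64-bit counters stay consistent on 32-bit hosts. A minimal reader sketch, assuming the veth_rq_stats layout this patch introduces (stats.vs plus stats.syncp); the helper name is illustrative and not part of the patch:

	static void veth_stats_snapshot(const struct veth_rq *rq,
					struct veth_stats *out)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&rq->stats.syncp);
			*out = rq->stats.vs;	/* snapshot all counters */
		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
	}
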
@@ -752,14 +833,14 @@ static int veth_poll(struct napi_struct *napi, int budget)
{
struct veth_rq *rq =
container_of(napi, struct veth_rq, xdp_napi);
- unsigned int xdp_xmit = 0;
+ struct veth_stats stats = {};
struct veth_xdp_tx_bq bq;
int done;
bq.count = 0;
xdp_set_return_frame_no_direct();
- done = veth_xdp_rcv(rq, budget, &xdp_xmit, &bq);
+ done = veth_xdp_rcv(rq, budget, &bq, &stats);
if (done < budget && napi_complete_done(napi, done)) {
/* Write rx_notify_masked before reading ptr_ring */
@@ -770,9 +851,9 @@ static int veth_poll(struct napi_struct *napi, int budget)
}
}
- if (xdp_xmit & VETH_XDP_TX)
- veth_xdp_flush(rq->dev, &bq);
- if (xdp_xmit & VETH_XDP_REDIR)
+ if (stats.xdp_tx > 0)
+ veth_xdp_flush(rq, &bq);
+ if (stats.xdp_redirect > 0)
xdp_do_flush();
xdp_clear_return_frame_no_direct();
@@ -1158,7 +1239,7 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = veth_set_rx_headroom,
.ndo_bpf = veth_xdp,
- .ndo_xdp_xmit = veth_xdp_xmit,
+ .ndo_xdp_xmit = veth_ndo_xdp_xmit,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2fe7a3188282..11f722460513 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -371,7 +371,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct receive_queue *rq,
struct page *page, unsigned int offset,
unsigned int len, unsigned int truesize,
- bool hdr_valid)
+ bool hdr_valid, unsigned int metasize)
{
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -393,6 +393,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
+ /* hdr_valid means no XDP, so we can copy the vnet header */
if (hdr_valid)
memcpy(hdr, p, hdr_len);
@@ -405,6 +406,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
copy = skb_tailroom(skb);
skb_put_data(skb, p, copy);
+ if (metasize) {
+ __skb_pull(skb, metasize);
+ skb_metadata_set(skb, metasize);
+ }
+
len -= copy;
offset += copy;
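
For context on the metasize handling above: XDP metadata lives in the bytes immediately before xdp.data, in the [data_meta, data) region. After the program runs, the driver computes metasize = xdp.data - xdp.data_meta, copies the headroom including that region, then __skb_pull()s past it and records it with skb_metadata_set(). A hedged sketch of the producing side — an XDP program reserving four bytes of metadata (program name and stored value are hypothetical):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_put_meta(struct xdp_md *ctx)
	{
		void *data, *data_meta;
		__u32 *meta;

		/* grow the [data_meta, data) region by four bytes */
		if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
			return XDP_PASS;

		data_meta = (void *)(long)ctx->data_meta;
		data = (void *)(long)ctx->data;
		meta = data_meta;
		if ((void *)(meta + 1) > data)	/* bounds check for the verifier */
			return XDP_PASS;

		*meta = 0xcafe;		/* hint later visible via skb metadata */
		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";
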
@@ -450,10 +456,6 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
struct virtio_net_hdr_mrg_rxbuf *hdr;
int err;
- /* virtqueue want to use data area in-front of packet */
- if (unlikely(xdpf->metasize > 0))
- return -EOPNOTSUPP;
-
if (unlikely(xdpf->headroom < vi->hdr_len))
return -EOVERFLOW;
@@ -644,6 +646,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
unsigned int delta = 0;
struct page *xdp_page;
int err;
+ unsigned int metasize = 0;
len -= vi->hdr_len;
stats->bytes += len;
@@ -683,8 +686,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
xdp.data = xdp.data_hard_start + xdp_headroom;
- xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
+ xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -695,6 +698,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
/* Recalculate length in case bpf program changed it */
delta = orig_data - xdp.data;
len = xdp.data_end - xdp.data;
+ metasize = xdp.data - xdp.data_meta;
break;
case XDP_TX:
stats->xdp_tx++;
@@ -735,10 +739,13 @@ static struct sk_buff *receive_small(struct net_device *dev,
}
skb_reserve(skb, headroom - delta);
skb_put(skb, len);
- if (!delta) {
+ if (!xdp_prog) {
buf += header_offset;
memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
- } /* keep zeroed vnet hdr since packet was changed by bpf */
+ } /* keep zeroed vnet hdr since XDP is loaded */
+
+ if (metasize)
+ skb_metadata_set(skb, metasize);
err:
return skb;
@@ -760,8 +767,8 @@ static struct sk_buff *receive_big(struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct page *page = buf;
- struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
- PAGE_SIZE, true);
+ struct sk_buff *skb =
+ page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0);
stats->bytes += len - vi->hdr_len;
if (unlikely(!skb))
@@ -793,6 +800,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
unsigned int truesize;
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
int err;
+ unsigned int metasize = 0;
head_skb = NULL;
stats->bytes += len - vi->hdr_len;
@@ -839,8 +847,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
data = page_address(xdp_page) + offset;
xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
xdp.data = data + vi->hdr_len;
- xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + (len - vi->hdr_len);
+ xdp.data_meta = xdp.data;
xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -848,24 +856,27 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
switch (act) {
case XDP_PASS:
+ metasize = xdp.data - xdp.data_meta;
+
/* recalculate offset to account for any header
- * adjustments. Note other cases do not build an
- * skb and avoid using offset
+		 * adjustments and subtract metasize so the metadata
+		 * is copied in page_to_skb(). Note other cases do not
+		 * build an skb and avoid using offset
*/
- offset = xdp.data -
- page_address(xdp_page) - vi->hdr_len;
+ offset = xdp.data - page_address(xdp_page) -
+ vi->hdr_len - metasize;
- /* recalculate len if xdp.data or xdp.data_end were
- * adjusted
+ /* recalculate len if xdp.data, xdp.data_end or
+ * xdp.data_meta were adjusted
*/
- len = xdp.data_end - xdp.data + vi->hdr_len;
+ len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
/* We can only create skb based on xdp_page. */
if (unlikely(xdp_page != page)) {
rcu_read_unlock();
put_page(page);
- head_skb = page_to_skb(vi, rq, xdp_page,
- offset, len,
- PAGE_SIZE, false);
+ head_skb = page_to_skb(vi, rq, xdp_page, offset,
+ len, PAGE_SIZE, false,
+ metasize);
return head_skb;
}
break;
@@ -921,7 +932,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto err_skb;
}
- head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
+ head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
+ metasize);
curr_skb = head_skb;
if (unlikely(!curr_skb))
@@ -2166,48 +2178,13 @@ static void virtnet_get_channels(struct net_device *dev,
channels->other_count = 0;
}
-/* Check if the user is trying to change anything besides speed/duplex */
-static bool
-virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd)
-{
- struct ethtool_link_ksettings diff1 = *cmd;
- struct ethtool_link_ksettings diff2 = {};
-
- /* cmd is always set so we need to clear it, validate the port type
- * and also without autonegotiation we can ignore advertising
- */
- diff1.base.speed = 0;
- diff2.base.port = PORT_OTHER;
- ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
- diff1.base.duplex = 0;
- diff1.base.cmd = 0;
- diff1.base.link_mode_masks_nwords = 0;
-
- return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) &&
- bitmap_empty(diff1.link_modes.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS) &&
- bitmap_empty(diff1.link_modes.advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS) &&
- bitmap_empty(diff1.link_modes.lp_advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
static int virtnet_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct virtnet_info *vi = netdev_priv(dev);
- u32 speed;
-
- speed = cmd->base.speed;
- /* don't allow custom speed and duplex */
- if (!ethtool_validate_speed(speed) ||
- !ethtool_validate_duplex(cmd->base.duplex) ||
- !virtnet_validate_ethtool_cmd(cmd))
- return -EINVAL;
- vi->speed = speed;
- vi->duplex = cmd->base.duplex;
- return 0;
+ return ethtool_virtdev_set_link_ksettings(dev, cmd,
+ &vi->speed, &vi->duplex);
}
static int virtnet_get_link_ksettings(struct net_device *dev,
@@ -2225,23 +2202,14 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
static int virtnet_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
- struct ethtool_coalesce ec_default = {
- .cmd = ETHTOOL_SCOALESCE,
- .rx_max_coalesced_frames = 1,
- };
struct virtnet_info *vi = netdev_priv(dev);
int i, napi_weight;
- if (ec->tx_max_coalesced_frames > 1)
+ if (ec->tx_max_coalesced_frames > 1 ||
+ ec->rx_max_coalesced_frames != 1)
return -EINVAL;
- ec_default.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
-
- /* disallow changes to fields not explicitly tested above */
- if (memcmp(ec, &ec_default, sizeof(ec_default)))
- return -EINVAL;
-
if (napi_weight ^ vi->sq[0].napi.weight) {
if (dev->flags & IFF_UP)
return -EBUSY;
@@ -2296,6 +2264,7 @@ static void virtnet_update_settings(struct virtnet_info *vi)
}
static const struct ethtool_ops virtnet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
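
The supported_coalesce_params additions in this series (virtio_net above, vmxnet3 below) rely on the ethtool core rejecting any coalesce field the driver does not advertise, which is what makes the hand-rolled validation removed above redundant. A simplified sketch of that core-side check; the function name and factoring are illustrative, not the exact core implementation:

	static int check_coalesce_supported(const struct ethtool_ops *ops,
					    const struct ethtool_coalesce *ec)
	{
		u32 wanted = 0;

		if (ec->rx_coalesce_usecs)
			wanted |= ETHTOOL_COALESCE_RX_USECS;
		if (ec->rx_max_coalesced_frames)
			wanted |= ETHTOOL_COALESCE_RX_MAX_FRAMES;
		if (ec->tx_max_coalesced_frames)
			wanted |= ETHTOOL_COALESCE_TX_MAX_FRAMES;
		/* ... one ETHTOOL_COALESCE_* bit per struct ethtool_coalesce field */

		if (wanted & ~ops->supported_coalesce_params)
			return -EOPNOTSUPP;
		return 0;
	}
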
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 18f152fa0068..722cb054a5cd 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -942,10 +942,7 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
IPPROTO_TCP, 0);
} else if (ctx->ipv6) {
- struct ipv6hdr *iph = ipv6_hdr(skb);
-
- tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
- IPPROTO_TCP, 0);
+ tcp_v6_gso_csum_prep(skb);
}
}
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 1e4b9ba70983..6528940ce5f3 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -780,27 +780,6 @@ vmxnet3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
if (!VMXNET3_VERSION_GE_3(adapter))
return -EOPNOTSUPP;
- if (ec->rx_coalesce_usecs_irq ||
- ec->rx_max_coalesced_frames_irq ||
- ec->tx_coalesce_usecs ||
- ec->tx_coalesce_usecs_irq ||
- ec->tx_max_coalesced_frames_irq ||
- ec->stats_block_coalesce_usecs ||
- ec->use_adaptive_tx_coalesce ||
- ec->pkt_rate_low ||
- ec->rx_coalesce_usecs_low ||
- ec->rx_max_coalesced_frames_low ||
- ec->tx_coalesce_usecs_low ||
- ec->tx_max_coalesced_frames_low ||
- ec->pkt_rate_high ||
- ec->rx_coalesce_usecs_high ||
- ec->rx_max_coalesced_frames_high ||
- ec->tx_coalesce_usecs_high ||
- ec->tx_max_coalesced_frames_high ||
- ec->rate_sample_interval) {
- return -EINVAL;
- }
-
if ((ec->rx_coalesce_usecs == 0) &&
(ec->use_adaptive_rx_coalesce == 0) &&
(ec->tx_max_coalesced_frames == 0) &&
@@ -891,6 +870,9 @@ done:
}
static const struct ethtool_ops vmxnet3_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = vmxnet3_get_drvinfo,
.get_regs_len = vmxnet3_get_regs_len,
.get_regs = vmxnet3_get_regs,
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index b8228f50bc94..66e00ddc0d42 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -504,7 +504,7 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
static int vrf_rt6_create(struct net_device *dev)
{
- int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM;
+ int flags = DST_NOPOLICY | DST_NOXFRM;
struct net_vrf *vrf = netdev_priv(dev);
struct net *net = dev_net(dev);
struct rt6_info *rt6;
@@ -739,7 +739,7 @@ static int vrf_rtable_create(struct net_device *dev)
return -ENOMEM;
/* create a dst for routing packets out through a VRF device */
- rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
+ rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1);
if (!rth)
return -ENOMEM;
diff --git a/drivers/net/wan/farsync.h b/drivers/net/wan/farsync.h
index 47b8e36f97ab..5f43568a9715 100644
--- a/drivers/net/wan/farsync.h
+++ b/drivers/net/wan/farsync.h
@@ -65,7 +65,7 @@
struct fstioc_write {
unsigned int size;
unsigned int offset;
- unsigned char data[0];
+ unsigned char data[];
};
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 23f93f1c815d..499f7cd19a4a 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -78,7 +78,7 @@ struct card {
struct sk_buff *rx_skbs[RX_QUEUE_LENGTH];
struct card_status *status; /* shared between host and card */
dma_addr_t status_address;
- struct port ports[0]; /* 1 - 4 port structures follow */
+ struct port ports[]; /* 1 - 4 port structures follow */
};
diff --git a/drivers/net/wireless/admtek/adm8211.h b/drivers/net/wireless/admtek/adm8211.h
index 2c55c629de28..095625ecb8ff 100644
--- a/drivers/net/wireless/admtek/adm8211.h
+++ b/drivers/net/wireless/admtek/adm8211.h
@@ -531,7 +531,7 @@ struct adm8211_eeprom {
u8 lpf_cutoff[14]; /* 0x62 */
u8 lnags_threshold[14]; /* 0x70 */
__le16 checksum; /* 0x7E */
- u8 cis_data[0]; /* 0x80, 384 bytes */
+ u8 cis_data[]; /* 0x80, 384 bytes */
} __packed;
struct adm8211_priv {
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index ed87bc00f2aa..342a7e58018a 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -459,7 +459,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
ar_ahb->mem_len = resource_size(res);
ar_ahb->gcc_mem = ioremap(ATH10K_GCC_REG_BASE,
- ATH10K_GCC_REG_SIZE);
+ ATH10K_GCC_REG_SIZE);
if (!ar_ahb->gcc_mem) {
ath10k_err(ar, "gcc mem ioremap error\n");
ret = -ENOMEM;
@@ -467,7 +467,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
}
ar_ahb->tcsr_mem = ioremap(ATH10K_TCSR_REG_BASE,
- ATH10K_TCSR_REG_SIZE);
+ ATH10K_TCSR_REG_SIZE);
if (!ar_ahb->tcsr_mem) {
ath10k_err(ar, "tcsr mem ioremap error\n");
ret = -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index a202a4eea76a..f26cc6989dad 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -541,6 +541,33 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
},
{
+ .id = QCA9377_HW_1_1_DEV_VERSION,
+ .dev_id = QCA9377_1_0_DEVICE_ID,
+ .bus = ATH10K_BUS_SDIO,
+ .name = "qca9377 hw1.1 sdio",
+ .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 19,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ .dir = QCA9377_HW_1_0_FW_DIR,
+ .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA9377_BOARD_DATA_SZ,
+ .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
+ },
+ .hw_ops = &qca6174_ops,
+ .hw_clk = qca6174_clk,
+ .target_cpu_freq = 176000000,
+ .decap_align_bytes = 4,
+ .n_cipher_suites = 8,
+ .num_peers = TARGET_QCA9377_HL_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .uart_pin_workaround = true,
+ },
+ {
.id = QCA4019_HW_1_0_DEV_VERSION,
.dev_id = 0,
.bus = ATH10K_BUS_AHB,
@@ -874,6 +901,13 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
return -ENODATA;
}
+ if (ar->id.bmi_ids_valid) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+			   "boot already acquired valid otp board id, skip download, board_id %d chip_id %d\n",
+ ar->id.bmi_board_id, ar->id.bmi_chip_id);
+ goto skip_otp_download;
+ }
+
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot upload otp to 0x%x len %zd for board id\n",
address, ar->normal_mode_fw.fw_file.otp_len);
@@ -921,6 +955,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
ar->id.bmi_board_id = board_id;
ar->id.bmi_chip_id = chip_id;
+skip_otp_download:
+
return 0;
}
@@ -2119,6 +2155,40 @@ done:
return 0;
}
+static void ath10k_core_fetch_btcoex_dt(struct ath10k *ar)
+{
+ struct device_node *node;
+ u8 coex_support = 0;
+ int ret;
+
+ node = ar->dev->of_node;
+ if (!node)
+ goto out;
+
+ ret = of_property_read_u8(node, "qcom,coexist-support", &coex_support);
+ if (ret) {
+ ar->coex_support = true;
+ goto out;
+ }
+
+ if (coex_support) {
+ ar->coex_support = true;
+ } else {
+ ar->coex_support = false;
+ ar->coex_gpio_pin = -1;
+ goto out;
+ }
+
+ ret = of_property_read_u32(node, "qcom,coexist-gpio-pin",
+ &ar->coex_gpio_pin);
+ if (ret)
+ ar->coex_gpio_pin = -1;
+
+out:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot coex_support %d coex_gpio_pin %d\n",
+ ar->coex_support, ar->coex_gpio_pin);
+}
+
static int ath10k_init_uart(struct ath10k *ar)
{
int ret;
@@ -2696,14 +2766,22 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
val |= WMI_10_4_BSS_CHANNEL_INFO_64;
+ ath10k_core_fetch_btcoex_dt(ar);
+
/* 10.4 firmware supports BT-Coex without reloading firmware
* via pdev param. To support Bluetooth coexistence pdev param,
* WMI_COEX_GPIO_SUPPORT of extended resource config should be
* enabled always.
+ *
+	 * We can still enable BTCOEX if the firmware has the support,
+	 * even though the btcoex_support value is
+	 * ATH10K_DT_BTCOEX_NOT_FOUND.
*/
+
if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
- ar->running_fw->fw_file.fw_features))
+ ar->running_fw->fw_file.fw_features) &&
+ ar->coex_support)
val |= WMI_10_4_COEX_GPIO_SUPPORT;
if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
@@ -2863,6 +2941,8 @@ void ath10k_core_stop(struct ath10k *ar)
ath10k_htt_tx_stop(&ar->htt);
ath10k_htt_rx_free(&ar->htt);
ath10k_wmi_detach(ar);
+
+ ar->id.bmi_ids_valid = false;
}
EXPORT_SYMBOL(ath10k_core_stop);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 5101bf2b5b15..bd8ef576c590 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -119,6 +119,7 @@ struct ath10k_skb_cb {
u16 airtime_est;
struct ieee80211_vif *vif;
struct ieee80211_txq *txq;
+ u32 ucast_cipher;
} __packed;
struct ath10k_skb_rxcb {
@@ -504,6 +505,7 @@ struct ath10k_sta {
struct work_struct update_wk;
u64 rx_duration;
struct ath10k_htt_tx_stats *tx_stats;
+ u32 ucast_cipher;
#ifdef CONFIG_MAC80211_DEBUGFS
/* protected by conf_mutex */
@@ -1222,6 +1224,9 @@ struct ath10k {
struct ath10k_bus_params bus_param;
struct completion peer_delete_done;
+ bool coex_support;
+ int coex_gpio_pin;
+
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index e000677ac516..f811e6940fb0 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1978,6 +1978,9 @@ static ssize_t ath10k_write_btcoex(struct file *file,
if (strtobool(buf, &val) != 0)
return -EINVAL;
+ if (!ar->coex_support)
+ return -EOPNOTSUPP;
+
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_ON &&
@@ -2370,9 +2373,6 @@ static ssize_t ath10k_write_warm_hw_reset(struct file *file,
goto exit;
}
- if (!(test_bit(WMI_SERVICE_RESET_CHIP, ar->wmi.svc_map)))
- ath10k_warn(ar, "wmi service for reset chip is not available\n");
-
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pdev_reset,
WMI_RST_MODE_WARM_RESET);
@@ -2647,8 +2647,10 @@ int ath10k_debug_register(struct ath10k *ar)
ar->debug.debugfs_phy, ar,
&fops_tpc_stats_final);
- debugfs_create_file("warm_hw_reset", 0600, ar->debug.debugfs_phy, ar,
- &fops_warm_hw_reset);
+ if (test_bit(WMI_SERVICE_RESET_CHIP, ar->wmi.svc_map))
+ debugfs_create_file("warm_hw_reset", 0600,
+ ar->debug.debugfs_phy, ar,
+ &fops_warm_hw_reset);
debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_phy, ar,
&fops_ps_state_enable);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 38a5814cf345..f883f2a724dd 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2744,7 +2744,8 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
continue;
}
- tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0);
+ tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
+ IEEE80211_QOS_CTL_TID_MASK;
tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);
ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index a182c0944cc7..e9d12ea708b6 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -1163,6 +1163,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
int len = 0;
int msdu_id = -1;
int res;
+ const u8 *peer_addr;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
len += sizeof(cmd->hdr);
@@ -1178,7 +1179,16 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
- skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ peer_addr = hdr->addr1;
+ if (is_multicast_ether_addr(peer_addr)) {
+ skb_put(msdu, sizeof(struct ieee80211_mmie_16));
+ } else {
+ if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
+ skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256)
+ skb_put(msdu, IEEE80211_GCMP_MIC_LEN);
+ else
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
}
txdesc = ath10k_htc_alloc_skb(ar, len);
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 2451e0fb8ee5..57c58af64a57 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -1131,6 +1131,7 @@ static int ath10k_get_htt_tx_data_rssi_pad(struct htt_resp *resp)
const struct ath10k_hw_ops qca988x_ops = {
.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
+ .is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 775fd62fb92d..970c736ac6bb 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -774,6 +774,9 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
#define TARGET_HL_TLV_AST_SKID_LIMIT 16
#define TARGET_HL_TLV_NUM_WDS_ENTRIES 2
+/* Target specific defines for QCA9377 high latency firmware */
+#define TARGET_QCA9377_HL_NUM_PEERS 15
+
/* Diagnostic Window */
#define CE_DIAG_PIPE 7
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 7fee35ff966b..2d03b8dd3b8c 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -258,6 +258,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_GCM];
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
@@ -3576,6 +3577,7 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_txq *txq,
+ struct ieee80211_sta *sta,
struct sk_buff *skb, u16 airtime)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -3583,6 +3585,7 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool is_data = ieee80211_is_data(hdr->frame_control) ||
ieee80211_is_data_qos(hdr->frame_control);
+ struct ath10k_sta *arsta;
cb->flags = 0;
if (!ath10k_tx_h_use_hwcrypto(vif, skb))
@@ -3607,6 +3610,12 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
cb->vif = vif;
cb->txq = txq;
cb->airtime_est = airtime;
+ if (sta) {
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ spin_lock_bh(&ar->data_lock);
+ cb->ucast_cipher = arsta->ucast_cipher;
+ spin_unlock_bh(&ar->data_lock);
+ }
}
bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
@@ -4078,7 +4087,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
}
airtime = ath10k_mac_update_airtime(ar, txq, skb);
- ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime);
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, sta, skb, airtime);
skb_len = skb->len;
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
@@ -4348,7 +4357,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
u16 airtime;
airtime = ath10k_mac_update_airtime(ar, txq, skb);
- ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime);
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, sta, skb, airtime);
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
@@ -4982,7 +4991,8 @@ static int ath10k_start(struct ieee80211_hw *hw)
param = ar->wmi.pdev_param->enable_btcoex;
if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
- ar->running_fw->fw_file.fw_features)) {
+ ar->running_fw->fw_file.fw_features) &&
+ ar->coex_support) {
ret = ath10k_wmi_pdev_set_param(ar, param, 0);
if (ret) {
ath10k_warn(ar,
@@ -5103,7 +5113,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
- if (arvif->txpower <= 0)
+ /* txpower not initialized yet? */
+ if (arvif->txpower == INT_MIN)
continue;
if (txpower == -1)
@@ -6195,6 +6206,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_sta *arsta;
struct ath10k_peer *peer;
const u8 *peer_addr;
bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
@@ -6219,12 +6231,17 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mutex_lock(&ar->conf_mutex);
- if (sta)
+ if (sta) {
+ arsta = (struct ath10k_sta *)sta->drv_priv;
peer_addr = sta->addr;
- else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ spin_lock_bh(&ar->data_lock);
+ arsta->ucast_cipher = key->cipher;
+ spin_unlock_bh(&ar->data_lock);
+ } else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
peer_addr = vif->bss_conf.bssid;
- else
+ } else {
peer_addr = vif->addr;
+ }
key->hw_key_idx = key->keyidx;
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index e5316b911e1d..1f709b65c29b 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -694,7 +694,7 @@ static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
- if (pkt->act_len > pkt->alloc_len ) {
+ if (pkt->act_len > pkt->alloc_len) {
ret = -EINVAL;
goto err;
}
@@ -953,8 +953,11 @@ static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
*/
ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
irq_proc_reg, sizeof(*irq_proc_reg));
- if (ret)
+ if (ret) {
+ queue_work(ar->workqueue, &ar->restart_work);
+		ath10k_warn(ar, "failed to read int status, starting recovery\n");
goto out;
+ }
/* Update only those registers that are enabled */
*host_int_status = irq_proc_reg->host_int_status &
@@ -1647,23 +1650,33 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
size_t buf_len)
{
int ret;
+ void *mem;
+
+ mem = kzalloc(buf_len, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
/* set window register to start read cycle */
ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
if (ret) {
ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
- return ret;
+ goto out;
}
/* read the data */
- ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
+ ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
if (ret) {
ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
ret);
- return ret;
+ goto out;
}
- return 0;
+ memcpy(buf, mem, buf_len);
+
+out:
+ kfree(mem);
+
+ return ret;
}
static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 61885d4d662c..2ea77bb880b1 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1926,6 +1926,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
u32 vdev_id;
u32 buf_len = msdu->len;
u16 fc;
+ const u8 *peer_addr;
hdr = (struct ieee80211_hdr *)msdu->data;
fc = le16_to_cpu(hdr->frame_control);
@@ -1946,8 +1947,20 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
- len += IEEE80211_CCMP_MIC_LEN;
- buf_len += IEEE80211_CCMP_MIC_LEN;
+ peer_addr = hdr->addr1;
+ if (is_multicast_ether_addr(peer_addr)) {
+ len += sizeof(struct ieee80211_mmie_16);
+ buf_len += sizeof(struct ieee80211_mmie_16);
+ } else {
+ if (cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
+ cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256) {
+ len += IEEE80211_GCMP_MIC_LEN;
+ buf_len += IEEE80211_GCMP_MIC_LEN;
+ } else {
+ len += IEEE80211_CCMP_MIC_LEN;
+ buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
+ }
}
len = round_up(len, 4);
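
Both the HTT (htt_tx.c) and WMI mgmt-tx paths above now size the protected-management-frame trailer the same way; condensed here into one hedged helper for clarity (the name and factoring are illustrative — the patch open-codes this in each path):

	static size_t ath10k_mgmt_mic_len(u32 ucast_cipher, bool mcast)
	{
		if (mcast)			/* BIP: management MMIE trailer */
			return sizeof(struct ieee80211_mmie_16);

		if (ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
		    ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256)
			return IEEE80211_GCMP_MIC_LEN;

		return IEEE80211_CCMP_MIC_LEN;	/* CCMP default */
	}
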
@@ -8787,7 +8800,7 @@ ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
cmd->host_platform_config = __cpu_to_le32(type);
cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
- cmd->wlan_gpio_priority = __cpu_to_le32(-1);
+ cmd->wlan_gpio_priority = __cpu_to_le32(ar->coex_gpio_pin);
cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT);
cmd->coex_gpio_pin1 = __cpu_to_le32(-1);
cmd->coex_gpio_pin2 = __cpu_to_le32(-1);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 972d53d77654..6df415778374 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -371,6 +371,11 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
WMI_10_4_SERVICE_REPORT_AIRTIME,
WMI_10_4_SERVICE_TX_PWR_PER_PEER,
+ WMI_10_4_SERVICE_FETCH_PEER_TX_PN,
+ WMI_10_4_SERVICE_MULTIPLE_VDEV_RESTART,
+ WMI_10_4_SERVICE_ENHANCED_RADIO_COUNTERS,
+ WMI_10_4_SERVICE_QINQ_SUPPORT,
+ WMI_10_4_SERVICE_RESET_CHIP,
};
static inline char *wmi_service_name(enum wmi_service service_id)
@@ -827,6 +832,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_REPORT_AIRTIME, len);
SVCMAP(WMI_10_4_SERVICE_TX_PWR_PER_PEER,
WMI_SERVICE_TX_PWR_PER_PEER, len);
+ SVCMAP(WMI_10_4_SERVICE_RESET_CHIP,
+ WMI_SERVICE_RESET_CHIP, len);
}
#undef SVCMAP
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
index c88e16d4022b..738f99090d83 100644
--- a/drivers/net/wireless/ath/ath11k/Kconfig
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -3,6 +3,7 @@ config ATH11K
tristate "Qualcomm Technologies 802.11ax chipset support"
depends on MAC80211 && HAS_DMA
depends on REMOTEPROC
+ depends on CRYPTO_MICHAEL_MIC
depends on ARCH_QCOM || COMPILE_TEST
select ATH_COMMON
select QCOM_QMI_HELPERS
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
index 2761d07d938e..fe7736e53583 100644
--- a/drivers/net/wireless/ath/ath11k/Makefile
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -20,6 +20,7 @@ ath11k-y += core.o \
ath11k-$(CONFIG_ATH11K_DEBUGFS) += debug_htt_stats.o debugfs_sta.o
ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
+ath11k-$(CONFIG_THERMAL) += thermal.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index e7e3e64c07aa..59342d2797ca 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -458,7 +458,6 @@ static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
- struct sk_buff *skb;
int i;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
@@ -468,9 +467,6 @@ static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
napi_synchronize(&irq_grp->napi);
napi_disable(&irq_grp->napi);
-
- while ((skb = __skb_dequeue(&irq_grp->pending_q)))
- dev_kfree_skb_any(skb);
}
}
@@ -681,6 +677,9 @@ static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
{
struct ath11k_ce_pipe *ce_pipe = arg;
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
tasklet_schedule(&ce_pipe->intr_tq);
@@ -712,6 +711,9 @@ static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
{
struct ath11k_ext_irq_grp *irq_grp = arg;
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
ath11k_ahb_ext_grp_disable(irq_grp);
napi_schedule(&irq_grp->napi);
@@ -734,7 +736,6 @@ static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
init_dummy_netdev(&irq_grp->napi_ndev);
netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
ath11k_ahb_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
- __skb_queue_head_init(&irq_grp->pending_q);
for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
if (ath11k_tx_ring_mask[i] & BIT(j)) {
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index e355dfda48bf..688f357e6eaf 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -161,6 +161,7 @@ struct ath11k_ce_pipe {
struct ath11k_ce_ring *src_ring;
struct ath11k_ce_ring *dest_ring;
struct ath11k_ce_ring *status_ring;
+ u64 timestamp;
};
struct ath11k_ce {
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 9e823056e673..bf5657d2ae18 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -392,11 +392,19 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab)
goto err_mac_unregister;
}
+ ret = ath11k_thermal_register(ab);
+ if (ret) {
+ ath11k_err(ab, "could not register thermal device: %d\n",
+ ret);
+ goto err_dp_pdev_free;
+ }
+
return 0;
+err_dp_pdev_free:
+ ath11k_dp_pdev_free(ab);
err_mac_unregister:
ath11k_mac_unregister(ab);
-
err_pdev_debug:
ath11k_debug_pdev_destroy(ab);
@@ -405,6 +413,7 @@ err_pdev_debug:
static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
{
+ ath11k_thermal_unregister(ab);
ath11k_mac_unregister(ab);
ath11k_ahb_ext_irq_disable(ab);
ath11k_dp_pdev_free(ab);
@@ -569,6 +578,7 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
int ret;
mutex_lock(&ab->core_lock);
+ ath11k_thermal_unregister(ab);
ath11k_ahb_ext_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_ahb_stop(ab);
@@ -607,6 +617,7 @@ void ath11k_core_halt(struct ath11k *ar)
lockdep_assert_held(&ar->conf_mutex);
ar->num_created_vdevs = 0;
+ ar->allocated_vdev_map = 0;
ath11k_mac_scan_finish(ar);
ath11k_mac_peer_cleanup_all(ar);
@@ -644,6 +655,7 @@ static void ath11k_core_restart(struct work_struct *work)
complete(&ar->install_key_done);
complete(&ar->vdev_setup_done);
complete(&ar->bss_survey_done);
+ complete(&ar->thermal.wmi_sync);
wake_up(&ar->dp.tx_empty_waitq);
idr_for_each(&ar->txmgmt_idr,
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 25cdcf71d0c4..6e7b8ecd09a6 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -20,6 +20,7 @@
#include "hw.h"
#include "hal_rx.h"
#include "reg.h"
+#include "thermal.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -76,6 +77,8 @@ struct ath11k_skb_rxcb {
u8 err_code;
u8 mac_id;
u8 unmapped;
+ u8 is_frag;
+ u8 tid;
};
enum ath11k_hw_rev {
@@ -108,12 +111,9 @@ struct ath11k_ext_irq_grp {
u32 irqs[ATH11K_EXT_IRQ_NUM_MAX];
u32 num_irq;
u32 grp_id;
+ u64 timestamp;
struct napi_struct napi;
struct net_device napi_ndev;
- /* Queue of pending packets, not expected to be accessed concurrently
- * to avoid locking overhead.
- */
- struct sk_buff_head pending_q;
};
#define HEHANDLE_CAP_PHYINFO_SIZE 3
@@ -243,6 +243,8 @@ struct ath11k_rx_peer_stats {
u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
u64 reception_type[HAL_RX_RECEPTION_TYPE_MAX];
u64 rx_duration;
+ u64 dcm_count;
+ u64 ru_alloc_cnt[HAL_RX_RU_ALLOC_TYPE_MAX];
};
#define ATH11K_HE_MCS_NUM 12
@@ -329,9 +331,9 @@ struct ath11k_sta {
u32 bw;
u32 nss;
u32 smps;
+ enum hal_pn_type pn_type;
struct work_struct update_wk;
- struct ieee80211_tx_info tx_info;
struct rate_info txrate;
struct rate_info last_txrate;
u64 rx_duration;
@@ -486,6 +488,7 @@ struct ath11k {
int max_num_peers;
u32 num_started_vdevs;
u32 num_created_vdevs;
+ unsigned long long allocated_vdev_map;
struct idr txmgmt_idr;
/* protects txmgmt_idr data */
@@ -524,6 +527,7 @@ struct ath11k {
struct ath11k_debug debug;
#endif
bool dfs_block_radar_events;
+ struct ath11k_thermal thermal;
};
struct ath11k_band_cap {
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
index 8e8d5588b541..97e7306c506d 100644
--- a/drivers/net/wireless/ath/ath11k/debug.h
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -53,6 +53,8 @@ enum ath11k_dbg_htt_ext_stats_type {
ATH11K_DBG_HTT_EXT_STATS_TWT_SESSIONS = 20,
ATH11K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS = 21,
ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
+ ATH11K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS = 24,
/* keep this last */
ATH11K_DBG_HTT_NUM_EXT_STATS,
@@ -68,12 +70,19 @@ struct debug_htt_stats_req {
u8 buf[0];
};
-#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
+struct ath_pktlog_hdr {
+ u16 flags;
+ u16 missed_cnt;
+ u16 log_type;
+ u16 size;
+ u32 timestamp;
+ u32 type_specific_data;
+ u8 payload[0];
+};
+#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
-#define ATH11K_HTT_PKTLOG_MAX_SIZE 2048
-
enum ath11k_pktlog_filter {
ATH11K_PKTLOG_RX = 0x000000001,
ATH11K_PKTLOG_TX = 0x000000002,
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
index 9939e909628f..5db0c27de475 100644
--- a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
@@ -22,7 +22,7 @@
do { \
int index = 0; u8 i; \
for (i = 0; i < len; i++) { \
- index += snprintf(out + index, HTT_MAX_STRING_LEN - index, \
+ index += scnprintf(out + index, HTT_MAX_STRING_LEN - index, \
" %u:%u,", i, arr[i]); \
if (index < 0 || index >= HTT_MAX_STRING_LEN) \
break; \
@@ -46,7 +46,7 @@ static inline void htt_print_stats_string_tlv(const void *tag_buf,
len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:");
for (i = 0; i < tag_len; i++) {
- index += snprintf(&data[index],
+ index += scnprintf(&data[index],
HTT_MAX_STRING_LEN - index,
"%.*s", 4, (char *)&(htt_stats_buf->data[i]));
if (index >= HTT_MAX_STRING_LEN)
@@ -3097,7 +3097,7 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
index = 0;
for (i = 0; i < HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS; i++)
- index += snprintf(&rx_pilot_evm_db[j][index],
+ index += scnprintf(&rx_pilot_evm_db[j][index],
HTT_MAX_STRING_LEN - index,
" %u:%d,",
i,
@@ -3109,7 +3109,7 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
index = 0;
memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
for (i = 0; i < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
- index += snprintf(&str_buf[index],
+ index += scnprintf(&str_buf[index],
HTT_MAX_STRING_LEN - index,
" %u:%d,", i, htt_stats_buf->rx_pilot_evm_db_mean[i]);
len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB_mean = %s ", str_buf);
@@ -3217,7 +3217,7 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
index = 0;
memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
- index += snprintf(&str_buf[index],
+ index += scnprintf(&str_buf[index],
HTT_MAX_STRING_LEN - index,
" %u:%d,",
i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
@@ -3232,7 +3232,7 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
index = 0;
memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
- index += snprintf(&str_buf[index],
+ index += scnprintf(&str_buf[index],
HTT_MAX_STRING_LEN - index,
" %u:%d,",
i,
@@ -3854,6 +3854,47 @@ htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
stats_req->buf_len = len;
}
+static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf,
+ u8 *data)
+{
+ struct debug_htt_stats_req *stats_req =
+ (struct debug_htt_stats_req *)data;
+ struct htt_ring_backpressure_stats_tlv *htt_stats_buf =
+ (struct htt_ring_backpressure_stats_tlv *)tag_buf;
+ int i;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
+ htt_stats_buf->pdev_id);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "current_head_idx = %u",
+ htt_stats_buf->current_head_idx);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "current_tail_idx = %u",
+ htt_stats_buf->current_tail_idx);
+ len += HTT_DBG_OUT(buf + len, buf_len - len, "num_htt_msgs_sent = %u",
+ htt_stats_buf->num_htt_msgs_sent);
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "backpressure_time_ms = %u",
+ htt_stats_buf->backpressure_time_ms);
+
+ for (i = 0; i < 5; i++)
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "backpressure_hist_%u = %u",
+ i + 1, htt_stats_buf->backpressure_hist[i]);
+
+ len += HTT_DBG_OUT(buf + len, buf_len - len,
+ "============================");
+
+ if (len >= buf_len) {
+ buf[buf_len - 1] = 0;
+ stats_req->buf_len = buf_len - 1;
+ } else {
+ buf[len] = 0;
+ stats_req->buf_len = len;
+ }
+}
+
static inline void htt_htt_stats_debug_dump(const u32 *tag_buf,
struct debug_htt_stats_req *stats_req)
{
@@ -4246,6 +4287,9 @@ static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
case HTT_STATS_PDEV_OBSS_PD_TAG:
htt_print_pdev_obss_pd_stats_tlv_v(tag_buf, stats_req);
break;
+ case HTT_STATS_RING_BACKPRESSURE_STATS_TAG:
+ htt_print_backpressure_stats_tlv_v(tag_buf, user_data);
+ break;
default:
break;
}
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
index 4bdb62dd7b8d..23a6baa9e95a 100644
--- a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
@@ -100,6 +100,8 @@ enum htt_tlv_tag_t {
HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
HTT_STATS_PDEV_OBSS_PD_TAG = 88,
+ HTT_STATS_HW_WAR_TAG = 89,
+ HTT_STATS_RING_BACKPRESSURE_STATS_TAG = 90,
HTT_STATS_MAX_TAG,
};
@@ -1659,4 +1661,30 @@ struct htt_pdev_obss_pd_stats_tlv {
};
void ath11k_debug_htt_stats_init(struct ath11k *ar);
+
+struct htt_ring_backpressure_stats_tlv {
+ u32 pdev_id;
+ u32 current_head_idx;
+ u32 current_tail_idx;
+ u32 num_htt_msgs_sent;
+ /* Time in milliseconds for which the ring has been in
+ * its current backpressure condition
+ */
+ u32 backpressure_time_ms;
+ /* backpressure_hist - histogram showing how many times
+ * different degrees of backpressure duration occurred:
+ * Index 0 indicates the number of times ring was
+ * continuously in backpressure state for 100 - 200ms.
+ * Index 1 indicates the number of times ring was
+ * continuously in backpressure state for 200 - 300ms.
+ * Index 2 indicates the number of times ring was
+ * continuously in backpressure state for 300 - 400ms.
+ * Index 3 indicates the number of times ring was
+ * continuously in backpressure state for 400 - 500ms.
+ * Index 4 indicates the number of times ring was
+ * continuously in backpressure state beyond 500ms.
+ */
+ u32 backpressure_hist[5];
+};
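
A small sketch of how a duration would fall into the buckets documented above (hypothetical helper; the firmware-side accounting is not part of this patch):

	static int backpressure_hist_idx(u32 time_ms)
	{
		if (time_ms < 100)
			return -1;		/* below the first bucket */
		if (time_ms >= 500)
			return 4;		/* "beyond 500ms" bucket */
		return time_ms / 100 - 1;	/* 100-200ms -> 0, ..., 400-500ms -> 3 */
	}
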
+
#endif
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index 743760c9bcae..389dac219238 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -24,7 +24,7 @@ ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
tx_stats = arsta->tx_stats;
gi = FIELD_GET(RATE_INFO_FLAGS_SHORT_GI, arsta->txrate.flags);
mcs = txrate->mcs;
- bw = txrate->bw;
+ bw = ath11k_mac_mac80211_bw_to_ath11k_bw(txrate->bw);
nss = txrate->nss - 1;
#define STATS_OP_FMT(name) tx_stats->stats[ATH11K_STATS_TYPE_##name]
@@ -136,7 +136,7 @@ void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
struct ath11k_sta *arsta;
struct ieee80211_sta *sta;
u16 rate;
- u8 rate_idx;
+ u8 rate_idx = 0;
int ret;
u8 mcs;
@@ -219,6 +219,9 @@ static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file,
const int size = 2 * 4096;
char *buf;
+ if (!arsta->tx_stats)
+ return -ENOENT;
+
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -379,6 +382,13 @@ static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file,
len += scnprintf(buf + len, size - len, "%llu ", rx_stats->nss_count[i]);
len += scnprintf(buf + len, size - len, "\nRX Duration:%llu ",
rx_stats->rx_duration);
+ len += scnprintf(buf + len, size - len,
+				 "\nDCM: %llu\nRU 26: %llu 52: %llu 106: %llu 242: %llu 484: %llu 996: %llu\n",
+ rx_stats->dcm_count, rx_stats->ru_alloc_cnt[0],
+ rx_stats->ru_alloc_cnt[1], rx_stats->ru_alloc_cnt[2],
+ rx_stats->ru_alloc_cnt[3], rx_stats->ru_alloc_cnt[4],
+ rx_stats->ru_alloc_cnt[5]);
+
len += scnprintf(buf + len, size - len, "\n");
spin_unlock_bh(&ar->ab->base_lock);
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index b112825a52ed..50350f77b309 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
+#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
@@ -33,14 +34,16 @@ void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
}
ath11k_peer_rx_tid_cleanup(ar, peer);
+ crypto_free_shash(peer->tfm_mmic);
spin_unlock_bh(&ab->base_lock);
}
int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
u32 reo_dest;
- int ret;
+ int ret = 0, tid;
/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
reo_dest = ar->dp.mac_id + 1;
@@ -54,24 +57,42 @@ int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
return ret;
}
- ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id,
- HAL_DESC_REO_NON_QOS_TID, 1, 0);
- if (ret) {
- ath11k_warn(ab, "failed to setup rxd tid queue for non-qos tid %d\n",
- ret);
- return ret;
+ for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+ ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
+ HAL_PN_TYPE_NONE);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
+ tid, ret);
+ goto peer_clean;
+ }
}
- ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, 0, 1, 0);
+ ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
if (ret) {
- ath11k_warn(ab, "failed to setup rxd tid queue for tid 0 %d\n",
- ret);
+ ath11k_warn(ab, "failed to setup rx defrag context\n");
return ret;
}
/* TODO: Setup other peer specific resource used in data path */
return 0;
+
+peer_clean:
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to del rx tid\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ for (; tid >= 0; tid--)
+ ath11k_peer_rx_tid_delete(ar, peer, tid);
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return ret;
}
void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
@@ -197,6 +218,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
struct ath11k_dp *dp = &ab->dp;
struct hal_srng *srng;
int i, ret;
+ u32 ring_hash_map;
ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
HAL_SW2WBM_RELEASE, 0, 0,
@@ -284,7 +306,21 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
goto err;
}
- ath11k_hal_reo_hw_setup(ab);
+	/* When hash-based routing of rx packets is enabled, 32 entries
+	 * mapping hash values to rings are configured. Each hash entry
+	 * uses three bits to select a particular ring. The ring encoding
+	 * is 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW and
+	 * 7:Not used.
+	 */
+ ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+ HAL_HASH_ROUTING_RING_SW2 << 3 |
+ HAL_HASH_ROUTING_RING_SW3 << 6 |
+ HAL_HASH_ROUTING_RING_SW4 << 9 |
+ HAL_HASH_ROUTING_RING_SW1 << 12 |
+ HAL_HASH_ROUTING_RING_SW2 << 15 |
+ HAL_HASH_ROUTING_RING_SW3 << 18 |
+ HAL_HASH_ROUTING_RING_SW4 << 21;
+
+ ath11k_hal_reo_hw_setup(ab, ring_hash_map);
return 0;
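
To make the packing above concrete: each 3-bit field selects the REO destination ring for one hash value, so eight entries fit in the u32 passed to ath11k_hal_reo_hw_setup(). A decoding sketch (the helper is illustrative, not part of the patch); with the map built above, a hash of 5 selects HAL_HASH_ROUTING_RING_SW2:

	static u32 reo_ring_for_hash(u32 ring_hash_map, u32 hash)
	{
		/* entry i occupies bits [3*i, 3*i + 2]; 8 entries per u32 */
		return (ring_hash_map >> ((hash & 0x7) * 3)) & 0x7;
	}
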
@@ -614,17 +650,13 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
}
if (ath11k_rx_ring_mask[grp_id]) {
- for (i = 0; i < ab->num_radios; i++) {
- if (ath11k_rx_ring_mask[grp_id] & BIT(i)) {
- work_done = ath11k_dp_process_rx(ab, i, napi,
- &irq_grp->pending_q,
- budget);
- budget -= work_done;
- tot_work_done += work_done;
- }
- if (budget <= 0)
- goto done;
- }
+ i = fls(ath11k_rx_ring_mask[grp_id]) - 1;
+ work_done = ath11k_dp_process_rx(ab, i, napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
}
if (rx_mon_status_ring_mask[grp_id]) {
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 6ef5be4201b2..551f9c9fb847 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -22,6 +22,18 @@ struct dp_rx_tid {
u32 size;
u32 ba_win_sz;
bool active;
+
+ /* Info related to rx fragments */
+ u32 cur_sn;
+ u16 last_frag_no;
+ u16 rx_frag_bitmap;
+
+ struct sk_buff_head rx_frags;
+ struct hal_reo_dest_ring *dst_ring_desc;
+
+ /* Timer info related to fragments */
+ struct timer_list frag_timer;
+ struct ath11k_base *ab;
};
#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
@@ -128,7 +140,6 @@ struct ath11k_pdev_dp {
u32 mac_id;
atomic_t num_tx_pending;
wait_queue_head_t tx_empty_waitq;
- struct dp_srng reo_dst_ring;
struct dp_rxdma_ring rx_refill_buf_ring;
struct dp_srng rxdma_err_dst_ring;
struct dp_srng rxdma_mon_dst_ring;
@@ -148,7 +159,7 @@ struct ath11k_pdev_dp {
#define DP_AVG_MPDUS_PER_TID_MAX 128
#define DP_AVG_MSDUS_PER_MPDU 4
-#define DP_RX_HASH_ENABLE 0 /* Disable hash based Rx steering */
+#define DP_RX_HASH_ENABLE 1 /* Enable hash based Rx steering */
#define DP_BA_WIN_SZ_MAX 256
@@ -168,7 +179,7 @@ struct ath11k_pdev_dp {
#define DP_RX_RELEASE_RING_SIZE 1024
#define DP_REO_EXCEPTION_RING_SIZE 128
#define DP_REO_CMD_RING_SIZE 128
-#define DP_REO_STATUS_RING_SIZE 256
+#define DP_REO_STATUS_RING_SIZE 2048
#define DP_RXDMA_BUF_RING_SIZE 4096
#define DP_RXDMA_REFILL_RING_SIZE 2048
#define DP_RXDMA_ERR_DST_RING_SIZE 1024
@@ -206,6 +217,7 @@ struct ath11k_dp {
struct dp_srng reo_except_ring;
struct dp_srng reo_cmd_ring;
struct dp_srng reo_status_ring;
+ struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
struct list_head reo_cmd_list;
@@ -924,6 +936,7 @@ enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x1f,
HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d,
HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
+ HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND = 0x24,
};
#define HTT_TARGET_VERSION_MAJOR 3
@@ -972,6 +985,13 @@ struct htt_resp_msg {
};
} __packed;
+#define HTT_BACKPRESSURE_EVENT_PDEV_ID_M GENMASK(15, 8)
+#define HTT_BACKPRESSURE_EVENT_RING_TYPE_M GENMASK(23, 16)
+#define HTT_BACKPRESSURE_EVENT_RING_ID_M GENMASK(31, 24)
+
+#define HTT_BACKPRESSURE_EVENT_HP_M GENMASK(15, 0)
+#define HTT_BACKPRESSURE_EVENT_TP_M GENMASK(31, 16)
+
/* ppdu stats
*
* @details
@@ -1066,6 +1086,13 @@ struct htt_ppdu_stats_common {
u16 bw_mhz;
} __packed;
+enum htt_ppdu_stats_gi {
+ HTT_PPDU_STATS_SGI_0_8_US,
+ HTT_PPDU_STATS_SGI_0_4_US,
+ HTT_PPDU_STATS_SGI_1_6_US,
+ HTT_PPDU_STATS_SGI_3_2_US,
+};
+
#define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M GENMASK(3, 0)
#define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M GENMASK(11, 4)
@@ -1094,6 +1121,8 @@ struct htt_ppdu_stats_common {
FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M, _val)
#define HTT_USR_RATE_GI(_val) \
FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M, _val)
+#define HTT_USR_RATE_DCM(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M, _val)
#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M GENMASK(1, 0)
#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M BIT(2)
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index b08da839b7d9..f74a0e74bf3e 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -4,6 +4,9 @@
*/
#include <linux/ieee80211.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
@@ -13,6 +16,8 @@
#include "dp_tx.h"
#include "peer.h"
+#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
+
static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc)
{
return desc->hdr_status;
@@ -28,22 +33,56 @@ static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_des
__le32_to_cpu(desc->mpdu_start.info2));
}
-static u8 ath11k_dp_rx_h_mpdu_start_decap_type(struct hal_rx_desc *desc)
+static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct hal_rx_desc *desc)
{
- return FIELD_GET(RX_MPDU_START_INFO5_DECAP_TYPE,
- __le32_to_cpu(desc->mpdu_start.info5));
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(desc->msdu_start.info2));
}
-static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
+static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct hal_rx_desc *desc)
{
- return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
- __le32_to_cpu(desc->attention.info2));
+ return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+ __le32_to_cpu(desc->msdu_start.info2));
}
-static bool ath11k_dp_rx_h_attn_first_mpdu(struct hal_rx_desc *desc)
+static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct hal_rx_desc *desc)
{
- return !!FIELD_GET(RX_ATTENTION_INFO1_FIRST_MPDU,
- __le32_to_cpu(desc->attention.info1));
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
+ __le32_to_cpu(desc->mpdu_start.info1));
+}
+
+static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
+ __le32_to_cpu(desc->mpdu_start.info1));
+}
+
+static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
+ return ieee80211_has_morefrags(hdr->frame_control);
+}
+
+static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
+ return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+}
+
+static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
+ __le32_to_cpu(desc->mpdu_start.info1));
+}
+
+static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
+ __le32_to_cpu(desc->attention.info2));
}
static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc)
@@ -137,6 +176,17 @@ static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc)
return hweight8(mimo_ss_bitmap);
}
+static u8 ath11k_dp_rx_h_mpdu_start_tid(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_TID,
+ __le32_to_cpu(desc->mpdu_start.info2));
+}
+
+static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->mpdu_start.sw_peer_id);
+}
+
static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc)
{
return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
@@ -402,32 +452,25 @@ static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
{
- struct ath11k_pdev_dp *dp;
- struct ath11k *ar;
+ struct ath11k_dp *dp = &ab->dp;
int i;
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- dp = &ar->dp;
- ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring);
- }
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++)
+ ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}
int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
{
- struct ath11k *ar;
- struct ath11k_pdev_dp *dp;
+ struct ath11k_dp *dp = &ab->dp;
int ret;
int i;
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- dp = &ar->dp;
- ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring, HAL_REO_DST,
- dp->mac_id, dp->mac_id,
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
+ HAL_REO_DST, i, 0,
DP_REO_DST_RING_SIZE);
if (ret) {
- ath11k_warn(ar->ab, "failed to setup reo_dst_ring\n");
+ ath11k_warn(ab, "failed to setup reo_dst_ring\n");
goto err_reo_cleanup;
}
}
@@ -633,8 +676,8 @@ free_desc:
kfree(rx_tid->vaddr);
}
-static void ath11k_peer_rx_tid_delete(struct ath11k *ar,
- struct ath11k_peer *peer, u8 tid)
+void ath11k_peer_rx_tid_delete(struct ath11k *ar,
+ struct ath11k_peer *peer, u8 tid)
{
struct ath11k_hal_reo_cmd cmd = {0};
struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
@@ -661,12 +704,75 @@ static void ath11k_peer_rx_tid_delete(struct ath11k *ar,
rx_tid->active = false;
}
+static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
+ u32 *link_desc,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ u32 *desc;
+ int ret = 0;
+
+ srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOBUFS;
+ goto exit;
+ }
+
+ ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
+ action);
+
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
+static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
+{
+ struct ath11k_base *ab = rx_tid->ab;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (rx_tid->dst_ring_desc) {
+ if (rel_link_desc)
+ ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ kfree(rx_tid->dst_ring_desc);
+ rx_tid->dst_ring_desc = NULL;
+ }
+
+ rx_tid->cur_sn = 0;
+ rx_tid->last_frag_no = 0;
+ rx_tid->rx_frag_bitmap = 0;
+ __skb_queue_purge(&rx_tid->rx_frags);
+}
+
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
+ struct dp_rx_tid *rx_tid;
int i;
- for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
+ lockdep_assert_held(&ar->ab->base_lock);
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+
ath11k_peer_rx_tid_delete(ar, peer, i);
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ar->ab->base_lock);
+ }
}
static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
@@ -732,7 +838,8 @@ unlock_exit:
}
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
- u8 tid, u32 ba_win_sz, u16 ssn)
+ u8 tid, u32 ba_win_sz, u16 ssn,
+ enum hal_pn_type pn_type)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_peer *peer;
@@ -793,7 +900,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
- ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, ssn);
+ ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
+ ssn, pn_type);
paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
DMA_BIDIRECTIONAL);
@@ -837,7 +945,7 @@ int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
params->tid, params->buf_size,
- params->ssn);
+ params->ssn, arsta->pn_type);
if (ret)
ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
@@ -890,8 +998,80 @@ int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
return ret;
}
-static int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
- u16 peer_id)
+int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
+ const u8 *peer_addr,
+ enum set_key_cmd key_cmd,
+ struct ieee80211_key_conf *key)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_hal_reo_cmd cmd = {0};
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ u8 tid;
+ int ret = 0;
+
+ /* NOTE: Enable PN/TSC replay check offload only for unicast frames.
+ * We use mac80211 PN/TSC replay check functionality for bcast/mcast
+ * for now.
+ */
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return 0;
+
+ cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
+ HAL_REO_CMD_UPD0_PN_SIZE |
+ HAL_REO_CMD_UPD0_PN_VALID |
+ HAL_REO_CMD_UPD0_PN_CHECK |
+ HAL_REO_CMD_UPD0_SVLD;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (key_cmd == SET_KEY) {
+ cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
+ cmd.pn_size = 48;
+ }
+ break;
+ default:
+ break;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+ rx_tid = &peer->rx_tid[tid];
+ if (!rx_tid->active)
+ continue;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE,
+ &cmd, NULL);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
+ tid, ret);
+ break;
+ }
+ }
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ return ret;
+}
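For context, a minimal sketch of how a mac80211 set_key path could chain into this helper. The call site, the sketch's function name, and the use of arvif->bssid for group keys are assumptions; only symbols visible in this driver are used otherwise:

	static int ath11k_set_key_pn_sketch(struct ath11k_vif *arvif,
					    enum set_key_cmd cmd,
					    struct ieee80211_sta *sta,
					    struct ieee80211_key_conf *key)
	{
		/* pairwise keys hang off the station; group keys off the BSS */
		const u8 *peer_addr = sta ? sta->addr : arvif->bssid;

		/* key install/removal via WMI omitted; afterwards, update the
		 * REO queues so the hardware PN replay checks match the key
		 */
		return ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key);
	}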
+
+static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
+ u16 peer_id)
{
int i;
@@ -1028,23 +1208,23 @@ int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
return 0;
}
-static u32 ath11k_bw_to_mac80211_bwflags(u8 bw)
+static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi)
{
- u32 bwflags = 0;
+ u32 ret = 0;
- switch (bw) {
- case ATH11K_BW_40:
- bwflags = IEEE80211_TX_RC_40_MHZ_WIDTH;
+ switch (sgi) {
+ case RX_MSDU_START_SGI_0_8_US:
+ ret = NL80211_RATE_INFO_HE_GI_0_8;
break;
- case ATH11K_BW_80:
- bwflags = IEEE80211_TX_RC_80_MHZ_WIDTH;
+ case RX_MSDU_START_SGI_1_6_US:
+ ret = NL80211_RATE_INFO_HE_GI_1_6;
break;
- case ATH11K_BW_160:
- bwflags = IEEE80211_TX_RC_160_MHZ_WIDTH;
+ case RX_MSDU_START_SGI_3_2_US:
+ ret = NL80211_RATE_INFO_HE_GI_3_2;
break;
}
- return bwflags;
+ return ret;
}
static void
@@ -1056,12 +1236,11 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar,
struct ieee80211_sta *sta;
struct ath11k_sta *arsta;
struct htt_ppdu_stats_user_rate *user_rate;
- struct ieee80211_chanctx_conf *conf = NULL;
struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
struct htt_ppdu_stats_common *common = &ppdu_stats->common;
int ret;
- u8 flags, mcs, nss, bw, sgi, rate_idx = 0;
+ u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
u32 succ_bytes = 0;
u16 rate = 0, succ_pkts = 0;
u32 tx_duration = 0;
@@ -1096,18 +1275,29 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar,
nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
+ dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
/* Note: If the host configured fixed rates, or in certain other special
* cases, broadcast/management frames are sent at different rates.
* Should firmware rate control be skipped for these?
*/
- if (flags == WMI_RATE_PREAMBLE_VHT && mcs > 9) {
+ if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
+ ath11k_warn(ab, "Invalid HE mcs %hhd peer stats", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
ath11k_warn(ab, "Invalid VHT mcs %hhd peer stats", mcs);
return;
}
- if (flags == WMI_RATE_PREAMBLE_HT && (mcs > 7 || nss < 1)) {
+ if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
ath11k_warn(ab, "Invalid HT mcs %hhd nss %hhd peer stats",
mcs, nss);
return;
@@ -1136,60 +1326,42 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar,
arsta = (struct ath11k_sta *)sta->drv_priv;
memset(&arsta->txrate, 0, sizeof(arsta->txrate));
- memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
switch (flags) {
case WMI_RATE_PREAMBLE_OFDM:
arsta->txrate.legacy = rate;
- if (arsta->arvif && arsta->arvif->vif)
- conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
- if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
- arsta->tx_info.status.rates[0].idx = rate_idx - 4;
break;
case WMI_RATE_PREAMBLE_CCK:
arsta->txrate.legacy = rate;
- arsta->tx_info.status.rates[0].idx = rate_idx;
- if (mcs > ATH11K_HW_RATE_CCK_LP_1M &&
- mcs <= ATH11K_HW_RATE_CCK_SP_2M)
- arsta->tx_info.status.rates[0].flags |=
- IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
break;
case WMI_RATE_PREAMBLE_HT:
arsta->txrate.mcs = mcs + 8 * (nss - 1);
- arsta->tx_info.status.rates[0].idx = arsta->txrate.mcs;
arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
- arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
- if (sgi) {
+ if (sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- arsta->tx_info.status.rates[0].flags |=
- IEEE80211_TX_RC_SHORT_GI;
- }
break;
case WMI_RATE_PREAMBLE_VHT:
arsta->txrate.mcs = mcs;
- ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], mcs, nss);
arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
- arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
- if (sgi) {
+ if (sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
- arsta->tx_info.status.rates[0].flags |=
- IEEE80211_TX_RC_SHORT_GI;
- }
+ break;
+ case WMI_RATE_PREAMBLE_HE:
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ arsta->txrate.he_dcm = dcm;
+ arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
+ arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
+ (user_rate->ru_end -
+ user_rate->ru_start) + 1);
break;
}
arsta->txrate.nss = nss;
arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
- arsta->tx_info.status.rates[0].flags |= ath11k_bw_to_mac80211_bwflags(bw);
arsta->tx_duration += tx_duration;
memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
- if (succ_pkts) {
- arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
- arsta->tx_info.status.rates[0].count = 1;
- ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
- }
-
/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
* So skip peer stats update for mgmt packets.
*/
@@ -1308,18 +1480,10 @@ exit:
static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
{
struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
+ struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
struct ath11k *ar;
- u32 len;
u8 pdev_id;
- len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, data->hdr);
- if (len > ATH11K_HTT_PKTLOG_MAX_SIZE) {
- ath11k_warn(ab, "htt pktlog buffer size %d, expected < %d\n",
- len,
- ATH11K_HTT_PKTLOG_MAX_SIZE);
- return;
- }
-
pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
if (!ar) {
@@ -1327,7 +1491,30 @@ static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
return;
}
- trace_ath11k_htt_pktlog(ar, data->payload, len);
+ trace_ath11k_htt_pktlog(ar, data->payload, hdr->size);
+}
+
+static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ u32 *data = (u32 *)skb->data;
+ u8 pdev_id, ring_type, ring_id;
+ u16 hp, tp;
+ u32 backpressure_time;
+
+ pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
+ ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
+ ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
+ ++data;
+
+ hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
+ tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
+ ++data;
+
+ backpressure_time = *data;
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d, ring id %d, hp %d tp %d, backpressure time %d\n",
+ pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
}
void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
@@ -1379,6 +1566,9 @@ void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
case HTT_T2H_MSG_TYPE_PKTLOG:
ath11k_htt_pktlog(ab, skb);
break;
+ case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
+ ath11k_htt_backpressure_event_handler(ab, skb);
+ break;
default:
ath11k_warn(ab, "htt event %d not handled\n", type);
break;
@@ -1490,88 +1680,6 @@ static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_
return NULL;
}
-static int ath11k_dp_rx_retrieve_amsdu(struct ath11k *ar,
- struct sk_buff_head *msdu_list,
- struct sk_buff_head *amsdu_list)
-{
- struct sk_buff *msdu = skb_peek(msdu_list);
- struct sk_buff *last_buf;
- struct ath11k_skb_rxcb *rxcb;
- struct ieee80211_hdr *hdr;
- struct hal_rx_desc *rx_desc, *lrx_desc;
- u16 msdu_len;
- u8 l3_pad_bytes;
- u8 *hdr_status;
- int ret;
-
- if (!msdu)
- return -ENOENT;
-
- rx_desc = (struct hal_rx_desc *)msdu->data;
- hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc);
- hdr = (struct ieee80211_hdr *)hdr_status;
- /* Process only data frames */
- if (!ieee80211_is_data(hdr->frame_control)) {
- __skb_unlink(msdu, msdu_list);
- dev_kfree_skb_any(msdu);
- return -EINVAL;
- }
-
- do {
- __skb_unlink(msdu, msdu_list);
- last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
- if (!last_buf) {
- ath11k_warn(ar->ab,
- "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
- ret = -EIO;
- goto free_out;
- }
-
- rx_desc = (struct hal_rx_desc *)msdu->data;
- lrx_desc = (struct hal_rx_desc *)last_buf->data;
-
- if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
- ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
- ret = -EIO;
- goto free_out;
- }
-
- rxcb = ATH11K_SKB_RXCB(msdu);
- rxcb->rx_desc = rx_desc;
- msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
- l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);
-
- if (!rxcb->is_continuation) {
- skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
- skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
- } else {
- ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
- msdu, last_buf,
- l3_pad_bytes, msdu_len);
- if (ret) {
- ath11k_warn(ar->ab,
- "failed to coalesce msdu rx buffer%d\n", ret);
- goto free_out;
- }
- }
- __skb_queue_tail(amsdu_list, msdu);
-
- /* Should we also consider msdu_cnt from mpdu_meta while
- * preparing amsdu list?
- */
- if (rxcb->is_last_msdu)
- break;
- } while ((msdu = skb_peek(msdu_list)) != NULL);
-
- return 0;
-
-free_out:
- dev_kfree_skb_any(msdu);
- __skb_queue_purge(amsdu_list);
-
- return ret;
-}
-
static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu)
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
@@ -1670,20 +1778,53 @@ static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
enum hal_encrypt_type enctype,
struct ieee80211_rx_status *status)
{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
struct ieee80211_hdr *hdr;
size_t hdr_len;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
+ u16 qos_ctl = 0;
+ u8 *qos;
- /* pull decapped header and copy SA & DA */
+ /* copy SA & DA and pull decapped header */
hdr = (struct ieee80211_hdr *)msdu->data;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
ether_addr_copy(da, ieee80211_get_DA(hdr));
ether_addr_copy(sa, ieee80211_get_SA(hdr));
skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
- /* push original 802.11 header */
- hdr = (struct ieee80211_hdr *)first_hdr;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ if (rxcb->is_first_msdu) {
+ /* The original 802.11 header is valid for the first msdu,
+ * hence we can reuse the same header.
+ */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ /* Each A-MSDU subframe will be reported as a separate MSDU,
+ * so strip the A-MSDU bit from QoS Ctl.
+ */
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+ } else {
+ /* Rebuild qos header if this is a middle/last msdu */
+ hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+
+ /* Reset the order bit as the HT_Control header is stripped */
+ hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
+
+ qos_ctl = rxcb->tid;
+
+ if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(rxcb->rx_desc))
+ qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
+
+ /* TODO Add other QoS ctl fields when required */
+
+ /* copy decap header before overwriting for reuse below */
+ memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
+ }
if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
memcpy(skb_push(msdu,
@@ -1692,6 +1833,14 @@ static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
ath11k_dp_rx_crypto_param_len(ar, enctype));
}
+ if (!rxcb->is_first_msdu) {
+ memcpy(skb_push(msdu,
+ IEEE80211_QOS_CTL_LEN), &qos_ctl,
+ IEEE80211_QOS_CTL_LEN);
+ memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
+ return;
+ }
+
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
/* original 802.11 header has a different DA and in
@@ -1846,7 +1995,7 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
u8 decap;
first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc);
- decap = ath11k_dp_rx_h_mpdu_start_decap_type(rx_desc);
+ decap = ath11k_dp_rx_h_msdu_start_decap_type(rx_desc);
switch (decap) {
case DP_RX_DECAP_TYPE_NATIVE_WIFI:
@@ -1858,6 +2007,7 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
decrypted);
break;
case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
+ /* TODO undecap support for middle/last msdu's of amsdu */
ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
enctype, status);
break;
@@ -1868,42 +2018,41 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
}
static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
- struct sk_buff_head *amsdu_list,
+ struct sk_buff *msdu,
struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status)
{
- struct ieee80211_hdr *hdr;
+ bool fill_crypto_hdr, mcast;
enum hal_encrypt_type enctype;
- struct sk_buff *last_msdu;
- struct sk_buff *msdu;
- struct ath11k_skb_rxcb *last_rxcb;
- bool is_decrypted;
+ bool is_decrypted = false;
+ struct ieee80211_hdr *hdr;
+ struct ath11k_peer *peer;
u32 err_bitmap;
- u8 *qos;
- if (skb_queue_empty(amsdu_list))
- return;
+ hdr = (struct ieee80211_hdr *)msdu->data;
- hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rx_desc);
+ /* PN for multicast packets will be checked in mac80211 */
- /* Each A-MSDU subframe will use the original header as the base and be
- * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
- */
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- qos = ieee80211_get_qos_ctl(hdr);
- qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
- }
+ mcast = is_multicast_ether_addr(hdr->addr1);
+ fill_crypto_hdr = mcast;
is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
- enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
- /* Some attention flags are valid only in the last MSDU. */
- last_msdu = skb_peek_tail(amsdu_list);
- last_rxcb = ATH11K_SKB_RXCB(last_msdu);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2);
+ if (peer) {
+ if (mcast)
+ enctype = peer->sec_type_grp;
+ else
+ enctype = peer->sec_type;
+ } else {
+ enctype = HAL_ENCRYPT_TYPE_OPEN;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
- err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(last_rxcb->rx_desc);
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
- /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact */
rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
RX_FLAG_MMIC_ERROR |
RX_FLAG_DECRYPTED |
@@ -1912,19 +2061,29 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
if (err_bitmap & DP_RX_MPDU_ERR_FCS)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
-
if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
rx_status->flag |= RX_FLAG_MMIC_ERROR;
- if (is_decrypted)
- rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED |
- RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED;
+ if (is_decrypted) {
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
- skb_queue_walk(amsdu_list, msdu) {
- ath11k_dp_rx_h_csum_offload(msdu);
- ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
- enctype, rx_status, is_decrypted);
+ if (fill_crypto_hdr)
+ rx_status->flag |= RX_FLAG_MIC_STRIPPED |
+ RX_FLAG_ICV_STRIPPED;
+ else
+ rx_status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_PN_VALIDATED;
}
+
+ ath11k_dp_rx_h_csum_offload(msdu);
+ ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ enctype, rx_status, is_decrypted);
+
+ if (!is_decrypted || fill_crypto_hdr)
+ return;
+
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}
static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
@@ -1988,6 +2147,7 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
}
rx_status->encoding = RX_ENC_HE;
rx_status->nss = nss;
+ rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
break;
}
@@ -2013,9 +2173,13 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
} else if (channel_num >= 36 && channel_num <= 173) {
rx_status->band = NL80211_BAND_5GHZ;
} else {
- ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
- channel_num);
- return;
+ spin_lock_bh(&ar->data_lock);
+ rx_status->band = ar->rx_channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
+ spin_unlock_bh(&ar->data_lock);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
+ rx_desc, sizeof(struct hal_rx_desc));
}
rx_status->freq = ieee80211_channel_to_frequency(channel_num,
@@ -2024,29 +2188,6 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}
-static void ath11k_dp_rx_process_amsdu(struct ath11k *ar,
- struct sk_buff_head *amsdu_list,
- struct ieee80211_rx_status *rx_status)
-{
- struct sk_buff *first;
- struct ath11k_skb_rxcb *rxcb;
- struct hal_rx_desc *rx_desc;
- bool first_mpdu;
-
- if (skb_queue_empty(amsdu_list))
- return;
-
- first = skb_peek(amsdu_list);
- rxcb = ATH11K_SKB_RXCB(first);
- rx_desc = rxcb->rx_desc;
-
- first_mpdu = ath11k_dp_rx_h_attn_first_mpdu(rx_desc);
- if (first_mpdu)
- ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
-
- ath11k_dp_rx_h_mpdu(ar, amsdu_list, rx_desc, rx_status);
-}
-
static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
size_t size)
{
@@ -2113,55 +2254,115 @@ static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *nap
ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
}
-static void ath11k_dp_rx_pre_deliver_amsdu(struct ath11k *ar,
- struct sk_buff_head *amsdu_list,
- struct ieee80211_rx_status *rxs)
+static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list)
{
- struct sk_buff *msdu;
- struct sk_buff *first_subframe;
+ struct hal_rx_desc *rx_desc, *lrx_desc;
+ struct ieee80211_rx_status rx_status = {0};
struct ieee80211_rx_status *status;
+ struct ath11k_skb_rxcb *rxcb;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *last_buf;
+ u8 l3_pad_bytes;
+ u16 msdu_len;
+ int ret;
- first_subframe = skb_peek(amsdu_list);
+ last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
+ if (!last_buf) {
+ ath11k_warn(ar->ab,
+ "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
+ ret = -EIO;
+ goto free_out;
+ }
- skb_queue_walk(amsdu_list, msdu) {
- /* Setup per-MSDU flags */
- if (skb_queue_empty(amsdu_list))
- rxs->flag &= ~RX_FLAG_AMSDU_MORE;
- else
- rxs->flag |= RX_FLAG_AMSDU_MORE;
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ lrx_desc = (struct hal_rx_desc *)last_buf->data;
+ if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) {
+ ath11k_warn(ar->ab, "msdu_done bit in attention is not set\n");
+ ret = -EIO;
+ goto free_out;
+ }
- if (msdu == first_subframe) {
- first_subframe = NULL;
- rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
- } else {
- rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ rxcb->rx_desc = rx_desc;
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
+ l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc);
+
+ if (rxcb->is_frag) {
+ skb_pull(msdu, HAL_RX_DESC_SIZE);
+ } else if (!rxcb->is_continuation) {
+ if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
+ ret = -EINVAL;
+ ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
+ goto free_out;
+ }
+ skb_put(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE + l3_pad_bytes);
+ } else {
+ ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
+ msdu, last_buf,
+ l3_pad_bytes, msdu_len);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to coalesce msdu rx buffer%d\n", ret);
+ goto free_out;
}
- rxs->flag |= RX_FLAG_SKIP_MONITOR;
-
- status = IEEE80211_SKB_RXCB(msdu);
- *status = *rxs;
}
+
+ hdr = (struct ieee80211_hdr *)msdu->data;
+
+ /* Process only data frames */
+ if (!ieee80211_is_data(hdr->frame_control))
+ return -EINVAL;
+
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status);
+ ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status);
+
+ rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+
+ status = IEEE80211_SKB_RXCB(msdu);
+ *status = rx_status;
+ return 0;
+
+free_out:
+ return ret;
}
-static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab,
- struct napi_struct *napi,
- struct sk_buff_head *pending_q,
- int *quota, u8 mac_id)
+static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
+ struct napi_struct *napi,
+ struct sk_buff_head *msdu_list,
+ int *quota, int ring_id)
{
- struct ath11k *ar;
+ struct ath11k_skb_rxcb *rxcb;
struct sk_buff *msdu;
- struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+ u8 mac_id;
+ int ret;
- if (skb_queue_empty(pending_q))
+ if (skb_queue_empty(msdu_list))
return;
- ar = ab->pdevs[mac_id].ar;
-
rcu_read_lock();
- pdev = rcu_dereference(ab->pdevs_active[mac_id]);
- while (*quota && (msdu = __skb_dequeue(pending_q))) {
- if (!pdev) {
+ while (*quota && (msdu = __skb_dequeue(msdu_list))) {
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ mac_id = rxcb->mac_id;
+ ar = ab->pdevs[mac_id].ar;
+ if (!rcu_dereference(ab->pdevs_active[mac_id])) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list);
+ if (ret) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "Unable to process msdu %d", ret);
dev_kfree_skb_any(msdu);
continue;
}
@@ -2169,46 +2370,31 @@ static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab,
ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
(*quota)--;
}
+
rcu_read_unlock();
}
-int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
- struct napi_struct *napi, struct sk_buff_head *pending_q,
- int budget)
+int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
+ struct napi_struct *napi, int budget)
{
- struct ath11k *ar = ab->pdevs[mac_id].ar;
- struct ath11k_pdev_dp *dp = &ar->dp;
- struct ieee80211_rx_status *rx_status = &dp->rx_status;
- struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
- struct hal_srng *srng;
- struct sk_buff *msdu;
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring;
+ int num_buffs_reaped[MAX_RADIOS] = {0};
struct sk_buff_head msdu_list;
- struct sk_buff_head amsdu_list;
struct ath11k_skb_rxcb *rxcb;
- u32 *rx_desc;
- int buf_id;
- int num_buffs_reaped = 0;
+ int total_msdu_reaped = 0;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
int quota = budget;
- int ret;
bool done = false;
-
- /* Process any pending packets from the previous napi poll.
- * Note: All msdu's in this pending_q corresponds to the same mac id
- * due to pdev based reo dest mapping and also since each irq group id
- * maps to specific reo dest ring.
- */
- ath11k_dp_rx_process_pending_packets(ab, napi, pending_q, &quota,
- mac_id);
-
- /* If all quota is exhausted by processing the pending_q,
- * Wait for the next napi poll to reap the new info
- */
- if (!quota)
- goto exit;
+ int buf_id, mac_id;
+ struct ath11k *ar;
+ u32 *rx_desc;
+ int i;
__skb_queue_head_init(&msdu_list);
- srng = &ab->hal.srng_list[dp->reo_dst_ring.ring_id];
+ srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
spin_lock_bh(&srng->lock);
@@ -2224,6 +2410,10 @@ try_again:
desc->buf_addr_info.info1);
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
cookie);
+ mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
+
+ ar = ab->pdevs[mac_id].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
spin_lock_bh(&rx_ring->idr_lock);
msdu = idr_find(&rx_ring->bufs_idr, buf_id);
if (!msdu) {
@@ -2241,15 +2431,15 @@ try_again:
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- num_buffs_reaped++;
+ num_buffs_reaped[mac_id]++;
+ total_msdu_reaped++;
push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
desc->info0);
if (push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
- /* TODO: Check if the msdu can be sent up for processing */
dev_kfree_skb_any(msdu);
- ab->soc_stats.hal_reo_error[dp->reo_dst_ring.ring_id]++;
+ ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
continue;
}
@@ -2260,19 +2450,12 @@ try_again:
rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->mac_id = mac_id;
+ rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
+ desc->info0);
+
__skb_queue_tail(&msdu_list, msdu);
- /* Stop reaping from the ring once quota is exhausted
- * and we've received all msdu's in the the AMSDU. The
- * additional msdu's reaped in excess of quota here would
- * be pushed into the pending queue to be processed during
- * the next napi poll.
- * Note: More profiling can be done to see the impact on
- * pending_q and throughput during various traffic & density
- * and how use of budget instead of remaining quota affects it.
- */
- if (num_buffs_reaped >= quota && rxcb->is_last_msdu &&
- !rxcb->is_continuation) {
+ if (total_msdu_reaped >= quota && !rxcb->is_continuation) {
done = true;
break;
}
@@ -2293,58 +2476,23 @@ try_again:
spin_unlock_bh(&srng->lock);
- if (!num_buffs_reaped)
+ if (!total_msdu_reaped)
goto exit;
- /* Should we reschedule it later if we are not able to replenish all
- * the buffers?
- */
- ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buffs_reaped,
- HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
-
- rcu_read_lock();
- if (!rcu_dereference(ab->pdevs_active[mac_id])) {
- __skb_queue_purge(&msdu_list);
- goto rcu_unlock;
- }
-
- if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
- __skb_queue_purge(&msdu_list);
- goto rcu_unlock;
- }
-
- while (!skb_queue_empty(&msdu_list)) {
- __skb_queue_head_init(&amsdu_list);
- ret = ath11k_dp_rx_retrieve_amsdu(ar, &msdu_list, &amsdu_list);
- if (ret) {
- if (ret == -EIO) {
- ath11k_err(ab, "rx ring got corrupted %d\n", ret);
- __skb_queue_purge(&msdu_list);
- /* Should stop processing any more rx in
- * future from this ring?
- */
- goto rcu_unlock;
- }
-
- /* A-MSDU retrieval got failed due to non-fatal condition,
- * continue processing with the next msdu.
- */
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!num_buffs_reaped[i])
continue;
- }
- ath11k_dp_rx_process_amsdu(ar, &amsdu_list, rx_status);
+ ar = ab->pdevs[i].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
- ath11k_dp_rx_pre_deliver_amsdu(ar, &amsdu_list, rx_status);
- skb_queue_splice_tail(&amsdu_list, pending_q);
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
+ HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
}
- while (quota && (msdu = __skb_dequeue(pending_q))) {
- ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
- quota--;
- }
+ ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
+ &quota, ring_id);
-rcu_unlock:
- rcu_read_unlock();
exit:
return budget - quota;
}
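A caller-side sketch showing how the return value feeds NAPI budget accounting. The shape is an assumption (the real dispatch lives in the DP service/interrupt glue), and the sketch's function name is hypothetical:

	static int ath11k_dp_rx_poll_sketch(struct ath11k_base *ab,
					    struct napi_struct *napi,
					    int ring_id, int budget)
	{
		/* returns budget - remaining quota, i.e. packets processed */
		int work_done = ath11k_dp_process_rx(ab, ring_id, napi, budget);

		if (work_done < budget)
			napi_complete_done(napi, work_done);

		return work_done;
	}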
@@ -2411,6 +2559,8 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
+ rx_stats->dcm_count += ppdu_info->dcm;
+ rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
arsta->rssi_comb = ppdu_info->rssi_comb;
rx_stats->rx_duration += ppdu_info->rx_duration;
@@ -2681,99 +2831,563 @@ exit:
return num_buffs_reaped;
}
-static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
- u32 *link_desc,
- enum hal_wbm_rel_bm_act action)
+static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
{
- struct ath11k_dp *dp = &ab->dp;
- struct hal_srng *srng;
- u32 *desc;
- int ret = 0;
+ struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
- srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
+ spin_lock_bh(&rx_tid->ab->base_lock);
+ if (rx_tid->last_frag_no &&
+ rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
+ spin_unlock_bh(&rx_tid->ab->base_lock);
+ return;
+ }
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+ spin_unlock_bh(&rx_tid->ab->base_lock);
+}
- spin_lock_bh(&srng->lock);
+int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct crypto_shash *tfm;
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ int i;
- ath11k_hal_srng_access_begin(ab, srng);
+ tfm = crypto_alloc_shash("michael_mic", 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
- desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
- if (!desc) {
- ret = -ENOBUFS;
- goto exit;
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
}
- ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
- action);
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+ rx_tid->ab = ab;
+ timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
+ skb_queue_head_init(&rx_tid->rx_frags);
+ }
-exit:
- ath11k_hal_srng_access_end(ab, srng);
+ peer->tfm_mmic = tfm;
+ spin_unlock_bh(&ab->base_lock);
- spin_unlock_bh(&srng->lock);
+ return 0;
+}
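The shash allocated here needs a matching release. A teardown sketch, under the assumption that it runs from the peer cleanup path once the fragment queues have been purged; the function name is illustrative:

	static void ath11k_peer_rx_frag_teardown_sketch(struct ath11k_peer *peer)
	{
		if (peer->tfm_mmic) {
			crypto_free_shash(peer->tfm_mmic);
			peer->tfm_mmic = NULL;
		}
	}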
+
+static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
+ struct ieee80211_hdr *hdr, u8 *data,
+ size_t data_len, u8 *mic)
+{
+ SHASH_DESC_ON_STACK(desc, tfm);
+ u8 mic_hdr[16] = {0};
+ u8 tid = 0;
+ int ret;
+
+ if (!tfm)
+ return -EINVAL;
+
+ desc->tfm = tfm;
+
+ ret = crypto_shash_setkey(tfm, key, 8);
+ if (ret)
+ goto out;
+ ret = crypto_shash_init(desc);
+ if (ret)
+ goto out;
+
+ /* TKIP MIC header */
+ memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
+ memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ mic_hdr[12] = tid;
+
+ ret = crypto_shash_update(desc, mic_hdr, 16);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(desc, data, data_len);
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(desc, mic);
+out:
+ shash_desc_zero(desc);
return ret;
}
-static void ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
- struct sk_buff *msdu,
- struct hal_rx_desc *rx_desc,
- struct ieee80211_rx_status *rx_status)
+static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
+ struct sk_buff *msdu)
+{
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
+ struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
+ struct ieee80211_key_conf *key_conf;
+ struct ieee80211_hdr *hdr;
+ u8 mic[IEEE80211_CCMP_MIC_LEN];
+ int head_len, tail_len, ret;
+ size_t data_len;
+ u32 hdr_len;
+ u8 *key, *data;
+ u8 key_idx;
+
+ if (ath11k_dp_rx_h_mpdu_start_enctype(rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
+ return 0;
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ head_len = hdr_len + HAL_RX_DESC_SIZE + IEEE80211_TKIP_IV_LEN;
+ tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
+
+ if (!is_multicast_ether_addr(hdr->addr1))
+ key_idx = peer->ucast_keyidx;
+ else
+ key_idx = peer->mcast_keyidx;
+
+ key_conf = peer->keys[key_idx];
+
+ data = msdu->data + head_len;
+ data_len = msdu->len - head_len - tail_len;
+ key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
+
+ ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
+ if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
+ goto mic_fail;
+
+ return 0;
+
+mic_fail:
+ (ATH11K_SKB_RXCB(msdu))->is_first_msdu = 1;
+ (ATH11K_SKB_RXCB(msdu))->is_last_msdu = 1;
+
+ rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
+ skb_pull(msdu, HAL_RX_DESC_SIZE);
+
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+ ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
+ ieee80211_rx(ar->hw, msdu);
+ return -EINVAL;
+}
+
+static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype, u32 flags)
+{
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ size_t crypto_len;
+
+ if (!flags)
+ return;
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + HAL_RX_DESC_SIZE);
+
+ if (flags & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+
+ if (flags & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_icv_len(ar, enctype));
+
+ if (flags & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
+
+ memmove((void *)msdu->data + HAL_RX_DESC_SIZE + crypto_len,
+ (void *)msdu->data + HAL_RX_DESC_SIZE, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
+static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
+ struct ath11k_peer *peer,
+ struct dp_rx_tid *rx_tid,
+ struct sk_buff **defrag_skb)
{
- u8 rx_channel;
+ struct hal_rx_desc *rx_desc;
+ struct sk_buff *skb, *first_frag, *last_frag;
+ struct ieee80211_hdr *hdr;
enum hal_encrypt_type enctype;
- bool is_decrypted;
- u32 err_bitmap;
+ bool is_decrypted = false;
+ int msdu_len = 0;
+ int extra_space;
+ u32 flags;
+
+ first_frag = skb_peek(&rx_tid->rx_frags);
+ last_frag = skb_peek_tail(&rx_tid->rx_frags);
+
+ skb_queue_walk(&rx_tid->rx_frags, skb) {
+ flags = 0;
+ rx_desc = (struct hal_rx_desc *)skb->data;
+ hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
+
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN)
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
+
+ if (is_decrypted) {
+ if (skb != first_frag)
+ flags |= RX_FLAG_IV_STRIPPED;
+ if (skb != last_frag)
+ flags |= RX_FLAG_ICV_STRIPPED |
+ RX_FLAG_MIC_STRIPPED;
+ }
- is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
- enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc);
- err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
+ /* RX fragments are always raw packets */
+ if (skb != last_frag)
+ skb_trim(skb, skb->len - FCS_LEN);
+ ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
- if (err_bitmap & DP_RX_MPDU_ERR_FCS)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (skb != first_frag)
+ skb_pull(skb, HAL_RX_DESC_SIZE +
+ ieee80211_hdrlen(hdr->frame_control));
+ msdu_len += skb->len;
+ }
- if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
- rx_status->flag |= RX_FLAG_MMIC_ERROR;
+ extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
+ if (extra_space > 0 &&
+ (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
+ return -ENOMEM;
- rx_status->encoding = RX_ENC_LEGACY;
- rx_status->bw = RATE_INFO_BW_20;
+ __skb_unlink(first_frag, &rx_tid->rx_frags);
+ while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
+ skb_put_data(first_frag, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ }
- rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ hdr = (struct ieee80211_hdr *)(first_frag->data + HAL_RX_DESC_SIZE);
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
+ ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
- rx_channel = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
+ if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
+ first_frag = NULL;
- if (rx_channel >= 1 && rx_channel <= 14) {
- rx_status->band = NL80211_BAND_2GHZ;
- } else if (rx_channel >= 36 && rx_channel <= 173) {
- rx_status->band = NL80211_BAND_5GHZ;
- } else {
- ath11k_warn(ar->ab, "Unsupported Channel info received %d\n",
- rx_channel);
+ *defrag_skb = first_frag;
+ return 0;
+}
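To make the headroom arithmetic above concrete, a worked example under assumed sizes:

	/* Worked example (assumed sizes):
	 *   DP_RX_BUFFER_SIZE        = 2048
	 *   skb_tailroom(first_frag) =  256
	 *   total msdu_len           = 2500
	 *   extra_space = 2500 - (2048 + 256) = 196 > 0
	 * so pskb_expand_head() grows the first fragment's buffer by
	 * 196 bytes before the remaining fragments are appended with
	 * skb_put_data().
	 */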
+
+static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
+ struct sk_buff *defrag_skb)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
+ struct hal_reo_entrance_ring *reo_ent_ring;
+ struct hal_reo_dest_ring *reo_dest_ring;
+ struct dp_link_desc_bank *link_desc_banks;
+ struct hal_rx_msdu_link *msdu_link;
+ struct hal_rx_msdu_details *msdu0;
+ struct hal_srng *srng;
+ dma_addr_t paddr;
+ u32 desc_bank, msdu_info, mpdu_info;
+ u32 dst_idx, cookie;
+ u32 *msdu_len_offset;
+ int ret, buf_id;
+
+ link_desc_banks = ab->dp.link_desc_banks;
+ reo_dest_ring = rx_tid->dst_ring_desc;
+
+ ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
+ msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr));
+ msdu0 = &msdu_link->msdu_link[0];
+ dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
+ memset(msdu0, 0, sizeof(*msdu0));
+
+ msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
+ defrag_skb->len - HAL_RX_DESC_SIZE) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
+ msdu0->rx_msdu_info.info0 = msdu_info;
+
+ /* change msdu len in hal rx desc */
+ msdu_len_offset = (u32 *)&rx_desc->msdu_start;
+ *msdu_len_offset &= ~(RX_MSDU_START_INFO1_MSDU_LENGTH);
+ *msdu_len_offset |= defrag_skb->len - HAL_RX_DESC_SIZE;
+
+ paddr = dma_map_single(ab->dev, defrag_skb->data,
+ defrag_skb->len + skb_tailroom(defrag_skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ return -ENOMEM;
+
+ spin_lock_bh(&rx_refill_ring->idr_lock);
+ buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
+ rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
+ spin_unlock_bh(&rx_refill_ring->idr_lock);
+ if (buf_id < 0) {
+ ret = -ENOMEM;
+ goto err_unmap_dma;
+ }
+
+ ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM);
+
+ /* Fill mpdu details into reo entrance ring */
+ srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ reo_ent_ring = (struct hal_reo_entrance_ring *)
+ ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!reo_ent_ring) {
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+ ret = -ENOSPC;
+ goto err_free_idr;
+ }
+ memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
+
+ ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
+ ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
+ HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
+
+ mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
+
+ reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
+ reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
+ reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
+ reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
+ FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
+ reo_dest_ring->info0)) |
+ FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return 0;
+
+err_free_idr:
+ spin_lock_bh(&rx_refill_ring->idr_lock);
+ idr_remove(&rx_refill_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_refill_ring->idr_lock);
+err_unmap_dma:
+ dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
+ DMA_FROM_DEVICE);
+ return ret;
+}
+
+static int ath11k_dp_rx_h_cmp_frags(struct sk_buff *a, struct sk_buff *b)
+{
+ int frag1, frag2;
+
+ frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(a);
+ frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(b);
+
+ return frag1 - frag2;
+}
+
+static void ath11k_dp_rx_h_sort_frags(struct sk_buff_head *frag_list,
+ struct sk_buff *cur_frag)
+{
+ struct sk_buff *skb;
+ int cmp;
+
+ skb_queue_walk(frag_list, skb) {
+ cmp = ath11k_dp_rx_h_cmp_frags(skb, cur_frag);
+ if (cmp < 0)
+ continue;
+ __skb_queue_before(frag_list, skb, cur_frag);
return;
}
+ __skb_queue_tail(frag_list, cur_frag);
+}
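A worked example of the insertion behavior, tracing the queue contents as fragments arrive out of order:

	/* Fragments arrive with frag_no 0, 2, then 1:
	 *   insert 0: walk finds nothing, queued at tail -> [0]
	 *   insert 2: no entry with cmp >= 0, tail        -> [0, 2]
	 *   insert 1: walk reaches 2 (2 - 1 >= 0), so 1 is
	 *             queued before it                     -> [0, 1, 2]
	 * The queue therefore stays sorted by fragment number without
	 * a separate sort pass.
	 */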
- rx_status->freq = ieee80211_channel_to_frequency(rx_channel,
- rx_status->band);
- ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
+static u64 ath11k_dp_rx_h_get_pn(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ u64 pn = 0;
+ u8 *ehdr;
- /* Rx fragments are received in raw mode */
- skb_trim(msdu, msdu->len - FCS_LEN);
+ hdr = (struct ieee80211_hdr *)(skb->data + HAL_RX_DESC_SIZE);
+ ehdr = skb->data + HAL_RX_DESC_SIZE + ieee80211_hdrlen(hdr->frame_control);
- if (is_decrypted) {
- rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
- skb_trim(msdu, msdu->len -
- ath11k_dp_rx_crypto_mic_len(ar, enctype));
+ pn = ehdr[0];
+ pn |= (u64)ehdr[1] << 8;
+ pn |= (u64)ehdr[4] << 16;
+ pn |= (u64)ehdr[5] << 24;
+ pn |= (u64)ehdr[6] << 32;
+ pn |= (u64)ehdr[7] << 40;
+
+ return pn;
+}
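The byte offsets above follow the CCMP/GCMP header layout, in which the third and fourth bytes carry no PN bits:

	/* CCMP/GCMP header immediately after the 802.11 header:
	 *   ehdr[0] PN0   ehdr[1] PN1   ehdr[2] rsvd   ehdr[3] key id / ext IV
	 *   ehdr[4] PN2   ehdr[5] PN3   ehdr[6] PN4    ehdr[7] PN5
	 * which is why ehdr[2] and ehdr[3] are skipped when assembling
	 * the 48-bit packet number.
	 */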
+
+static bool
+ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
+{
+ enum hal_encrypt_type encrypt_type;
+ struct sk_buff *first_frag, *skb;
+ struct hal_rx_desc *desc;
+ u64 last_pn;
+ u64 cur_pn;
+
+ first_frag = skb_peek(&rx_tid->rx_frags);
+ desc = (struct hal_rx_desc *)first_frag->data;
+
+ encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(desc);
+ if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
+ return true;
+
+ last_pn = ath11k_dp_rx_h_get_pn(first_frag);
+ skb_queue_walk(&rx_tid->rx_frags, skb) {
+ if (skb == first_frag)
+ continue;
+
+ cur_pn = ath11k_dp_rx_h_get_pn(skb);
+ if (cur_pn != last_pn + 1)
+ return false;
+ last_pn = cur_pn;
+ }
+ return true;
+}
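A short example of what this strictly-incrementing check accepts and rejects:

	/* A fragment train carrying PNs 4100, 4101, 4102 validates;
	 * 4100, 4102 (a gap) or 4100, 4100 (a replay) makes this return
	 * false, and the caller then drops the whole fragment sequence.
	 */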
+
+static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ u32 *ring_desc)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct hal_rx_desc *rx_desc;
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ struct sk_buff *defrag_skb = NULL;
+ u32 peer_id;
+ u16 seqno, frag_no;
+ u8 tid;
+ int ret = 0;
+ bool more_frags;
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(rx_desc);
+ tid = ath11k_dp_rx_h_mpdu_start_tid(rx_desc);
+ seqno = ath11k_dp_rx_h_mpdu_start_seq_no(rx_desc);
+ frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(msdu);
+ more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(msdu);
+
+ if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(rx_desc) ||
+ !ath11k_dp_rx_h_mpdu_start_fc_valid(rx_desc) ||
+ tid > IEEE80211_NUM_TIDS)
+ return -EINVAL;
+
+ /* An unfragmented packet arrived in the REO exception ring;
+ * this shouldn't happen, as such packets normally come in
+ * through the reo2sw srngs.
+ */
+ if (WARN_ON_ONCE(!frag_no && !more_frags))
+ return -EINVAL;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
+ peer_id);
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+ rx_tid = &peer->rx_tid[tid];
+
+ if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
+ skb_queue_empty(&rx_tid->rx_frags)) {
+ /* Flush stored fragments and start a new sequence */
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+ rx_tid->cur_sn = seqno;
+ }
+
+ if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
+ /* Fragment already present */
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (frag_no > __fls(rx_tid->rx_frag_bitmap))
+ __skb_queue_tail(&rx_tid->rx_frags, msdu);
+ else
+ ath11k_dp_rx_h_sort_frags(&rx_tid->rx_frags, msdu);
+
+ rx_tid->rx_frag_bitmap |= BIT(frag_no);
+ if (!more_frags)
+ rx_tid->last_frag_no = frag_no;
+
+ if (frag_no == 0) {
+ rx_tid->dst_ring_desc = kmemdup(ring_desc,
+ sizeof(*rx_tid->dst_ring_desc),
+ GFP_ATOMIC);
+ if (!rx_tid->dst_ring_desc) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ } else {
+ ath11k_dp_rx_link_desc_return(ab, ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
}
+
+ if (!rx_tid->last_frag_no ||
+ rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
+ mod_timer(&rx_tid->frag_timer, jiffies +
+ ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
+ goto out_unlock;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_id(ab, peer_id);
+ if (!peer)
+ goto err_frags_cleanup;
+
+ if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
+ goto err_frags_cleanup;
+
+ if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
+ goto err_frags_cleanup;
+
+ if (!defrag_skb)
+ goto err_frags_cleanup;
+
+ if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
+ goto err_frags_cleanup;
+
+ ath11k_dp_rx_frags_cleanup(rx_tid, false);
+ goto out_unlock;
+
+err_frags_cleanup:
+ dev_kfree_skb_any(defrag_skb);
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+out_unlock:
+ spin_unlock_bh(&ab->base_lock);
+ return ret;
}
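A concrete instance of the bitmap completion check used above, with an assumed fragment count:

	/* If the fragment without "more frags" set has frag_no 3, the MPDU
	 * is complete once rx_frag_bitmap == GENMASK(3, 0) == 0b1111, i.e.
	 * fragments 0..3 have all arrived; only then are defragmentation
	 * and REO reinjection attempted, otherwise the frag timer is armed.
	 */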
static int
-ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi,
- int buf_id, bool frag)
+ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
- struct ieee80211_rx_status rx_status = {0};
struct sk_buff *msdu;
struct ath11k_skb_rxcb *rxcb;
- struct ieee80211_rx_status *status;
struct hal_rx_desc *rx_desc;
u16 msdu_len;
@@ -2794,10 +3408,7 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- if (!frag) {
- /* Process only rx fragments below, and drop
- * msdu's indicated due to error reasons.
- */
+ if (drop) {
dev_kfree_skb_any(msdu);
return 0;
}
@@ -2816,16 +3427,12 @@ ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi,
rx_desc = (struct hal_rx_desc *)msdu->data;
msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc);
skb_put(msdu, HAL_RX_DESC_SIZE + msdu_len);
- skb_pull(msdu, HAL_RX_DESC_SIZE);
-
- ath11k_dp_rx_frag_h_mpdu(ar, msdu, rx_desc, &rx_status);
-
- status = IEEE80211_SKB_RXCB(msdu);
-
- *status = rx_status;
-
- ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+ if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
+ dev_kfree_skb_any(msdu);
+ ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
exit:
rcu_read_unlock();
return 0;
@@ -2850,6 +3457,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
dma_addr_t paddr;
u32 *desc;
bool is_frag;
+ u8 drop = 0;
tot_n_bufs_reaped = 0;
quota = budget;
@@ -2891,9 +3499,15 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
- /* Return the link desc back to wbm idle list */
- ath11k_dp_rx_link_desc_return(ab, desc,
- HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ /* Below, process only rx fragments that carry a single msdu per
+ * link desc, and drop msdus that were indicated due to errors.
+ */
+ if (!is_frag || num_msdus > 1) {
+ drop = 1;
+ /* Return the link desc back to wbm idle list */
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
for (i = 0; i < num_msdus; i++) {
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
@@ -2904,8 +3518,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
ar = ab->pdevs[mac_id].ar;
- if (!ath11k_dp_process_rx_err_buf(ar, napi, buf_id,
- is_frag)) {
+ if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
n_bufs_reaped[mac_id]++;
tot_n_bufs_reaped++;
}
@@ -2966,7 +3579,6 @@ static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
struct ieee80211_rx_status *status,
struct sk_buff_head *msdu_list)
{
- struct sk_buff_head amsdu_list;
u16 msdu_len;
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
@@ -2974,7 +3586,7 @@ static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc);
- if ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE) {
+ if (!rxcb->is_frag && ((msdu_len + HAL_RX_DESC_SIZE) > DP_RX_BUFFER_SIZE)) {
/* First buffer will be freed by the caller, so deduct it's length */
msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE);
ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
@@ -2997,24 +3609,25 @@ static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
* This error can show up both in a REO destination or WBM release ring.
*/
- __skb_queue_head_init(&amsdu_list);
-
rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc);
rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc);
- l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
-
- if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
- return -EINVAL;
+ if (rxcb->is_frag) {
+ skb_pull(msdu, HAL_RX_DESC_SIZE);
+ } else {
+ l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc);
- skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
- skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
+ if ((HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
+ return -EINVAL;
+ skb_put(msdu, HAL_RX_DESC_SIZE + l3pad_bytes + msdu_len);
+ skb_pull(msdu, HAL_RX_DESC_SIZE + l3pad_bytes);
+ }
ath11k_dp_rx_h_ppdu(ar, desc, status);
- __skb_queue_tail(&amsdu_list, msdu);
+ ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
- ath11k_dp_rx_h_mpdu(ar, &amsdu_list, desc, status);
+ rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(desc);
/* Note that the caller retains access to the msdu and completes
* rx with mac80211, so there is no need to clean up an amsdu_list.
@@ -3037,6 +3650,13 @@ static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
drop = true;
break;
+ case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
+ /* TODO: Do not drop PN failed packets in the driver;
+ * instead, it would be better to drop such packets in mac80211
+ * after incrementing the replay counters.
+ */
+
+ /* fall through */
default:
/* TODO: Review other errors and process them to mac80211
* as appropriate.
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
index eec5deaa59ad..88bbcae14e34 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -9,6 +9,8 @@
#include "rx_desc.h"
#include "debug.h"
+#define DP_MAX_NWIFI_HDR_LEN 30
+
#define DP_RX_MPDU_ERR_FCS BIT(0)
#define DP_RX_MPDU_ERR_DECRYPT BIT(1)
#define DP_RX_MPDU_ERR_TKIP_MIC BIT(2)
@@ -43,9 +45,16 @@ int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
struct ieee80211_ampdu_params *params);
int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
struct ieee80211_ampdu_params *params);
+int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
+ const u8 *peer_addr,
+ enum set_key_cmd key_cmd,
+ struct ieee80211_key_conf *key);
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
+void ath11k_peer_rx_tid_delete(struct ath11k *ar,
+ struct ath11k_peer *peer, u8 tid);
int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
- u8 tid, u32 ba_win_sz, u16 ssn);
+ u8 tid, u32 ba_win_sz, u16 ssn,
+ enum hal_pn_type pn_type);
void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
struct sk_buff *skb);
int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab);
@@ -60,7 +69,7 @@ int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
int budget);
int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
- struct napi_struct *napi, struct sk_buff_head *pending_q,
+ struct napi_struct *napi,
int budget);
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
@@ -82,5 +91,6 @@ int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
gfp_t gfp);
int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar);
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar);
+int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id);
#endif /* ATH11K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 6d7d33761caf..7aac4b0eea0c 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -7,6 +7,7 @@
#include "dp_tx.h"
#include "debug.h"
#include "hw.h"
+#include "peer.h"
/* NOTE: Any of the mapped ring id value must not exceed DP_TCL_NUM_RING_MAX */
static const u8
@@ -46,7 +47,7 @@ static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
}
-static enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
+enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
{
switch (cipher) {
case WLAN_CIPHER_SUITE_WEP40:
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index b58ac11c2747..9e40c4bdd674 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -877,23 +877,32 @@ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
/* For LMAC rings, ring pointer updates are done through FW and
* hence written to a shared memory location that is read by FW
*/
- if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.last_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
- else
+ } else {
+ srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
+ }
} else {
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.last_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
ath11k_ahb_write32(ab,
(unsigned long)srng->u.src_ring.hp_addr -
(unsigned long)ab->mem,
srng->u.src_ring.hp);
} else {
+ srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
ath11k_ahb_write32(ab,
(unsigned long)srng->u.dst_ring.tp_addr -
(unsigned long)ab->mem,
srng->u.dst_ring.tp);
}
}
+
+ srng->timestamp = jiffies;
}
void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
@@ -1017,6 +1026,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
params->intr_batch_cntr_thres_entries;
srng->intr_timer_thres_us = params->intr_timer_thres_us;
srng->flags = params->flags;
+ srng->initialized = 1;
spin_lock_init(&srng->lock);
for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
@@ -1122,3 +1132,55 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab)
ath11k_hal_free_cont_rdp(ab);
ath11k_hal_free_cont_wrp(ab);
}
+
+void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
+{
+ struct hal_srng *srng;
+ struct ath11k_ext_irq_grp *irq_grp;
+ struct ath11k_ce_pipe *ce_pipe;
+ int i;
+
+ ath11k_err(ab, "Last interrupt received for each CE:\n");
+ for (i = 0; i < CE_COUNT; i++) {
+ ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
+ i, ce_pipe->pipe_num,
+ jiffies_to_msecs(jiffies - ce_pipe->timestamp));
+ }
+
+ ath11k_err(ab, "\nLast interrupt received for each group:\n");
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ irq_grp = &ab->ext_irq_grp[i];
+ ath11k_err(ab, "group_id %d %ums before\n",
+ irq_grp->grp_id,
+ jiffies_to_msecs(jiffies - irq_grp->timestamp));
+ }
+
+ for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
+ srng = &ab->hal.srng_list[i];
+
+ if (!srng->initialized)
+ continue;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ ath11k_err(ab,
+ "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
+ srng->ring_id, srng->u.src_ring.hp,
+ srng->u.src_ring.reap_hp,
+ *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
+ srng->u.src_ring.last_tp,
+ jiffies_to_msecs(jiffies - srng->timestamp));
+ else if (srng->ring_dir == HAL_SRNG_DIR_DST)
+ ath11k_err(ab,
+ "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
+ srng->ring_id, srng->u.dst_ring.tp,
+ *srng->u.dst_ring.hp_addr,
+ srng->u.dst_ring.cached_hp,
+ srng->u.dst_ring.last_hp,
+ jiffies_to_msecs(jiffies - srng->timestamp));
+ }
+}
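
The last_tp/last_hp values snapshotted at srng access end, together with the jiffies timestamp, are what make this dump useful: comparing the cached and current pointers shows whether a ring has stalled since its last NAPI pass. As a minimal sketch only, assuming hp and tp are entry indices that wrap at the ring size (the helper name is illustrative, not part of the driver), the pending-entry count for a destination ring can be derived as:

static u32 srng_dst_pending_entries(u32 hp, u32 tp, u32 num_entries)
{
	/* head leads tail; account for wrap-around */
	return (hp >= tp) ? (hp - tp) : (num_entries - tp + hp);
}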
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 5b13ccdf39e4..7722822a0456 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -96,6 +96,8 @@ struct ath11k_base;
/* REO2SW(x) R0 ring configuration address */
#define HAL_REO1_GEN_ENABLE 0x00000000
+#define HAL_REO1_DEST_RING_CTRL_IX_0 0x00000004
+#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
#define HAL_REO1_RING_BASE_LSB 0x0000029c
@@ -529,6 +531,8 @@ struct hal_srng {
*/
u32 hwreg_base[HAL_SRNG_NUM_REG_GRP];
+ u64 timestamp;
+
/* Source or Destination ring */
enum hal_srng_dir ring_dir;
@@ -554,6 +558,9 @@ struct hal_srng {
/* max transfer size */
u16 max_buffer_length;
+
+ /* head pointer at access end */
+ u32 last_hp;
} dst_ring;
struct {
@@ -577,6 +584,9 @@ struct hal_srng {
/* Low threshold - in number of ring entries */
u32 low_threshold;
+
+ /* tail pointer at access end */
+ u32 last_tp;
} src_ring;
} u;
};
@@ -717,6 +727,14 @@ enum hal_ce_desc {
HAL_CE_DESC_DST_STATUS,
};
+#define HAL_HASH_ROUTING_RING_TCL 0
+#define HAL_HASH_ROUTING_RING_SW1 1
+#define HAL_HASH_ROUTING_RING_SW2 2
+#define HAL_HASH_ROUTING_RING_SW3 3
+#define HAL_HASH_ROUTING_RING_SW4 4
+#define HAL_HASH_ROUTING_RING_REL 5
+#define HAL_HASH_ROUTING_RING_FW 6
+
struct hal_reo_status_header {
u16 cmd_num;
enum hal_reo_cmd_status cmd_status;
@@ -847,10 +865,10 @@ struct ath11k_hal {
u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
- u32 start_seqtype);
+ u32 start_seq, enum hal_pn_type type);
void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
struct hal_srng *srng);
-void ath11k_hal_reo_hw_setup(struct ath11k_base *ab);
+void ath11k_hal_reo_hw_setup(struct ath11k_base *ab, u32 ring_hash_map);
void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
struct hal_wbm_idle_scatter_list *sbuf,
u32 nsbufs, u32 tot_link_desc,
@@ -893,5 +911,6 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
struct hal_srng_params *params);
int ath11k_hal_srng_init(struct ath11k_base *ath11k);
void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
+void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
#endif
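
The HAL_HASH_ROUTING_RING_* values above are per-slot destination codes; the ring_hash_map now passed to ath11k_hal_reo_hw_setup() packs several of them into each DEST_RING_CTRL_IX register so the REO hash can spread flows across the SW1-SW4 rings. A sketch of building such a map, assuming a 3-bit-per-slot, eight-slot packing (the helper is illustrative):

static u32 reo_build_hash_map(const u8 ring_code[8])
{
	u32 map = 0;
	int i;

	/* slot i selects the destination ring for hash value i */
	for (i = 0; i < 8; i++)
		map |= (u32)(ring_code[i] & 0x7) << (i * 3);
	return map;
}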
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index 9e0f8064e427..f277c9434a25 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -694,7 +694,7 @@ u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
}
void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
- u32 start_seq)
+ u32 start_seq, enum hal_pn_type type)
{
struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr;
struct hal_rx_reo_queue_ext *ext_desc;
@@ -723,6 +723,18 @@ void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
ba_window_size - 1);
+ switch (type) {
+ case HAL_PN_TYPE_NONE:
+ case HAL_PN_TYPE_WAPI_EVEN:
+ case HAL_PN_TYPE_WAPI_UNEVEN:
+ break;
+ case HAL_PN_TYPE_WPA:
+ qdesc->info0 |=
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_CHECK, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_SIZE,
+ HAL_RX_REO_QUEUE_PN_SIZE_48);
+ break;
+ }
/* TODO: Set Ignore ampdu flags based on BA window size and/or
* AMPDU capabilities
@@ -787,7 +799,7 @@ void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
}
}
-void ath11k_hal_reo_hw_setup(struct ath11k_base *ab)
+void ath11k_hal_reo_hw_setup(struct ath11k_base *ab, u32 ring_hash_map)
{
u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
u32 val;
@@ -809,6 +821,19 @@ void ath11k_hal_reo_hw_setup(struct ath11k_base *ab)
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_ahb_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3,
HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_ahb_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
}
static enum hal_rx_mon_status
@@ -1001,6 +1026,7 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
}
ppdu_info->nss = nsts + 1;
+ ppdu_info->dcm = dcm;
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
break;
}
@@ -1038,9 +1064,15 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
break;
}
case HAL_PHYRX_HE_SIG_B1_MU: {
- /* TODO: Check if resource unit(RU) allocation stats
- * are required
- */
+ struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu =
+ (struct hal_rx_he_sig_b1_mu_info *)tlv_data;
+ u16 ru_tones;
+
+ info0 = __le32_to_cpu(he_sig_b1_mu->info0);
+
+ ru_tones = FIELD_GET(HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION,
+ info0);
+ ppdu_info->ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
break;
}
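
For context on the PN check enabled in ath11k_hal_reo_qdesc_setup(): HAL_RX_REO_QUEUE_PN_SIZE_48 matches the 48-bit packet number carried in the CCMP/GCMP header, which the REO compares per TID to detect replays. As background only (a sketch, not code from this patch), that PN is laid out in the 8-byte IV as follows:

static u64 ccmp_iv_to_pn(const u8 *iv)
{
	/* octets 0-1 are PN0-PN1, octet 2 is reserved, octet 3 holds
	 * the key id / ExtIV flag, octets 4-7 are PN2-PN5
	 */
	return (u64)iv[0] | ((u64)iv[1] << 8) |
	       ((u64)iv[4] << 16) | ((u64)iv[5] << 24) |
	       ((u64)iv[6] << 32) | ((u64)iv[7] << 40);
}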
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
index bb022c781c48..e863e4abfcc1 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -99,6 +99,8 @@ struct hal_rx_mon_ppdu_info {
u8 beamformed;
u8 rssi_comb;
u8 tid;
+ u8 dcm;
+ u8 ru_alloc;
u8 reception_type;
u64 rx_duration;
};
@@ -325,6 +327,34 @@ enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,
struct sk_buff *skb);
+
+static inline u32 ath11k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
+{
+ u32 ret = 0;
+
+ switch (ru_tones) {
+ case RU_26:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ case RU_52:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ break;
+ case RU_106:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ break;
+ case RU_242:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ break;
+ case RU_484:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ break;
+ case RU_996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ }
+ return ret;
+}
+
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF
#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index dd39333ec0ea..9973477ae373 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -62,6 +62,7 @@
#define TARGET_RX_BATCHMODE 1
#define ATH11K_HW_MAX_QUEUES 4
+#define ATH11K_QUEUE_LEN 4096
#define ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK 0x4
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 6640662f5ede..9f8bc19cc5ae 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -178,6 +178,22 @@ u8 ath11k_mac_bw_to_mac80211_bw(u8 bw)
return ret;
}
+enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw)
+{
+ switch (bw) {
+ case RATE_INFO_BW_20:
+ return ATH11K_BW_20;
+ case RATE_INFO_BW_40:
+ return ATH11K_BW_40;
+ case RATE_INFO_BW_80:
+ return ATH11K_BW_80;
+ case RATE_INFO_BW_160:
+ return ATH11K_BW_160;
+ default:
+ return ATH11K_BW_20;
+ }
+}
+
int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
u16 *rate)
{
@@ -369,8 +385,10 @@ struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id)
flags,
ath11k_get_arvif_iter,
&arvif_iter);
- if (!arvif_iter.arvif)
+ if (!arvif_iter.arvif) {
+ ath11k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id);
return NULL;
+ }
return arvif_iter.arvif;
}
@@ -398,14 +416,12 @@ struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id)
{
int i;
struct ath11k_pdev *pdev;
- struct ath11k_vif *arvif;
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->ar) {
- arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id);
- if (arvif)
- return arvif->ar;
+ if (pdev->ar->allocated_vdev_map & (1LL << vdev_id))
+ return pdev->ar;
}
}
@@ -1940,6 +1956,31 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
&info->he_obss_pd);
+ if (changed & BSS_CHANGED_HE_BSS_COLOR) {
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = ath11k_wmi_send_obss_color_collision_cfg_cmd(
+ ar, arvif->vdev_id, info->he_bss_color.color,
+ ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS,
+ !info->he_bss_color.disabled);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ } else if (vif->type == NL80211_IFTYPE_STATION) {
+ ret = ath11k_wmi_send_bss_color_change_enable_cmd(ar,
+ arvif->vdev_id,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to enable bss color change on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ ret = ath11k_wmi_send_obss_color_collision_cfg_cmd(
+ ar, arvif->vdev_id, 0,
+ ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS, 1);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -2309,6 +2350,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
const u8 *peer_addr;
int ret = 0;
u32 flags = 0;
@@ -2366,15 +2408,53 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto exit;
}
+ ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key);
+ if (ret) {
+ ath11k_warn(ab, "failed to offload PN replay detection %d\n", ret);
+ goto exit;
+ }
+
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
- if (peer && cmd == SET_KEY)
+ if (peer && cmd == SET_KEY) {
peer->keys[key->keyidx] = key;
- else if (peer && cmd == DISABLE_KEY)
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ peer->ucast_keyidx = key->keyidx;
+ peer->sec_type = ath11k_dp_tx_get_encrypt_type(key->cipher);
+ } else {
+ peer->mcast_keyidx = key->keyidx;
+ peer->sec_type_grp = ath11k_dp_tx_get_encrypt_type(key->cipher);
+ }
+ } else if (peer && cmd == DISABLE_KEY) {
peer->keys[key->keyidx] = NULL;
- else if (!peer)
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ peer->ucast_keyidx = 0;
+ else
+ peer->mcast_keyidx = 0;
+ } else if (!peer)
/* impossible unless FW goes crazy */
ath11k_warn(ab, "peer %pM disappeared!\n", peer_addr);
+
+ if (sta) {
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (cmd == SET_KEY)
+ arsta->pn_type = HAL_PN_TYPE_WPA;
+ else
+ arsta->pn_type = HAL_PN_TYPE_NONE;
+ break;
+ default:
+ arsta->pn_type = HAL_PN_TYPE_NONE;
+ break;
+ }
+ }
+
spin_unlock_bh(&ab->base_lock);
exit:
@@ -2786,6 +2866,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k_peer *peer;
int ret = 0;
/* cancel must be done outside the mutex to avoid deadlock */
@@ -2818,6 +2899,17 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
sta->addr, arvif->vdev_id);
ath11k_mac_dec_num_stations(arvif, sta);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer && peer->sta == sta) {
+ ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
kfree(arsta->tx_stats);
arsta->tx_stats = NULL;
@@ -3874,6 +3966,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
ar->num_started_vdevs = 0;
ar->num_created_vdevs = 0;
ar->num_peers = 0;
+ ar->allocated_vdev_map = 0;
/* Configure monitor status ring with default rx_filter to get rx status
* such as rssi, rx_duration.
@@ -3885,6 +3978,9 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
goto err;
}
+ /* Configure the hash seed for hash based reo dest ring selection */
+ ath11k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id);
+
mutex_unlock(&ar->conf_mutex);
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
@@ -4112,8 +4208,9 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
}
ar->num_created_vdevs++;
-
+ ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
+
spin_lock_bh(&ar->data_lock);
list_add(&arvif->list, &ar->arvifs);
spin_unlock_bh(&ar->data_lock);
@@ -4227,6 +4324,7 @@ err_peer_del:
err_vdev_del:
ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
ar->num_created_vdevs--;
+ ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
ab->free_vdev_map |= 1LL << arvif->vdev_id;
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
@@ -4263,7 +4361,6 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
ath11k_dbg(ab, ATH11K_DBG_MAC, "mac remove interface (vdev %d)\n",
arvif->vdev_id);
- ab->free_vdev_map |= 1LL << (arvif->vdev_id);
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
spin_unlock_bh(&ar->data_lock);
@@ -4281,6 +4378,8 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
ar->num_created_vdevs--;
+ ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+ ab->free_vdev_map |= 1LL << (arvif->vdev_id);
ath11k_peer_cleanup(ar, arvif->vdev_id);
@@ -5699,6 +5798,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(ar->hw, USES_RSS);
}
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
@@ -5730,6 +5830,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
ar->hw->queues = ATH11K_HW_MAX_QUEUES;
+ ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN;
ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
@@ -5873,6 +5974,8 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
init_completion(&ar->bss_survey_done);
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
+ init_completion(&ar->thermal.wmi_sync);
+
INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index f286531cdd78..0607479774a9 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -144,4 +144,6 @@ void ath11k_mac_drain_tx(struct ath11k *ar);
void ath11k_mac_peer_cleanup_all(struct ath11k *ar);
int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
+enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw);
+enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 4bf1dfa498b6..f43deacc01bd 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -228,6 +228,9 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
peer->sta = sta;
arvif->ast_hash = peer->ast_hash;
+ peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
+ peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
+
ar->num_peers++;
spin_unlock_bh(&ar->ab->base_lock);
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
index 9a40d1f6ccd9..ccca1523a6ea 100644
--- a/drivers/net/wireless/ath/ath11k/peer.h
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -17,6 +17,15 @@ struct ath11k_peer {
/* protected by ab->data_lock */
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
struct dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
+
+ /* Info used in MMIC verification of
+ * RX fragments
+ */
+ struct crypto_shash *tfm_mmic;
+ u8 mcast_keyidx;
+ u8 ucast_keyidx;
+ u16 sec_type;
+ u16 sec_type_grp;
};
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
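
The tfm_mmic handle added to struct ath11k_peer is intended for Michael MIC verification when TKIP rx fragments are reassembled in the driver. A minimal allocation sketch using the kernel's "michael_mic" shash (the surrounding error handling is assumed, not quoted from the patch):

	struct crypto_shash *tfm;

	tfm = crypto_alloc_shash("michael_mic", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	peer->tfm_mmic = tfm;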
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 2377895a58ec..c00a99ad8dbc 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -2365,6 +2365,7 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
break;
case ATH11K_QMI_EVENT_FW_READY:
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
+ ath11k_hal_dump_srng_stats(ab);
queue_work(ab->workqueue, &ab->restart_work);
break;
}
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
index a5aff801f17f..1c4264637a41 100644
--- a/drivers/net/wireless/ath/ath11k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -342,7 +342,7 @@ struct rx_attention {
#define RX_MPDU_START_INFO0_PROTO_VER_ERR BIT(12)
#define RX_MPDU_START_INFO0_AST_LOOKUP_VALID BIT(13)
-#define RX_MPDU_START_INFO1_MPDU_CTRL_VALID BIT(0)
+#define RX_MPDU_START_INFO1_MPDU_FCTRL_VALID BIT(0)
#define RX_MPDU_START_INFO1_MPDU_DUR_VALID BIT(1)
#define RX_MPDU_START_INFO1_MAC_ADDR1_VALID BIT(2)
#define RX_MPDU_START_INFO1_MAC_ADDR2_VALID BIT(3)
@@ -1209,4 +1209,12 @@ struct hal_rx_desc {
u8 msdu_payload[0];
} __packed;
+#define HAL_RX_RU_ALLOC_TYPE_MAX 6
+#define RU_26 1
+#define RU_52 2
+#define RU_106 4
+#define RU_242 9
+#define RU_484 18
+#define RU_996 37
+
#endif /* ATH11K_RX_DESC_H */
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
new file mode 100644
index 000000000000..259dddbda2c7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include "core.h"
+#include "debug.h"
+
+static int
+ath11k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = ATH11K_THERMAL_THROTTLE_MAX;
+
+ return 0;
+}
+
+static int
+ath11k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct ath11k *ar = cdev->devdata;
+
+ mutex_lock(&ar->conf_mutex);
+ *state = ar->thermal.throttle_state;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int
+ath11k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long throttle_state)
+{
+ struct ath11k *ar = cdev->devdata;
+ int ret;
+
+ if (throttle_state > ATH11K_THERMAL_THROTTLE_MAX) {
+ ath11k_warn(ar->ab, "throttle state %ld is exceeding the limit %d\n",
+ throttle_state, ATH11K_THERMAL_THROTTLE_MAX);
+ return -EINVAL;
+ }
+ mutex_lock(&ar->conf_mutex);
+ ret = ath11k_thermal_set_throttling(ar, throttle_state);
+ if (ret == 0)
+ ar->thermal.throttle_state = throttle_state;
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static struct thermal_cooling_device_ops ath11k_thermal_ops = {
+ .get_max_state = ath11k_thermal_get_max_throttle_state,
+ .get_cur_state = ath11k_thermal_get_cur_throttle_state,
+ .set_cur_state = ath11k_thermal_set_cur_throttle_state,
+};
+
+static ssize_t ath11k_thermal_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ath11k *ar = dev_get_drvdata(dev);
+ int ret, temperature;
+ unsigned long time_left;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* Can't get temperature when the card is off */
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ reinit_completion(&ar->thermal.wmi_sync);
+ ret = ath11k_wmi_send_pdev_temperature_cmd(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to read temperature %d\n", ret);
+ goto out;
+ }
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) {
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+ ATH11K_THERMAL_SYNC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath11k_warn(ar->ab, "failed to synchronize thermal read\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ temperature = ar->thermal.temperature;
+ spin_unlock_bh(&ar->data_lock);
+
+ /* display in millidegree Celsius */
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+void ath11k_thermal_event_temperature(struct ath11k *ar, int temperature)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->thermal.temperature = temperature;
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->thermal.wmi_sync);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, ath11k_thermal_show_temp,
+ NULL, 0);
+
+static struct attribute *ath11k_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ath11k_hwmon);
+
+int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state)
+{
+ struct ath11k_base *sc = ar->ab;
+ struct thermal_mitigation_params param;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ return 0;
+
+ memset(&param, 0, sizeof(param));
+ param.pdev_id = ar->pdev->pdev_id;
+ param.enable = throttle_state ? 1 : 0;
+ param.dc = ATH11K_THERMAL_DEFAULT_DUTY_CYCLE;
+ param.dc_per_event = 0xFFFFFFFF;
+
+ param.levelconf[0].tmplwm = ATH11K_THERMAL_TEMP_LOW_MARK;
+ param.levelconf[0].tmphwm = ATH11K_THERMAL_TEMP_HIGH_MARK;
+ param.levelconf[0].dcoffpercent = throttle_state;
+ param.levelconf[0].priority = 0; /* disable all data tx queues */
+
+ ret = ath11k_wmi_send_thermal_mitigation_param_cmd(ar, &param);
+ if (ret) {
+ ath11k_warn(sc, "failed to send thermal mitigation duty cycle %u ret %d\n",
+ throttle_state, ret);
+ }
+
+ return ret;
+}
+
+int ath11k_thermal_register(struct ath11k_base *sc)
+{
+ struct thermal_cooling_device *cdev;
+ struct device *hwmon_dev;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i, ret;
+
+ for (i = 0; i < sc->num_radios; i++) {
+ pdev = &sc->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ cdev = thermal_cooling_device_register("ath11k_thermal", ar,
+ &ath11k_thermal_ops);
+
+ if (IS_ERR(cdev)) {
+ ath11k_err(sc, "failed to setup thermal device result: %ld\n",
+ PTR_ERR(cdev));
+ return -EINVAL;
+ }
+
+ ret = sysfs_create_link(&ar->hw->wiphy->dev.kobj, &cdev->device.kobj,
+ "cooling_device");
+ if (ret) {
+ ath11k_err(sc, "failed to create cooling device symlink\n");
+ goto err_thermal_destroy;
+ }
+
+ ar->thermal.cdev = cdev;
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&ar->hw->wiphy->dev,
+ "ath11k_hwmon", ar,
+ ath11k_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ ath11k_err(ar->ab, "failed to register hwmon device: %ld\n",
+ PTR_ERR(hwmon_dev));
+ ret = -EINVAL;
+ goto err_thermal_destroy;
+ }
+ }
+
+ return 0;
+
+err_thermal_destroy:
+ ath11k_thermal_unregister(sc);
+ return ret;
+}
+
+void ath11k_thermal_unregister(struct ath11k_base *sc)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < sc->num_radios; i++) {
+ pdev = &sc->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ sysfs_remove_link(&ar->hw->wiphy->dev.kobj, "cooling_device");
+ thermal_cooling_device_unregister(ar->thermal.cdev);
+ }
+}
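
To make the cooling-device numbers concrete: the throttle state registered above is a percentage of forced off-time within each ATH11K_THERMAL_DEFAULT_DUTY_CYCLE window, so state 40 with the default duty cycle of 100 asks the firmware to quiesce tx for roughly 40% of every cycle. A sketch of that arithmetic, assuming the duty cycle is expressed in milliseconds (the helper name is illustrative):

static u32 thermal_quiet_ms(u32 duty_cycle_ms, u32 throttle_state)
{
	/* throttle_state is clamped to ATH11K_THERMAL_THROTTLE_MAX (100) */
	return duty_cycle_ms * throttle_state / 100;
}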
diff --git a/drivers/net/wireless/ath/ath11k/thermal.h b/drivers/net/wireless/ath/ath11k/thermal.h
new file mode 100644
index 000000000000..459b8d49c184
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/thermal.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ATH11K_THERMAL_
+#define _ATH11K_THERMAL_
+
+#define ATH11K_THERMAL_TEMP_LOW_MARK -100
+#define ATH11K_THERMAL_TEMP_HIGH_MARK 150
+#define ATH11K_THERMAL_THROTTLE_MAX 100
+#define ATH11K_THERMAL_DEFAULT_DUTY_CYCLE 100
+#define ATH11K_HWMON_NAME_LEN 15
+#define ATH11K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
+
+struct ath11k_thermal {
+ struct thermal_cooling_device *cdev;
+ struct completion wmi_sync;
+
+ /* protected by conf_mutex */
+ u32 throttle_state;
+ /* temperature value in degrees Celsius,
+ * protected by data_lock
+ */
+ int temperature;
+};
+
+#if IS_REACHABLE(CONFIG_THERMAL)
+int ath11k_thermal_register(struct ath11k_base *sc);
+void ath11k_thermal_unregister(struct ath11k_base *sc);
+int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state);
+void ath11k_thermal_event_temperature(struct ath11k *ar, int temperature);
+#else
+static inline int ath11k_thermal_register(struct ath11k_base *sc)
+{
+ return 0;
+}
+
+static inline void ath11k_thermal_unregister(struct ath11k_base *sc)
+{
+}
+
+static inline int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state)
+{
+ return 0;
+}
+
+static inline void ath11k_thermal_event_temperature(struct ath11k *ar,
+ int temperature)
+{
+}
+
+#endif
+#endif /* _ATH11K_THERMAL_ */
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index a9b301ceb24b..e7ce36966d6a 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -1471,6 +1471,34 @@ int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
return ret;
}
+int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_get_pdev_temperature_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_GET_TEMPERATURE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
+
+ return ret;
+}
+
int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
u32 vdev_id, u32 bcn_ctrl_op)
{
@@ -2442,6 +2470,70 @@ out:
return ret;
}
+int
+ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
+ struct thermal_mitigation_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_therm_throt_config_request_cmd *cmd;
+ struct wmi_therm_throt_level_config_info *lvl_conf;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int i, ret, len;
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE +
+ THERMAL_LEVELS * sizeof(struct wmi_therm_throt_level_config_info);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_therm_throt_config_request_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_CONFIG_REQUEST) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = ar->pdev->pdev_id;
+ cmd->enable = param->enable;
+ cmd->dc = param->dc;
+ cmd->dc_per_event = param->dc_per_event;
+ cmd->therm_throt_levels = THERMAL_LEVELS;
+
+ tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN,
+ (THERMAL_LEVELS *
+ sizeof(struct wmi_therm_throt_level_config_info)));
+
+ lvl_conf = (struct wmi_therm_throt_level_config_info *)(skb->data +
+ sizeof(*cmd) +
+ TLV_HDR_SIZE);
+ for (i = 0; i < THERMAL_LEVELS; i++) {
+ lvl_conf->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*lvl_conf) - TLV_HDR_SIZE);
+
+ lvl_conf->temp_lwm = param->levelconf[i].tmplwm;
+ lvl_conf->temp_hwm = param->levelconf[i].tmphwm;
+ lvl_conf->dc_off_percent = param->levelconf[i].dcoffpercent;
+ lvl_conf->prio = param->levelconf[i].priority;
+ lvl_conf++;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_THERM_THROT_SET_CONF_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send THERM_THROT_SET_CONF cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI vdev set thermal throt pdev_id %d enable %d dc %d dc_per_event %x levels %d\n",
+ ar->pdev->pdev_id, param->enable, param->dc,
+ param->dc_per_event, THERMAL_LEVELS);
+
+ return ret;
+}
+
int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
@@ -2615,6 +2707,84 @@ ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
return ret;
}
+int
+ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
+ u8 bss_color, u32 period,
+ bool enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_obss_color_collision_cfg_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->evt_type = enable ? ATH11K_OBSS_COLOR_COLLISION_DETECTION :
+ ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE;
+ cmd->current_bss_color = bss_color;
+ cmd->detection_period_ms = period;
+ cmd->scan_period_ms = ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS;
+ cmd->free_slot_expiry_time_ms = 0;
+ cmd->flags = 0;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
+ cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
+ cmd->detection_period_ms, cmd->scan_period_ms);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
+ bool enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_bss_color_change_enable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BSS_COLOR_CHANGE_ENABLE) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->enable = enable ? 1 : 0;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi_send_bss_color_change_enable id %d enable %d\n",
+ cmd->vdev_id, cmd->enable);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_TWT_DIeABLE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
static void
ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
struct wmi_host_pdev_band_to_mac *band_to_mac)
@@ -2825,6 +2995,41 @@ static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi,
return ret;
}
+int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar,
+ int pdev_id)
+{
+ struct ath11k_wmi_pdev_lro_config_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath11k_wmi_pdev_lro_config_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ get_random_bytes(cmd->th_4, sizeof(u32) * ATH11K_IPV4_TH_SEED_SIZE);
+ get_random_bytes(cmd->th_6, sizeof(u32) * ATH11K_IPV6_TH_SEED_SIZE);
+
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send lro cfg req wmi cmd\n");
+ goto err;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab)
{
unsigned long time_left;
@@ -4168,6 +4373,31 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
return 0;
}
+static int
+ath11k_pull_pdev_temp_ev(struct ath11k_base *ab, u8 *evt_buf,
+ u32 len, struct wmi_pdev_temperature_event *ev)
+{
+ const void **tb;
+ const struct wmi_pdev_temperature_event *parsed;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ parsed = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
+ if (!parsed) {
+ ath11k_warn(ab, "failed to fetch pdev temp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ /* copy the parsed fields out so the caller's on-stack event is populated */
+ *ev = *parsed;
+
+ kfree(tb);
+ return 0;
+}
+
size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
{
struct ath11k_fw_stats_vdev *i;
@@ -5345,15 +5575,18 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff
"peer assoc conf ev vdev id %d macaddr %pM\n",
peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
+ rcu_read_lock();
ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
peer_assoc_conf.vdev_id);
+ rcu_read_unlock();
return;
}
complete(&ar->peer_assoc_done);
+ rcu_read_unlock();
}
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
@@ -5511,6 +5744,30 @@ exit:
kfree(tb);
}
+static void
+ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ struct wmi_pdev_temperature_event ev = {0};
+
+ if (ath11k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
+ ath11k_warn(ab, "failed to extract pdev temperature event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev.pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
+ return;
+ }
+
+ ath11k_thermal_event_temperature(ar, ev.temp);
+}
+
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -5588,6 +5845,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
break;
+ case WMI_PDEV_TEMPERATURE_EVENTID:
+ ath11k_wmi_pdev_temperature_event(ab, skb);
+ break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_VDEV_DELETE_RESP_EVENTID:
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 1fde15c762ad..510f9c6bc1d7 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -442,6 +442,10 @@ enum wmi_tlv_cmd_id {
WMI_DBGLOG_TIME_STAMP_SYNC_CMDID,
WMI_SET_MULTIPLE_MCAST_FILTER_CMDID,
WMI_READ_DATA_FROM_FLASH_CMDID,
+ WMI_THERM_THROT_SET_CONF_CMDID,
+ WMI_RUNTIME_DPD_RECAL_CMDID,
+ WMI_GET_TPC_POWER_CMDID,
+ WMI_IDLE_TRIGGER_MONITOR_CMDID,
WMI_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_GPIO),
WMI_GPIO_OUTPUT_CMDID,
WMI_TXBF_CMDID,
@@ -484,6 +488,7 @@ enum wmi_tlv_cmd_id {
WMI_SAR_LIMITS_CMDID,
WMI_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_OBSS_OFL),
WMI_OBSS_SCAN_DISABLE_CMDID,
+ WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID,
WMI_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_LPI),
WMI_LPI_START_SCAN_CMDID,
WMI_LPI_STOP_SCAN_CMDID,
@@ -3300,6 +3305,12 @@ struct wmi_request_stats_cmd {
u32 pdev_id;
} __packed;
+struct wmi_get_pdev_temperature_cmd {
+ u32 tlv_header;
+ u32 param;
+ u32 pdev_id;
+} __packed;
+
#define WMI_BEACON_TX_BUFFER_SIZE 512
struct wmi_bcn_tmpl_cmd {
@@ -3605,6 +3616,39 @@ struct wmi_init_country_cmd {
} cc_info;
} __packed;
+#define THERMAL_LEVELS 1
+struct tt_level_config {
+ u32 tmplwm;
+ u32 tmphwm;
+ u32 dcoffpercent;
+ u32 priority;
+};
+
+struct thermal_mitigation_params {
+ u32 pdev_id;
+ u32 enable;
+ u32 dc;
+ u32 dc_per_event;
+ struct tt_level_config levelconf[THERMAL_LEVELS];
+};
+
+struct wmi_therm_throt_config_request_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+ u32 dc;
+ u32 dc_per_event;
+ u32 therm_throt_levels;
+} __packed;
+
+struct wmi_therm_throt_level_config_info {
+ u32 tlv_header;
+ u32 temp_lwm;
+ u32 temp_hwm;
+ u32 dc_off_percent;
+ u32 prio;
+} __packed;
+
struct wmi_pdev_pktlog_filter_info {
u32 tlv_header;
struct wmi_mac_addr peer_macaddr;
@@ -4095,6 +4139,12 @@ struct wmi_pdev_radar_ev {
s32 sidx;
} __packed;
+struct wmi_pdev_temperature_event {
+ /* temperature value in degrees Celsius */
+ s32 temp;
+ u32 pdev_id;
+} __packed;
+
#define WMI_RX_STATUS_OK 0x00
#define WMI_RX_STATUS_ERR_CRC 0x01
#define WMI_RX_STATUS_ERR_DECRYPT 0x08
@@ -4571,6 +4621,42 @@ struct wmi_obss_spatial_reuse_params_cmd {
u32 vdev_id;
} __packed;
+#define ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS 200
+#define ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE 0
+#define ATH11K_OBSS_COLOR_COLLISION_DETECTION 1
+
+#define ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS 10000
+#define ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS 5000
+
+struct wmi_obss_color_collision_cfg_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 flags;
+ u32 evt_type;
+ u32 current_bss_color;
+ u32 detection_period_ms;
+ u32 scan_period_ms;
+ u32 free_slot_expiry_time_ms;
+} __packed;
+
+struct wmi_bss_color_change_enable_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 enable;
+} __packed;
+
+#define ATH11K_IPV4_TH_SEED_SIZE 5
+#define ATH11K_IPV6_TH_SEED_SIZE 11
+
+struct ath11k_wmi_pdev_lro_config_cmd {
+ u32 tlv_header;
+ u32 lro_enable;
+ u32 res;
+ u32 th_4[ATH11K_IPV4_TH_SEED_SIZE];
+ u32 th_6[ATH11K_IPV6_TH_SEED_SIZE];
+ u32 pdev_id;
+} __packed;
+
struct target_resource_config {
u32 num_vdevs;
u32 num_peers;
@@ -4726,6 +4812,7 @@ int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
enum wmi_bss_chan_info_req_type type);
int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
struct stats_request_params *param);
+int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar);
int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
u8 peer_addr[ETH_ALEN],
struct peer_flush_params *param);
@@ -4740,6 +4827,9 @@ int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
int
ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
struct wmi_init_country_params init_cc_param);
+int
+ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
+ struct thermal_mitigation_params *param);
int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter);
int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar);
int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable);
@@ -4761,4 +4851,10 @@ int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id);
int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
struct ieee80211_he_obss_pd *he_obss_pd);
+int ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
+ u8 bss_color, u32 period,
+ bool enable);
+int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
+ bool enable);
+int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar, int pdev_id);
#endif
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 802f8f87773a..96010d4b00e7 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -54,7 +54,7 @@ config ATH5K_TRACER
config ATH5K_AHB
bool "Atheros 5xxx AHB bus support"
- depends on ATH25
+ depends on ATH25 && ATH5K
---help---
This adds support for WiSoC type chipsets of the 5xxx Atheros
family.
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 94f70047d3fc..2eaba1ccab20 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -201,35 +201,35 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
u64 tsf;
v = ath5k_hw_reg_read(ah, AR5K_BEACON);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
"AR5K_BEACON", v, v & AR5K_BEACON_PERIOD,
(v & AR5K_BEACON_TIM) >> AR5K_BEACON_TIM_S);
- len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n",
"AR5K_LAST_TSTP", ath5k_hw_reg_read(ah, AR5K_LAST_TSTP));
- len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n\n",
"AR5K_BEACON_CNT", ath5k_hw_reg_read(ah, AR5K_BEACON_CNT));
v = ath5k_hw_reg_read(ah, AR5K_TIMER0);
- len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER0 (TBTT)", v, v);
v = ath5k_hw_reg_read(ah, AR5K_TIMER1);
- len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER1 (DMA)", v, v >> 3);
v = ath5k_hw_reg_read(ah, AR5K_TIMER2);
- len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER2 (SWBA)", v, v >> 3);
v = ath5k_hw_reg_read(ah, AR5K_TIMER3);
- len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER3 (ATIM)", v, v);
tsf = ath5k_hw_get_tsf64(ah);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"TSF\t\t0x%016llx\tTU: %08x\n",
(unsigned long long)tsf, TSF_TO_TU(tsf));
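
The snprintf-to-scnprintf conversion repeated through this file is a correctness fix, not style: snprintf() returns the length that would have been written, so on truncation "len += snprintf(...)" can push len past sizeof(buf), after which the next "sizeof(buf) - len" underflows to a huge size_t. scnprintf() returns the bytes actually stored, keeping len bounded. A minimal illustration with a deliberately small buffer:

	char buf[8];

	int a = snprintf(buf, sizeof(buf), "0123456789");  /* a == 10 */
	int b = scnprintf(buf, sizeof(buf), "0123456789"); /* b == 7  */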
@@ -320,16 +320,16 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
unsigned int len = 0;
unsigned int i;
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
for (i = 0; i < ARRAY_SIZE(dbg_info) - 1; i++) {
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"%10s %c 0x%08x - %s\n", dbg_info[i].name,
ah->debug.level & dbg_info[i].level ? '+' : ' ',
dbg_info[i].level, dbg_info[i].desc);
}
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"%10s %c 0x%08x - %s\n", dbg_info[i].name,
ah->debug.level == dbg_info[i].level ? '+' : ' ',
dbg_info[i].level, dbg_info[i].desc);
@@ -383,60 +383,60 @@ static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
unsigned int i;
unsigned int v;
- len += snprintf(buf + len, sizeof(buf) - len, "antenna mode\t%d\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "antenna mode\t%d\n",
ah->ah_ant_mode);
- len += snprintf(buf + len, sizeof(buf) - len, "default antenna\t%d\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "default antenna\t%d\n",
ah->ah_def_ant);
- len += snprintf(buf + len, sizeof(buf) - len, "tx antenna\t%d\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "tx antenna\t%d\n",
ah->ah_tx_ant);
- len += snprintf(buf + len, sizeof(buf) - len, "\nANTENNA\t\tRX\tTX\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "\nANTENNA\t\tRX\tTX\n");
for (i = 1; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"[antenna %d]\t%d\t%d\n",
i, ah->stats.antenna_rx[i], ah->stats.antenna_tx[i]);
}
- len += snprintf(buf + len, sizeof(buf) - len, "[invalid]\t%d\t%d\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "[invalid]\t%d\t%d\n",
ah->stats.antenna_rx[0], ah->stats.antenna_tx[0]);
v = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);
v = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
(v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_DESC_ANTENNA\t%d\n",
(v & AR5K_STA_ID1_DESC_ANTENNA) != 0);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n",
(v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
(v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);
v = ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
(v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);
v = ath5k_hw_reg_read(ah, AR5K_PHY_RESTART);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
(v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S);
v = ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ANT_DIV);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
(v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);
v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v);
v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v);
if (len > sizeof(buf))
@@ -495,36 +495,36 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
unsigned int len = 0;
u32 filt = ath5k_hw_get_rx_filter(ah);
- len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
ah->bssidmask);
- len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
+ len += scnprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
filt);
if (filt & AR5K_RX_FILTER_UCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " UCAST");
if (filt & AR5K_RX_FILTER_MCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " MCAST");
if (filt & AR5K_RX_FILTER_BCAST)
- len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BCAST");
if (filt & AR5K_RX_FILTER_CONTROL)
- len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL");
if (filt & AR5K_RX_FILTER_BEACON)
- len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
+ len += scnprintf(buf + len, sizeof(buf) - len, " BEACON");
if (filt & AR5K_RX_FILTER_PROM)
- len += snprintf(buf + len, sizeof(buf) - len, " PROM");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PROM");
if (filt & AR5K_RX_FILTER_XRPOLL)
- len += snprintf(buf + len, sizeof(buf) - len, " XRPOLL");
+ len += scnprintf(buf + len, sizeof(buf) - len, " XRPOLL");
if (filt & AR5K_RX_FILTER_PROBEREQ)
- len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
if (filt & AR5K_RX_FILTER_PHYERR_5212)
- len += snprintf(buf + len, sizeof(buf) - len, " PHYERR-5212");
+ len += scnprintf(buf + len, sizeof(buf) - len, " PHYERR-5212");
if (filt & AR5K_RX_FILTER_RADARERR_5212)
- len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5212");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RADARERR-5212");
if (filt & AR5K_RX_FILTER_PHYERR_5211)
snprintf(buf + len, sizeof(buf) - len, " PHYERR-5211");
if (filt & AR5K_RX_FILTER_RADARERR_5211)
- len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5211");
+ len += scnprintf(buf + len, sizeof(buf) - len, " RADARERR-5211");
- len += snprintf(buf + len, sizeof(buf) - len, "\nopmode: %s (%d)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "\nopmode: %s (%d)\n",
ath_opmode_to_string(ah->opmode), ah->opmode);
if (len > sizeof(buf))
@@ -551,65 +551,65 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
unsigned int len = 0;
int i;
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"RX\n---------------------\n");
- len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
st->rxerr_crc,
st->rx_all_count > 0 ?
st->rxerr_crc * 100 / st->rx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "PHY\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "PHY\t%u\t(%u%%)\n",
st->rxerr_phy,
st->rx_all_count > 0 ?
st->rxerr_phy * 100 / st->rx_all_count : 0);
for (i = 0; i < 32; i++) {
if (st->rxerr_phy_code[i])
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
" phy_err[%u]\t%u\n",
i, st->rxerr_phy_code[i]);
}
- len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
st->rxerr_fifo,
st->rx_all_count > 0 ?
st->rxerr_fifo * 100 / st->rx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "decrypt\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "decrypt\t%u\t(%u%%)\n",
st->rxerr_decrypt,
st->rx_all_count > 0 ?
st->rxerr_decrypt * 100 / st->rx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "MIC\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "MIC\t%u\t(%u%%)\n",
st->rxerr_mic,
st->rx_all_count > 0 ?
st->rxerr_mic * 100 / st->rx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "process\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "process\t%u\t(%u%%)\n",
st->rxerr_proc,
st->rx_all_count > 0 ?
st->rxerr_proc * 100 / st->rx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "jumbo\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "jumbo\t%u\t(%u%%)\n",
st->rxerr_jumbo,
st->rx_all_count > 0 ?
st->rxerr_jumbo * 100 / st->rx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "[RX all\t%u]\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "[RX all\t%u]\n",
st->rx_all_count);
- len += snprintf(buf + len, sizeof(buf) - len, "RX-all-bytes\t%u\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "RX-all-bytes\t%u\n",
st->rx_bytes_count);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"\nTX\n---------------------\n");
- len += snprintf(buf + len, sizeof(buf) - len, "retry\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "retry\t%u\t(%u%%)\n",
st->txerr_retry,
st->tx_all_count > 0 ?
st->txerr_retry * 100 / st->tx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
st->txerr_fifo,
st->tx_all_count > 0 ?
st->txerr_fifo * 100 / st->tx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "filter\t%u\t(%u%%)\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "filter\t%u\t(%u%%)\n",
st->txerr_filt,
st->tx_all_count > 0 ?
st->txerr_filt * 100 / st->tx_all_count : 0);
- len += snprintf(buf + len, sizeof(buf) - len, "[TX all\t%u]\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "[TX all\t%u]\n",
st->tx_all_count);
- len += snprintf(buf + len, sizeof(buf) - len, "TX-all-bytes\t%u\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "TX-all-bytes\t%u\n",
st->tx_bytes_count);
if (len > sizeof(buf))
@@ -670,56 +670,56 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
char buf[700];
unsigned int len = 0;
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"HW has PHY error counters:\t%s\n",
ah->ah_capabilities.cap_has_phyerr_counters ?
"yes" : "no");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"HW max spur immunity level:\t%d\n",
as->max_spur_level);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"\nANI state\n--------------------------------------------\n");
- len += snprintf(buf + len, sizeof(buf) - len, "operating mode:\t\t\t");
+ len += scnprintf(buf + len, sizeof(buf) - len, "operating mode:\t\t\t");
switch (as->ani_mode) {
case ATH5K_ANI_MODE_OFF:
- len += snprintf(buf + len, sizeof(buf) - len, "OFF\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "OFF\n");
break;
case ATH5K_ANI_MODE_MANUAL_LOW:
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"MANUAL LOW\n");
break;
case ATH5K_ANI_MODE_MANUAL_HIGH:
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"MANUAL HIGH\n");
break;
case ATH5K_ANI_MODE_AUTO:
- len += snprintf(buf + len, sizeof(buf) - len, "AUTO\n");
+ len += scnprintf(buf + len, sizeof(buf) - len, "AUTO\n");
break;
default:
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"??? (not good)\n");
break;
}
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"noise immunity level:\t\t%d\n",
as->noise_imm_level);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"spur immunity level:\t\t%d\n",
as->spur_level);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"firstep level:\t\t\t%d\n",
as->firstep_level);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"OFDM weak signal detection:\t%s\n",
as->ofdm_weak_sig ? "on" : "off");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"CCK weak signal detection:\t%s\n",
as->cck_weak_sig ? "on" : "off");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"\nMIB INTERRUPTS:\t\t%u\n",
st->mib_intr);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"beacon RSSI average:\t%d\n",
(int)ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg));
@@ -728,35 +728,35 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
_struct.cycles > 0 ? \
_struct._field * 100 / _struct.cycles : 0
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"profcnt tx\t\t%u\t(%d%%)\n",
CC_PRINT(as->last_cc, tx_frame));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"profcnt rx\t\t%u\t(%d%%)\n",
CC_PRINT(as->last_cc, rx_frame));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"profcnt busy\t\t%u\t(%d%%)\n",
CC_PRINT(as->last_cc, rx_busy));
#undef CC_PRINT
- len += snprintf(buf + len, sizeof(buf) - len, "profcnt cycles\t\t%u\n",
+ len += scnprintf(buf + len, sizeof(buf) - len, "profcnt cycles\t\t%u\n",
as->last_cc.cycles);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"listen time\t\t%d\tlast: %d\n",
as->listen_time, as->last_listen);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"OFDM errors\t\t%u\tlast: %u\tsum: %u\n",
as->ofdm_errors, as->last_ofdm_errors,
as->sum_ofdm_errors);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"CCK errors\t\t%u\tlast: %u\tsum: %u\n",
as->cck_errors, as->last_cck_errors,
as->sum_cck_errors);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1),
ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1)));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2),
ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
@@ -836,13 +836,13 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
struct ath5k_buf *bf, *bf0;
int i, n;
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"available txbuffers: %d\n", ah->txbuf_len);
for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
txq = &ah->txqs[i];
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
"%02d: %ssetup\n", i, txq->setup ? "" : "not ");
if (!txq->setup)
@@ -854,9 +854,9 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
n++;
spin_unlock_bh(&txq->lock);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
" len: %d bufs: %d\n", txq->txq_len, n);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += scnprintf(buf + len, sizeof(buf) - len,
" stuck: %d\n", txq->txq_stuck);
}
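
The conversions above hinge on a return-value contract: snprintf() reports the length the output *would* have needed, so once truncation occurs the accumulated len can exceed sizeof(buf), and the next size argument, sizeof(buf) - len, wraps around as an unsigned value. scnprintf() reports only what was actually written and can never overshoot. A minimal sketch of the safe accumulation pattern (illustrative only, not driver code):

	char buf[32];
	size_t len = 0;

	/* each call is bounded; len <= sizeof(buf) - 1 holds afterwards */
	len += scnprintf(buf + len, sizeof(buf) - len, "filter-flags:");
	len += scnprintf(buf + len, sizeof(buf) - len, " UCAST");
	len += scnprintf(buf + len, sizeof(buf) - len, " MCAST");

With snprintf() in the same chain, a long enough output would push len past the buffer and turn a later call into an out-of-bounds write.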
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 0548aa3702e3..457e9b0d21ca 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1196,6 +1196,9 @@ static void ath9k_tpc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
int *power = data;
+ if (vif->bss_conf.txpower == INT_MIN)
+ return;
+
if (*power < vif->bss_conf.txpower)
*power = vif->bss_conf.txpower;
}
@@ -1457,6 +1460,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
}
+ if (changed & IEEE80211_CONF_CHANGE_POWER)
+ ath9k_set_txpower(sc, NULL);
+
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
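
Both hunks above treat INT_MIN in bss_conf.txpower as "no power configured yet": the iterator skips such vifs so the maximum is taken only over real values, and the tx power is recomputed whenever mac80211 signals IEEE80211_CONF_CHANGE_POWER. A hedged sketch of how such an iterator is driven (the fallback constant and surrounding flow are assumptions, not the exact ath9k code):

	int power = INT_MIN;

	ieee80211_iterate_active_interfaces_atomic(sc->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			ath9k_tpc_vif_iter, &power);
	if (power == INT_MIN)		/* no vif configured a power yet */
		power = MAX_RATE_POWER;	/* assumed hardware maximum */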
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 31e7b108279c..e60d4737fc6e 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2095,10 +2095,13 @@ static void setup_frame_info(struct ieee80211_hw *hw,
if (tx_info->control.vif) {
struct ieee80211_vif *vif = tx_info->control.vif;
-
+ if (vif->bss_conf.txpower == INT_MIN)
+ goto nonvifpower;
txpower = 2 * vif->bss_conf.txpower;
} else {
- struct ath_softc *sc = hw->priv;
+ struct ath_softc *sc;
+ nonvifpower:
+ sc = hw->priv;
txpower = sc->cur_chan->cur_txpower;
}
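
The goto into the else branch keeps a single assignment site for the channel-level fallback: a vif whose txpower is still INT_MIN is treated exactly like having no vif at all. A label-free equivalent of the same logic (sketch only):

	struct ath_softc *sc = hw->priv;

	if (vif && vif->bss_conf.txpower != INT_MIN)
		txpower = 2 * vif->bss_conf.txpower;	/* twice the dBm value, as above */
	else
		txpower = sc->cur_chan->cur_txpower;	/* channel default */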
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index a9b6dc17e408..19009aafc4e1 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -45,7 +45,7 @@
#include "cmd.h"
#define ADD(buf, off, max, fmt, args...) \
- off += snprintf(&buf[off], max - off, fmt, ##args);
+ off += scnprintf(&buf[off], max - off, fmt, ##args);
struct carl9170_debugfs_fops {
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index fef10886ca4a..e481674485c2 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -95,6 +95,7 @@ out_bad:
}
static const struct ethtool_ops wil_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = cfg80211_get_drvinfo,
.get_coalesce = wil_ethtoolops_get_coalesce,
.set_coalesce = wil_ethtoolops_set_coalesce,
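
Since the ethtool core took over validating coalescing requests, every driver implementing the coalesce callbacks must declare which struct ethtool_coalesce fields it honours; requests touching anything else are rejected before the driver callback runs. The shape of the declaration (the callback names here are hypothetical):

	static const struct ethtool_ops example_ethtool_ops = {
		/* only the usecs knobs ever reach set_coalesce */
		.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
		.get_coalesce = example_get_coalesce,	/* hypothetical */
		.set_coalesce = example_set_coalesce,	/* hypothetical */
	};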
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.h b/drivers/net/wireless/atmel/at76c50x-usb.h
index f56863403b05..746e64dfd8aa 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.h
+++ b/drivers/net/wireless/atmel/at76c50x-usb.h
@@ -151,7 +151,7 @@ struct at76_command {
u8 cmd;
u8 reserved;
__le16 size;
- u8 data[0];
+ u8 data[];
} __packed;
/* Length of Atmel-specific Rx header before 802.11 frame */
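
u8 data[0] is the old GNU zero-length-array idiom; the C99 flexible array member u8 data[] describes the same trailing payload while letting the compiler and fortify checks reason about the struct's size. Allocation is unchanged, e.g. (payload and payload_len are hypothetical):

	struct at76_command *cmd;

	cmd = kmalloc(struct_size(cmd, data, payload_len), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
	memcpy(cmd->data, payload, payload_len);	/* trailing bytes */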
diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c
index 1325727a74ed..dc1819ca52ac 100644
--- a/drivers/net/wireless/broadcom/b43/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43/debugfs.c
@@ -51,7 +51,7 @@ struct b43_dfs_file *fops_to_dfs_file(struct b43_wldev *dev,
#define fappend(fmt, x...) \
do { \
if (bufsize - count) \
- count += snprintf(buf + count, \
+ count += scnprintf(buf + count, \
bufsize - count, \
fmt , ##x); \
else \
diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
index 082aab8353b8..fa133dfb2ecb 100644
--- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c
@@ -54,7 +54,7 @@ struct b43legacy_dfs_file * fops_to_dfs_file(struct b43legacy_wldev *dev,
#define fappend(fmt, x...) \
do { \
if (bufsize - count) \
- count += snprintf(buf + count, \
+ count += scnprintf(buf + count, \
bufsize - count, \
fmt , ##x); \
else \
diff --git a/drivers/net/wireless/broadcom/b43legacy/sysfs.c b/drivers/net/wireless/broadcom/b43legacy/sysfs.c
index 9312c1dd3417..eec087ca30e6 100644
--- a/drivers/net/wireless/broadcom/b43legacy/sysfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/sysfs.c
@@ -25,13 +25,15 @@
static int get_integer(const char *buf, size_t count)
{
char tmp[10 + 1] = { 0 };
- int ret = -EINVAL;
+ int ret = -EINVAL, res;
if (count == 0)
goto out;
count = min_t(size_t, count, 10);
memcpy(tmp, buf, count);
- ret = simple_strtol(tmp, NULL, 10);
+ ret = kstrtoint(tmp, 10, &res);
+ if (!ret)
+ return res;
out:
return ret;
}
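
Unlike simple_strtol(), which silently ignores trailing garbage and cannot signal overflow, kstrtoint() parses the whole string, range-checks the result, and returns an error code with the value delivered through an out-parameter, which is why the function above now needs the extra res variable:

	int value, err;

	err = kstrtoint(tmp, 10, &value);
	if (err)		/* -EINVAL or -ERANGE */
		return err;
	return value;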
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index a2328d3eee03..2ba165330038 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2953,7 +2953,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) {
bphy_err(drvr, "Bss info is larger than buffer. Discarding\n");
- return 0;
+ return -EINVAL;
}
if (!bi->ctl_ch) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 282d0bc14e8e..a3a257089696 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -723,6 +723,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
return 0x200000;
case BRCM_CC_4359_CHIP_ID:
return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000;
+ case BRCM_CC_4364_CHIP_ID:
case CY_CC_4373_CHIP_ID:
return 0x160000;
default:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index 3347439543bb..46c66415b4a6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -60,7 +60,7 @@ struct brcmf_fw_request {
u16 bus_nr;
u32 n_items;
const char *board_type;
- struct brcmf_fw_item items[0];
+ struct brcmf_fw_item items[];
};
struct brcmf_fw_name {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 79c8a858b6d6..a5cced2c89ac 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -32,7 +32,7 @@ struct brcmf_fweh_queue_item {
u8 ifaddr[ETH_ALEN];
struct brcmf_event_msg_be emsg;
u32 datalen;
- u8 data[0];
+ u8 data[];
};
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 5e1a11c07551..8cc52935fd41 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -404,7 +404,7 @@ struct brcmf_fws_mac_descriptor {
u8 traffic_lastreported_bmp;
};
-#define BRCMF_FWS_HANGER_MAXITEMS 1024
+#define BRCMF_FWS_HANGER_MAXITEMS 3072
/**
* enum brcmf_fws_hanger_item_state - state of hanger item.
@@ -2145,8 +2145,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb);
brcmf_fws_schedule_deq(fws);
} else {
- bphy_err(drvr, "drop skb: no hanger slot\n");
- brcmf_txfinalize(ifp, skb, false);
+ bphy_err(drvr, "no hanger slot available\n");
rc = -ENOMEM;
}
brcmf_fws_unlock(fws);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 5105f62767fb..39381cbde89e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -52,6 +52,7 @@ BRCMF_FW_DEF(4356, "brcmfmac4356-pcie");
BRCMF_FW_DEF(43570, "brcmfmac43570-pcie");
BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
+BRCMF_FW_DEF(4364, "brcmfmac4364-pcie");
BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
@@ -70,6 +71,7 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
+ BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFFF, 4364),
BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
@@ -2105,6 +2107,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index f9047db6a11d..3a08252f1a53 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1938,6 +1938,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
+ brcmf_sdio_rxfail(bus, true, true);
+ sdio_release_host(bus->sdiodev->func1);
brcmu_pkt_buf_free_skb(pkt);
continue;
}
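
The two added lines appear to close a leak on the error path: the host claim taken for the receive iteration was never released before the continue, and the controller was never told to abort and retransmit the bad frame. Schematically, inside the rx loop (parse_frame_hdr() is a hypothetical stand-in for the hdparse call):

	sdio_claim_host(bus->sdiodev->func1);
	if (parse_frame_hdr(bus) < 0) {			/* hypothetical */
		brcmf_sdio_rxfail(bus, true, true);	/* abort + retransmit */
		sdio_release_host(bus->sdiodev->func1);	/* was leaked before */
		brcmu_pkt_buf_free_skb(pkt);
		continue;				/* next frame */
	}
	/* ... normal frame processing ... */
	sdio_release_host(bus->sdiodev->func1);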
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 575ed19e9195..ac5463838fcf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -164,7 +164,6 @@ struct brcmf_usbdev_info {
struct urb *bulk_urb; /* used for FW download */
- bool wowl_enabled;
struct brcmf_mp_device *settings;
};
@@ -312,27 +311,43 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
int err = 0;
int timeout = 0;
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+ struct usb_interface *intf = to_usb_interface(dev);
brcmf_dbg(USB, "Enter\n");
- if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
- return -EIO;
- if (test_and_set_bit(0, &devinfo->ctl_op))
- return -EIO;
+ err = usb_autopm_get_interface(intf);
+ if (err)
+ goto out;
+
+ if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
+ err = -EIO;
+ goto fail;
+ }
+
+ if (test_and_set_bit(0, &devinfo->ctl_op)) {
+ err = -EIO;
+ goto fail;
+ }
devinfo->ctl_completed = false;
err = brcmf_usb_send_ctl(devinfo, buf, len);
if (err) {
brcmf_err("fail %d bytes: %d\n", err, len);
clear_bit(0, &devinfo->ctl_op);
- return err;
+ goto fail;
}
timeout = brcmf_usb_ioctl_resp_wait(devinfo);
- clear_bit(0, &devinfo->ctl_op);
if (!timeout) {
brcmf_err("Txctl wait timed out\n");
+ usb_kill_urb(devinfo->ctl_urb);
err = -EIO;
+ goto fail;
}
+ clear_bit(0, &devinfo->ctl_op);
+
+fail:
+ usb_autopm_put_interface(intf);
+out:
return err;
}
@@ -341,32 +356,46 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
int err = 0;
int timeout = 0;
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+ struct usb_interface *intf = to_usb_interface(dev);
brcmf_dbg(USB, "Enter\n");
- if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
- return -EIO;
- if (test_and_set_bit(0, &devinfo->ctl_op))
- return -EIO;
+ err = usb_autopm_get_interface(intf);
+ if (err)
+ goto out;
+
+ if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
+ err = -EIO;
+ goto fail;
+ }
+
+ if (test_and_set_bit(0, &devinfo->ctl_op)) {
+ err = -EIO;
+ goto fail;
+ }
devinfo->ctl_completed = false;
err = brcmf_usb_recv_ctl(devinfo, buf, len);
if (err) {
brcmf_err("fail %d bytes: %d\n", err, len);
clear_bit(0, &devinfo->ctl_op);
- return err;
+ goto fail;
}
timeout = brcmf_usb_ioctl_resp_wait(devinfo);
err = devinfo->ctl_urb_status;
- clear_bit(0, &devinfo->ctl_op);
if (!timeout) {
brcmf_err("rxctl wait timed out\n");
+ usb_kill_urb(devinfo->ctl_urb);
err = -EIO;
+ goto fail;
}
+ clear_bit(0, &devinfo->ctl_op);
+fail:
+ usb_autopm_put_interface(intf);
if (!err)
return devinfo->ctl_urb_actual_length;
- else
- return err;
+out:
+ return err;
}
static struct brcmf_usbreq *brcmf_usb_deq(struct brcmf_usbdev_info *devinfo,
@@ -500,10 +529,12 @@ static void brcmf_usb_rx_complete(struct urb *urb)
return;
}
- if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
+ if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP ||
+ devinfo->bus_pub.state == BRCMFMAC_USB_STATE_SLEEP) {
skb_put(skb, urb->actual_length);
brcmf_rx_frame(devinfo->dev, skb, true);
brcmf_usb_rx_refill(devinfo, req);
+ usb_mark_last_busy(urb->dev);
} else {
brcmu_pkt_buf_free_skb(skb);
brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL);
@@ -587,6 +618,11 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
struct brcmf_usbreq *req;
int ret;
unsigned long flags;
+ struct usb_interface *intf = to_usb_interface(dev);
+
+ ret = usb_autopm_get_interface(intf);
+ if (ret)
+ goto out;
brcmf_dbg(USB, "Enter, skb=%p\n", skb);
if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
@@ -625,9 +661,10 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
devinfo->tx_flowblock = true;
}
spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
- return 0;
fail:
+ usb_autopm_put_interface(intf);
+out:
return ret;
}
@@ -991,20 +1028,32 @@ static int
brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
{
int err;
+ struct usb_interface *intf;
brcmf_dbg(USB, "Enter\n");
- if (devinfo == NULL)
- return -ENODEV;
+ if (!devinfo) {
+ err = -ENODEV;
+ goto out;
+ }
if (!devinfo->image) {
brcmf_err("No firmware!\n");
- return -ENOENT;
+ err = -ENOENT;
+ goto out;
}
+ intf = to_usb_interface(devinfo->dev);
+ err = usb_autopm_get_interface(intf);
+ if (err)
+ goto out;
+
err = brcmf_usb_dlstart(devinfo,
(u8 *)devinfo->image, devinfo->image_len);
if (err == 0)
err = brcmf_usb_dlrun(devinfo);
+
+ usb_autopm_put_interface(intf);
+out:
return err;
}
@@ -1105,18 +1154,6 @@ error:
return NULL;
}
-static void brcmf_usb_wowl_config(struct device *dev, bool enabled)
-{
- struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
-
- brcmf_dbg(USB, "Configuring WOWL, enabled=%d\n", enabled);
- devinfo->wowl_enabled = enabled;
- if (enabled)
- device_set_wakeup_enable(devinfo->dev, true);
- else
- device_set_wakeup_enable(devinfo->dev, false);
-}
-
static
int brcmf_usb_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
{
@@ -1143,7 +1180,6 @@ static const struct brcmf_bus_ops brcmf_usb_bus_ops = {
.txdata = brcmf_usb_tx,
.txctl = brcmf_usb_tx_ctlpkt,
.rxctl = brcmf_usb_rx_ctlpkt,
- .wowl_config = brcmf_usb_wowl_config,
.get_fwname = brcmf_usb_get_fwname,
};
@@ -1332,6 +1368,8 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_set_intfdata(intf, devinfo);
+ intf->needs_remote_wakeup = 1;
+
/* Check that the device supports only one configuration */
if (usb->descriptor.bNumConfigurations != 1) {
brcmf_err("Number of configurations: %d not supported\n",
@@ -1445,12 +1483,8 @@ static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state)
brcmf_dbg(USB, "Enter\n");
devinfo->bus_pub.state = BRCMFMAC_USB_STATE_SLEEP;
- if (devinfo->wowl_enabled) {
- brcmf_cancel_all_urbs(devinfo);
- } else {
- brcmf_detach(&usb->dev);
- brcmf_free(&usb->dev);
- }
+ brcmf_cancel_all_urbs(devinfo);
+ device_set_wakeup_enable(devinfo->dev, true);
return 0;
}
@@ -1463,22 +1497,10 @@ static int brcmf_usb_resume(struct usb_interface *intf)
struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
brcmf_dbg(USB, "Enter\n");
- if (!devinfo->wowl_enabled) {
- int err;
-
- err = brcmf_alloc(&usb->dev, devinfo->settings);
- if (err)
- return err;
-
- err = brcmf_attach(devinfo->dev);
- if (err) {
- brcmf_free(devinfo->dev);
- return err;
- }
- }
devinfo->bus_pub.state = BRCMFMAC_USB_STATE_UP;
brcmf_usb_rx_fill_all(devinfo);
+ device_set_wakeup_enable(devinfo->dev, false);
return 0;
}
@@ -1535,6 +1557,7 @@ static struct usb_driver brcmf_usbdrvr = {
.suspend = brcmf_usb_suspend,
.resume = brcmf_usb_resume,
.reset_resume = brcmf_usb_reset_resume,
+ .supports_autosuspend = true,
.disable_hub_initiated_lpm = 1,
};
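
Replacing the wowl-only suspend handling with USB autosuspend means the core may power the device down whenever no one holds a PM reference, so each I/O entry point now brackets its work with usb_autopm_get_interface()/usb_autopm_put_interface(), and usb_mark_last_busy() in the rx completion keeps the inactivity timer from firing under traffic. The resulting shape of an I/O path (do_transfer() is a hypothetical helper):

	static int example_io(struct usb_interface *intf)
	{
		int err;

		err = usb_autopm_get_interface(intf);	/* resumes if asleep */
		if (err)
			return err;

		err = do_transfer(intf);		/* the actual I/O */

		usb_autopm_put_interface(intf);		/* re-arm autosuspend */
		return err;
	}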
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index d1037b6ef2d6..c6c4be05159d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -44,6 +44,7 @@
#define BRCM_CC_4358_CHIP_ID 0x4358
#define BRCM_CC_4359_CHIP_ID 0x4359
#define BRCM_CC_43602_CHIP_ID 43602
+#define BRCM_CC_4364_CHIP_ID 0x4364
#define BRCM_CC_4365_CHIP_ID 0x4365
#define BRCM_CC_4366_CHIP_ID 0x4366
#define BRCM_CC_43664_CHIP_ID 43664
@@ -74,6 +75,7 @@
#define BRCM_PCIE_43602_2G_DEVICE_ID 0x43bb
#define BRCM_PCIE_43602_5G_DEVICE_ID 0x43bc
#define BRCM_PCIE_43602_RAW_DEVICE_ID 43602
+#define BRCM_PCIE_4364_DEVICE_ID 0x4464
#define BRCM_PCIE_4365_DEVICE_ID 0x43ca
#define BRCM_PCIE_4365_2G_DEVICE_ID 0x43cb
#define BRCM_PCIE_4365_5G_DEVICE_ID 0x43cc
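
Bringing up a new chip such as the 4364 touches three spots in lockstep: the chipcommon and PCIe IDs here, the TCM RAM base in chip.c, and the firmware mapping plus PCI match in pcie.c. The pcie.c half of that pattern, exactly as in the hunks above:

	BRCMF_FW_DEF(4364, "brcmfmac4364-pcie");
	/* 0xFFFFFFFF: this firmware covers every chip revision */
	BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFFF, 4364),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID),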
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 5dfcce77d094..97ea6e2035e6 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -629,30 +629,30 @@ static char *snprint_line(char *buf, size_t count,
int out, i, j, l;
char c;
- out = snprintf(buf, count, "%08X", ofs);
+ out = scnprintf(buf, count, "%08X", ofs);
for (l = 0, i = 0; i < 2; i++) {
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
for (j = 0; j < 8 && l < len; j++, l++)
- out += snprintf(buf + out, count - out, "%02X ",
+ out += scnprintf(buf + out, count - out, "%02X ",
data[(i * 8 + j)]);
for (; j < 8; j++)
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
}
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
for (l = 0, i = 0; i < 2; i++) {
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
for (j = 0; j < 8 && l < len; j++, l++) {
c = data[(i * 8 + j)];
if (!isascii(c) || !isprint(c))
c = '.';
- out += snprintf(buf + out, count - out, "%c", c);
+ out += scnprintf(buf + out, count - out, "%c", c);
}
for (; j < 8; j++)
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
}
return buf;
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 5ef6f87a48ac..60b5e08dd6df 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -223,30 +223,30 @@ static int snprint_line(char *buf, size_t count,
int out, i, j, l;
char c;
- out = snprintf(buf, count, "%08X", ofs);
+ out = scnprintf(buf, count, "%08X", ofs);
for (l = 0, i = 0; i < 2; i++) {
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
for (j = 0; j < 8 && l < len; j++, l++)
- out += snprintf(buf + out, count - out, "%02X ",
+ out += scnprintf(buf + out, count - out, "%02X ",
data[(i * 8 + j)]);
for (; j < 8; j++)
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
}
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
for (l = 0, i = 0; i < 2; i++) {
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
for (j = 0; j < 8 && l < len; j++, l++) {
c = data[(i * 8 + j)];
if (!isascii(c) || !isprint(c))
c = '.';
- out += snprintf(buf + out, count - out, "%c", c);
+ out += scnprintf(buf + out, count - out, "%c", c);
}
for (; j < 8; j++)
- out += snprintf(buf + out, count - out, " ");
+ out += scnprintf(buf + out, count - out, " ");
}
return out;
@@ -1279,12 +1279,12 @@ static ssize_t show_event_log(struct device *d,
log_len = log_size / sizeof(*log);
ipw_capture_event_log(priv, log_len, log);
- len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
for (i = 0; i < log_len; i++)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"\n%08X%08X%08X",
log[i].time, log[i].event, log[i].data);
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
kfree(log);
return len;
}
@@ -1298,13 +1298,13 @@ static ssize_t show_error(struct device *d,
u32 len = 0, i;
if (!priv->error)
return 0;
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"%08lX%08X%08X%08X",
priv->error->jiffies,
priv->error->status,
priv->error->config, priv->error->elem_len);
for (i = 0; i < priv->error->elem_len; i++)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"\n%08X%08X%08X%08X%08X%08X%08X",
priv->error->elem[i].time,
priv->error->elem[i].desc,
@@ -1314,15 +1314,15 @@ static ssize_t show_error(struct device *d,
priv->error->elem[i].link2,
priv->error->elem[i].data);
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"\n%08X", priv->error->log_len);
for (i = 0; i < priv->error->log_len; i++)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"\n%08X%08X%08X",
priv->error->log[i].time,
priv->error->log[i].event,
priv->error->log[i].data);
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
@@ -1350,7 +1350,7 @@ static ssize_t show_cmd_log(struct device *d,
(i != priv->cmdlog_pos) && (len < PAGE_SIZE);
i = (i + 1) % priv->cmdlog_len) {
len +=
- snprintf(buf + len, PAGE_SIZE - len,
+ scnprintf(buf + len, PAGE_SIZE - len,
"\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
priv->cmdlog[i].cmd.len);
@@ -1358,9 +1358,9 @@ static ssize_t show_cmd_log(struct device *d,
snprintk_buf(buf + len, PAGE_SIZE - len,
(u8 *) priv->cmdlog[i].cmd.param,
priv->cmdlog[i].cmd.len);
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
}
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
@@ -9608,24 +9608,24 @@ static int ipw_wx_get_powermode(struct net_device *dev,
int level = IPW_POWER_LEVEL(priv->power_mode);
char *p = extra;
- p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
+ p += scnprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
switch (level) {
case IPW_POWER_AC:
- p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
+ p += scnprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
break;
case IPW_POWER_BATTERY:
- p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
+ p += scnprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
break;
default:
- p += snprintf(p, MAX_WX_STRING - (p - extra),
+ p += scnprintf(p, MAX_WX_STRING - (p - extra),
"(Timeout %dms, Period %dms)",
timeout_duration[level - 1] / 1000,
period_duration[level - 1] / 1000);
}
if (!(priv->power_mode & IPW_POWER_ENABLED))
- p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
+ p += scnprintf(p, MAX_WX_STRING - (p - extra), " OFF");
wrqu->data.length = p - extra + 1;
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
index 0cb36d1b983a..5a2a723e480b 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
@@ -1156,7 +1156,7 @@ static int libipw_parse_info_param(struct libipw_info_element
for (i = 0; i < network->rates_len; i++) {
network->rates[i] = info_element->data[i];
#ifdef CONFIG_LIBIPW_DEBUG
- p += snprintf(p, sizeof(rates_str) -
+ p += scnprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
network->rates[i]);
#endif
@@ -1183,7 +1183,7 @@ static int libipw_parse_info_param(struct libipw_info_element
for (i = 0; i < network->rates_ex_len; i++) {
network->rates_ex[i] = info_element->data[i];
#ifdef CONFIG_LIBIPW_DEBUG
- p += snprintf(p, sizeof(rates_str) -
+ p += scnprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
network->rates_ex[i]);
#endif
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
index 3d558b47168b..a0cf78c418ac 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
@@ -213,7 +213,7 @@ static char *libipw_translate_scan(struct libipw_device *ieee,
* for given network. */
iwe.cmd = IWEVCUSTOM;
p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
" Last beacon: %ums ago",
elapsed_jiffies_msecs(network->last_scanned));
iwe.u.data.length = p - custom;
@@ -223,18 +223,18 @@ static char *libipw_translate_scan(struct libipw_device *ieee,
/* Add spectrum management information */
iwe.cmd = -1;
p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Channel flags: ");
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Channel flags: ");
if (libipw_get_channel_flags(ieee, network->channel) &
LIBIPW_CH_INVALID) {
iwe.cmd = IWEVCUSTOM;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "INVALID ");
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), "INVALID ");
}
if (libipw_get_channel_flags(ieee, network->channel) &
LIBIPW_CH_RADAR_DETECT) {
iwe.cmd = IWEVCUSTOM;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "DFS ");
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), "DFS ");
}
if (iwe.cmd == IWEVCUSTOM) {
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 206b43b9dff8..9167c3d2711d 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -228,9 +228,7 @@ il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
static int
il3945_remove_static_key(struct il_priv *il)
{
- int ret = -EOPNOTSUPP;
-
- return ret;
+ return -EOPNOTSUPP;
}
static int
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index 34d0579132ce..fc8fa5818de7 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -416,7 +416,6 @@ il4965_set_ucode_ptrs(struct il_priv *il)
{
dma_addr_t pinst;
dma_addr_t pdata;
- int ret = 0;
/* bits 35:4 for 4965 */
pinst = il->ucode_code.p_addr >> 4;
@@ -433,7 +432,7 @@ il4965_set_ucode_ptrs(struct il_priv *il)
il->ucode_code.len | BSM_DRAM_INST_LOAD);
D_INFO("Runtime uCode pointers are set.\n");
- return ret;
+ return 0;
}
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 355af47c5f73..bc49cdd819df 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -57,7 +57,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 52
+#define IWL_22000_UCODE_API_MAX 53
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -76,10 +76,8 @@
#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
-#define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
-#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_QU_C_JF_B_FW_PRE "iwlwifi-Qu-c0-jf-b0-"
@@ -98,14 +96,10 @@
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_JF_MODULE_FIRMWARE(api) \
IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
- IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \
IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
- IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \
@@ -235,6 +229,38 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
}, \
}
+const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_22000,
+ .base_params = &iwl_22000_base_params,
+ .integrated = true,
+ .xtal_latency = 5000,
+};
+
+const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_22000,
+ .base_params = &iwl_22000_base_params,
+ .integrated = true,
+ .xtal_latency = 12000,
+ .low_latency_xtal = true,
+};
+
+const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .device_family = IWL_DEVICE_FAMILY_22000,
+ .base_params = &iwl_22000_base_params,
+};
+
/*
* If the device doesn't support HE, no need to have that many buffers.
* 22000 devices can split multiple frames into a single RB, so fewer are
@@ -246,6 +272,64 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
#define IWL_NUM_RBDS_22000_HE 2048
#define IWL_NUM_RBDS_AX210_HE 4096
+/*
+ * All JF radio modules are part of the 9000 series, but the MAC part
+ * looks more like 22000. That's why this device is here, but called
+ * 9560 nevertheless.
+ */
+const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg = {
+ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
+};
+
+const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg = {
+ .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
+};
+
+const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg = {
+ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
+};
+
+const struct iwl_cfg iwl9560_qnj_b0_jf_b0_cfg = {
+ .fw_name_pre = IWL_QNJ_B_JF_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ .num_rbds = IWL_NUM_RBDS_NON_HE,
+};
+
+const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_22000,
+ .base_params = &iwl_22000_base_params,
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .bisr_workaround = 1,
+};
+
+const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
+
+const char iwl_ax200_killer_1650w_name[] =
+ "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)";
+const char iwl_ax200_killer_1650x_name[] =
+ "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)";
+
const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
.name = "Intel(R) Wi-Fi 6 AX101",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
@@ -354,35 +438,6 @@ const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
};
const struct iwl_cfg iwl_ax200_cfg_cc = {
- .name = "Intel(R) Wi-Fi 6 AX200 160MHz",
- .fw_name_pre = IWL_CC_A_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .trans.bisr_workaround = 1,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg killer1650x_2ax_cfg = {
- .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)",
- .fw_name_pre = IWL_CC_A_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .trans.bisr_workaround = 1,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg killer1650w_2ax_cfg = {
- .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)",
.fw_name_pre = IWL_CC_A_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -391,188 +446,9 @@ const struct iwl_cfg killer1650w_2ax_cfg = {
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .trans.bisr_workaround = 1,
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
-/*
- * All JF radio modules are part of the 9000 series, but the MAC part
- * looks more like 22000. That's why this device is here, but called
- * 9560 nevertheless.
- */
-const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9461",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9462",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9560",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9461",
- .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9462",
- .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9560",
- .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL_QNJ_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .integrated = true,
- .soc_latency = 5000,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .integrated = true,
- .soc_latency = 5000,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9461",
- .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .integrated = true,
- .soc_latency = 5000,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9462",
- .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .integrated = true,
- .soc_latency = 5000,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc = {
- .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .integrated = true,
- .soc_latency = 5000,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc = {
- .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .integrated = true,
- .soc_latency = 5000,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
- .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
-const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
- .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- .num_rbds = IWL_NUM_RBDS_NON_HE,
-};
-
const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
.name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
@@ -625,32 +501,6 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
-const struct iwl_cfg iwl22000_2ax_cfg_jf = {
- .name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
-const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
- .name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_B_FW_PRE,
@@ -664,19 +514,6 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
.num_rbds = IWL_NUM_RBDS_22000_HE,
};
-const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
- .name = "Intel(R) Dual Band Wireless AX 22000",
- .fw_name_pre = IWL_22000_HR_A0_FW_PRE,
- IWL_DEVICE_22500,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
- .num_rbds = IWL_NUM_RBDS_22000_HE,
-};
-
const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE,
@@ -725,9 +562,7 @@ const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = {
MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
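
Hoisting the transport parameters into shared iwl_cfg_trans_params objects and the marketing strings into standalone const char arrays lets many SKUs reuse one firmware config; the per-PCI-ID pairing of transport, config, and name happens in the PCI driver. A hypothetical pairing entry (illustrative only, not the real iwlwifi table layout):

	struct example_dev_info {			/* hypothetical */
		u16 device;				/* PCI device id */
		const struct iwl_cfg_trans_params *trans;
		const struct iwl_cfg *cfg;
		const char *name;
	};

	static const struct example_dev_info example_devs[] = {
		{ 0x2723, &iwl_ax200_trans_cfg,
		  &iwl_ax200_cfg_cc, iwl_ax200_name },
	};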
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index 379ea788e424..f84b8e5d3f0b 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -123,8 +123,6 @@ static const struct iwl_tt_params iwl9000_tt_params = {
#define IWL_DEVICE_9000 \
.ucode_api_max = IWL9000_UCODE_API_MAX, \
.ucode_api_min = IWL9000_UCODE_API_MIN, \
- .trans.device_family = IWL_DEVICE_FAMILY_9000, \
- .trans.base_params = &iwl9000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = 10, \
.non_shared_ant = ANT_B, \
@@ -137,11 +135,9 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
.thermal_params = &iwl9000_tt_params, \
.apmg_not_supported = true, \
- .trans.mq_rx_supported = true, \
.num_rbds = 512, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
- .trans.rf_id = true, \
.nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true, \
.min_umac_error_event_table = 0x800000, \
@@ -178,173 +174,54 @@ const struct iwl_cfg_trans_params iwl9000_trans_cfg = {
.rf_id = true,
};
-const struct iwl_cfg iwl9160_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9160",
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
-
-const struct iwl_cfg iwl9260_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9260",
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
-
-const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz";
-const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz";
-
-const struct iwl_cfg iwl9260_2ac_160_cfg = {
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
-
-const struct iwl_cfg iwl9260_killer_2ac_cfg = {
- .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)",
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
-
-const struct iwl_cfg iwl9270_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9270",
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
-
-const struct iwl_cfg iwl9460_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
-
-const struct iwl_cfg iwl9460_2ac_cfg_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
+const struct iwl_cfg_trans_params iwl9560_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_9000,
+ .base_params = &iwl9000_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
.integrated = true,
- .soc_latency = 5000,
+ .xtal_latency = 5000,
};
-const struct iwl_cfg iwl9461_2ac_cfg_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9461",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
+const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_9000,
+ .base_params = &iwl9000_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
.integrated = true,
- .soc_latency = 5000,
+ .xtal_latency = 5000,
+ .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};
-const struct iwl_cfg iwl9462_2ac_cfg_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9462",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
-};
+const char iwl9162_name[] = "Intel(R) Wireless-AC 9162";
+const char iwl9260_name[] = "Intel(R) Wireless-AC 9260";
+const char iwl9260_1_name[] = "Intel(R) Wireless-AC 9260-1";
+const char iwl9270_name[] = "Intel(R) Wireless-AC 9270";
+const char iwl9461_name[] = "Intel(R) Wireless-AC 9461";
+const char iwl9462_name[] = "Intel(R) Wireless-AC 9462";
+const char iwl9560_name[] = "Intel(R) Wireless-AC 9560";
+const char iwl9162_160_name[] = "Intel(R) Wireless-AC 9162 160MHz";
+const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz";
+const char iwl9270_160_name[] = "Intel(R) Wireless-AC 9270 160MHz";
+const char iwl9461_160_name[] = "Intel(R) Wireless-AC 9461 160MHz";
+const char iwl9462_160_name[] = "Intel(R) Wireless-AC 9462 160MHz";
+const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz";
-const struct iwl_cfg iwl9560_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9560",
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
-};
+const char iwl9260_killer_1550_name[] =
+ "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)";
+const char iwl9560_killer_1550i_name[] =
+ "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)";
+const char iwl9560_killer_1550s_name[] =
+ "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)";
-const struct iwl_cfg iwl9560_2ac_160_cfg = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
+const struct iwl_cfg iwl9260_2ac_cfg = {
.fw_name_pre = IWL9260_FW_PRE,
IWL_DEVICE_9000,
};
const struct iwl_cfg iwl9560_2ac_cfg_soc = {
- .name = "Intel(R) Dual Band Wireless AC 9560",
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
-};
-
-const struct iwl_cfg iwl9560_2ac_160_cfg_soc = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
-};
-
-const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
- .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
-};
-
-const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
- .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
-};
-
-const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
- .name = "Intel(R) Dual Band Wireless AC 9460",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
- .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
-};
-
-const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = {
- .name = "Intel(R) Dual Band Wireless AC 9461",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
- .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
-};
-
-const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = {
- .name = "Intel(R) Dual Band Wireless AC 9462",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
- .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
-};
-
-const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
- .name = "Intel(R) Dual Band Wireless AC 9560",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
- .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
-};
-
-const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk = {
- .name = "Intel(R) Wireless-AC 9560 160MHz",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
- .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
-};
-
-const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
- .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
- .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
-};
-
-const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
- .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .integrated = true,
- .soc_latency = 5000,
- .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 22dff2c92d4f..4f46f3ed8794 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -5,9 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2012 - 2014, 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,9 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2012 - 2014, 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -647,6 +647,11 @@ enum iwl_system_subcmd_ids {
SHARED_MEM_CFG_CMD = 0x0,
/**
+ * @SOC_CONFIGURATION_CMD: &struct iwl_soc_configuration_cmd
+ */
+ SOC_CONFIGURATION_CMD = 0x01,
+
+ /**
* @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
*/
INIT_EXTENDED_CFG_CMD = 0x03,
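
SOC_CONFIGURATION_CMD sits in the SYSTEM_GROUP, so callers address it with a wide (group, opcode) command ID rather than a bare legacy opcode; the iwl_cmd_id(SOC_CONFIGURATION_CMD, SYSTEM_GROUP, 0) call in mvm/fw.c below relies on this. A minimal sketch of the composition, assuming the usual iwlwifi layout of group in the high byte (SYSTEM_GROUP is 0x2) and opcode in the low byte:

	/* Sketch: wide host-command ID composition (group in bits 8-15,
	 * opcode in bits 0-7), mirroring the driver's WIDE_ID()/iwl_cmd_id()
	 * helpers. */
	static inline u32 example_wide_id(u8 group, u8 opcode)
	{
		return ((u32)group << 8) | opcode;
	}
	/* example_wide_id(SYSTEM_GROUP, SOC_CONFIGURATION_CMD) == 0x0201 */
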
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index a0d6802c2715..0214e553d5ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -427,6 +427,9 @@ struct iwl_tof_range_req_ap_entry_v2 {
* Default algo type is ML.
* @IWL_INITIATOR_AP_FLAGS_MCSI_REPORT: Send the MCSI for each FTM frame to the
* driver.
+ * @IWL_INITIATOR_AP_FLAGS_NON_TB: Use non trigger based flow
+ * @IWL_INITIATOR_AP_FLAGS_TB: Use trigger based flow
+ * @IWL_INITIATOR_AP_FLAGS_SECURED: request secured measurement
*/
enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1),
@@ -436,6 +439,9 @@ enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_ALGO_LR = BIT(5),
IWL_INITIATOR_AP_FLAGS_ALGO_FFT = BIT(6),
IWL_INITIATOR_AP_FLAGS_MCSI_REPORT = BIT(8),
+ IWL_INITIATOR_AP_FLAGS_NON_TB = BIT(9),
+ IWL_INITIATOR_AP_FLAGS_TB = BIT(10),
+ IWL_INITIATOR_AP_FLAGS_SECURED = BIT(11),
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 1b2b5fa56e19..3d770f406c38 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -95,6 +95,7 @@ struct iwl_ssid_ie {
#define IWL_SCAN_MAX_BLACKLIST_LEN 64
#define IWL_SCAN_SHORT_BLACKLIST_LEN 16
#define IWL_SCAN_MAX_PROFILES 11
+#define IWL_SCAN_MAX_PROFILES_V2 8
#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512
#define SCAN_NUM_BAND_PROBE_DATA_V_1 2
#define SCAN_NUM_BAND_PROBE_DATA_V_2 3
@@ -160,8 +161,7 @@ struct iwl_scan_offload_profile {
} __packed;
/**
- * struct iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
- * @profiles: profiles to search for match
+ * struct iwl_scan_offload_profile_cfg_data
* @blacklist_len: length of blacklist
* @num_profiles: num of profiles in the list
* @match_notify: clients waiting for match found notification
@@ -170,8 +170,7 @@ struct iwl_scan_offload_profile {
* @any_beacon_notify: clients waiting for match notification without match
* @reserved: reserved
*/
-struct iwl_scan_offload_profile_cfg {
- struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
+struct iwl_scan_offload_profile_cfg_data {
u8 blacklist_len;
u8 num_profiles;
u8 match_notify;
@@ -182,6 +181,26 @@ struct iwl_scan_offload_profile_cfg {
} __packed;
/**
+ * struct iwl_scan_offload_profile_cfg_v1
+ * @profiles: profiles to search for match
+ * @data: the rest of the data for profile_cfg
+ */
+struct iwl_scan_offload_profile_cfg_v1 {
+ struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
+ struct iwl_scan_offload_profile_cfg_data data;
+} __packed; /* SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1-2 */
+
+/**
+ * struct iwl_scan_offload_profile_cfg
+ * @profiles: profiles to search for match
+ * @data: the rest of the data for profile_cfg
+ */
+struct iwl_scan_offload_profile_cfg {
+ struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES_V2];
+ struct iwl_scan_offload_profile_cfg_data data;
+} __packed; /* SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_3 */
+
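
Splitting the fixed tail out into iwl_scan_offload_profile_cfg_data means the on-the-wire command is a version-dependent profiles array followed by the same trailer in both layouts, so the payload length has to be computed rather than read from a single sizeof. A sketch of that computation (the helper name is illustrative; mvm/scan.c below does the same arithmetic inline):

	/* Sketch: version-dependent payload sizing for
	 * SCAN_OFFLOAD_UPDATE_PROFILES_CMD. max_profiles is
	 * IWL_SCAN_MAX_PROFILES (11) for VER_1-2 firmware and
	 * IWL_SCAN_MAX_PROFILES_V2 (8) for VER_3. */
	static inline size_t example_profile_cfg_size(int max_profiles)
	{
		return sizeof(struct iwl_scan_offload_profile) * max_profiles +
		       sizeof(struct iwl_scan_offload_profile_cfg_data);
	}
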
+/**
* struct iwl_scan_schedule_lmac - schedule of scan offload
* @delay: delay between iterations, in seconds.
* @iterations: num of scan iterations
@@ -702,13 +721,16 @@ struct iwl_scan_channel_cfg_umac {
u8 channel_num;
u8 iter_count;
__le16 iter_interval;
- } v1; /* SCAN_CHANNEL_CFG_S_VER1 */
+ } v1; /* SCAN_CHANNEL_CONFIG_API_S_VER_1 */
struct {
u8 channel_num;
u8 band;
u8 iter_count;
u8 iter_interval;
- } v2; /* SCAN_CHANNEL_CFG_S_VER2 */
+ } v2; /* SCAN_CHANNEL_CONFIG_API_S_VER_2
+ * SCAN_CHANNEL_CONFIG_API_S_VER_3
+ * SCAN_CHANNEL_CONFIG_API_S_VER_4
+ */
};
} __packed;
@@ -943,6 +965,25 @@ struct iwl_scan_channel_params_v4 {
u8 adwell_ch_override_bitmap[16];
} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_4 also
SCAN_CHANNEL_PARAMS_API_S_VER_5 */
+
+/**
+ * struct iwl_scan_channel_params_v6
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @n_aps_override: override the number of APs the FW uses to calculate dwell
+ * time when adaptive dwell is used.
+ * Channel k will use n_aps_override[i] when BIT(20 + i) is set in
+ * channel_config[k].flags
+ * @channel_config: array of explicit channel configurations
+ * for 2.4Ghz and 5.2Ghz bands
+ */
+struct iwl_scan_channel_params_v6 {
+ u8 flags;
+ u8 count;
+ u8 n_aps_override[2];
+ struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_6 */
+
/**
* struct iwl_scan_general_params_v10
* @flags: &enum iwl_umac_scan_flags
@@ -1024,6 +1065,20 @@ struct iwl_scan_req_params_v13 {
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_13 */
/**
+ * struct iwl_scan_req_params_v14
+ * @general_params: &struct iwl_scan_general_params_v10
+ * @channel_params: &struct iwl_scan_channel_params_v6
+ * @periodic_params: &struct iwl_scan_periodic_parms_v1
+ * @probe_params: &struct iwl_scan_probe_params_v4
+ */
+struct iwl_scan_req_params_v14 {
+ struct iwl_scan_general_params_v10 general_params;
+ struct iwl_scan_channel_params_v6 channel_params;
+ struct iwl_scan_periodic_parms_v1 periodic_params;
+ struct iwl_scan_probe_params_v4 probe_params;
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_14 */
+
+/**
* struct iwl_scan_req_umac_v12
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
@@ -1048,6 +1103,18 @@ struct iwl_scan_req_umac_v13 {
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_13 */
/**
+ * struct iwl_scan_req_umac_v14
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @scan_params: scan parameters
+ */
+struct iwl_scan_req_umac_v14 {
+ __le32 uid;
+ __le32 ooc_priority;
+ struct iwl_scan_req_params_v14 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_14 */
+
+/**
* struct iwl_umac_scan_abort
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @flags: reserved
@@ -1121,7 +1188,7 @@ struct iwl_scan_offload_profiles_query_v1 {
u8 resume_while_scanning;
u8 self_recovery;
__le16 reserved;
- struct iwl_scan_offload_profile_match_v1 matches[IWL_SCAN_MAX_PROFILES];
+ struct iwl_scan_offload_profile_match_v1 matches[0];
} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
/**
@@ -1165,7 +1232,7 @@ struct iwl_scan_offload_profiles_query {
u8 resume_while_scanning;
u8 self_recovery;
__le16 reserved;
- struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
+ struct iwl_scan_offload_profile_match matches[0];
} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 */
/**
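
With matches[] reduced to a zero-length trailer in both query structs, the number of match entries in a firmware response is no longer a compile-time constant and must be derived from the firmware's command version. A sketch of a reader walking the variable-length tail, assuming the payload length was already validated; handle_match() is illustrative, and iwl_umac_scan_get_max_profiles() is the helper added in mvm.h later in this patch:

	/* Sketch: walking the variable-length matches[] trailer. */
	static void example_walk_matches(const struct iwl_fw *fw,
					 struct iwl_scan_offload_profiles_query *resp)
	{
		int i, n = iwl_umac_scan_get_max_profiles(fw);

		for (i = 0; i < n; i++)
			handle_match(&resp->matches[i]);	/* illustrative consumer */
	}
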
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h b/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h
new file mode 100644
index 000000000000..aadca78e9846
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h
@@ -0,0 +1,87 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 - 2020 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 - 2020 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_soc_h__
+#define __iwl_fw_api_soc_h__
+
+#define SOC_CONFIG_CMD_FLAGS_DISCRETE BIT(0)
+#define SOC_CONFIG_CMD_FLAGS_LOW_LATENCY BIT(1)
+
+/**
+ * struct iwl_soc_configuration_cmd - Set device stabilization latency
+ *
+ * @flags: soc settings flags. In VER_1, we can only set the DISCRETE
+ * flag, because the FW treats the whole value as an integer. In
+ * VER_2, we can set the bits independently.
+ * @latency: time for SOC to ensure stable power & XTAL
+ */
+struct iwl_soc_configuration_cmd {
+ __le32 flags;
+ __le32 latency;
+} __packed; /*
+ * SOC_CONFIGURATION_CMD_S_VER_1 (see description above)
+ * SOC_CONFIGURATION_CMD_S_VER_2
+ */
+
+#endif /* __iwl_fw_api_soc_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 8796ab8f2a5f..14ac7153a3e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1108,6 +1108,38 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
+static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ __le32 *val = range->data;
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
+ int i;
+
+ /* we shouldn't get here if the trans doesn't have read_config32 */
+ if (WARN_ON_ONCE(!trans->ops->read_config32))
+ return -EOPNOTSUPP;
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = reg->dev_addr.size;
+ for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
+ int ret;
+ u32 tmp;
+
+ ret = trans->ops->read_config32(trans, addr + i, &tmp);
+ if (ret < 0)
+ return ret;
+
+ *val++ = cpu_to_le32(tmp);
+ }
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
@@ -1701,13 +1733,7 @@ iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
- struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
- u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id), size;
-
- fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
- if (le32_to_cpu(fw_mon_cfg->buf_location) !=
- IWL_FW_INI_LOCATION_SRAM_PATH)
- return 0;
+ u32 size;
size = le32_to_cpu(reg->internal_buffer.size);
if (!size)
@@ -2048,7 +2074,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_range = iwl_dump_ini_csr_iter,
},
[IWL_FW_INI_REGION_DRAM_IMR] = {},
- [IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {},
+ [IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {
+ .get_num_of_ranges = iwl_dump_ini_mem_ranges,
+ .get_size = iwl_dump_ini_mem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .fill_range = iwl_dump_ini_config_iter,
+ },
};
static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
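
iwl_dump_ini_config_iter() above pulls the region out dword by dword through the new read_config32 transport op, and the TLV allocation path below rejects the region when the op is missing. For a PCI-based transport the op plausibly reduces to a config-space read; a sketch under that assumption (the function name is illustrative, not taken from this patch):

	/* Sketch: a config-space read op for a PCI transport;
	 * pci_read_config_dword() returns 0 on success. */
	static int example_read_config32(struct pci_dev *pdev, u32 ofs, u32 *val)
	{
		return pci_read_config_dword(pdev, ofs, val);
	}
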
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 1554f5fdd483..35f42e529a6d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -5,11 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2008 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,11 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2008 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -369,6 +365,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* is supported.
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
* @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used)
+ * @IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT: the firmware supports setting
+ * stabilization latency for SoCs.
* @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
* @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
* @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
@@ -437,6 +435,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31,
/* set 1 */
+ IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT = (__force iwl_ucode_tlv_capa_t)37,
IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38,
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39,
IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index f8c6ed823bc5..da0d90e2b537 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -69,8 +69,6 @@
#include "iwl-eeprom-parse.h"
#include "fw/acpi.h"
-#define IWL_FW_DBG_DOMAIN IWL_TRANS_FW_DBG_DOMAIN(fwrt->trans)
-
struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index be6a2bf9ce74..d5d984d7ce83 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -287,27 +287,36 @@ struct iwl_pwr_tx_backoff {
/**
* struct iwl_cfg_trans - information needed to start the trans
*
- * These values cannot be changed when multiple configs are used for a
- * single PCI ID, because they are needed before the HW REV or RFID
- * can be read.
+ * These values are specific to the device ID and do not change when
+ * multiple configs are used for a single device ID. They are used,
+ * among other things, to boot the NIC so that the HW REV or
+ * RFID can be read before deciding the remaining parameters to use.
*
* @base_params: pointer to basic parameters
* @csr: csr flags and addresses that are different across devices
* @device_family: the device family
* @umac_prph_offset: offset to add to UMAC periphery address
+ * @xtal_latency: power up latency to get the xtal stabilized
+ * @extra_phy_cfg_flags: extra configuration flags to pass to the PHY
* @rf_id: need to read rf_id to determine the firmware image
* @use_tfh: use TFH
* @gen2: 22000 and on transport operation
* @mq_rx_supported: multi-queue rx support
+ * @integrated: discrete or integrated
+ * @low_latency_xtal: use the low latency xtal if supported
*/
struct iwl_cfg_trans_params {
const struct iwl_base_params *base_params;
enum iwl_device_family device_family;
u32 umac_prph_offset;
+ u32 xtal_latency;
+ u32 extra_phy_cfg_flags;
u32 rf_id:1,
use_tfh:1,
gen2:1,
mq_rx_supported:1,
+ integrated:1,
+ low_latency_xtal:1,
bisr_workaround:1;
};
@@ -374,7 +383,6 @@ struct iwl_fw_mon_regs {
* @smem_offset: offset from which the SMEM begins
* @smem_len: the length of SMEM
* @vht_mu_mimo_supported: VHT MU-MIMO support
- * @integrated: discrete or integrated
* @cdb: CDB support
* @nvm_type: see &enum iwl_nvm_type
* @d3_debug_data_base_addr: base address where D3 debug data is stored
@@ -413,7 +421,6 @@ struct iwl_cfg {
u32 dccm2_len;
u32 smem_offset;
u32 smem_len;
- u32 soc_latency;
u16 nvm_ver;
u16 nvm_calib_ver;
u32 rx_with_siso_diversity:1,
@@ -427,7 +434,6 @@ struct iwl_cfg {
disable_dummy_notification:1,
apmg_not_supported:1,
vht_mu_mimo_supported:1,
- integrated:1,
cdb:1,
dbgc_supported:1,
uhb_supported:1;
@@ -442,7 +448,6 @@ struct iwl_cfg {
u8 ucode_api_min;
u16 num_rbds;
u32 min_umac_error_event_table;
- u32 extra_phy_cfg_flags;
u32 d3_debug_data_base_addr;
u32 d3_debug_data_length;
u32 min_txq_size;
@@ -454,9 +459,43 @@ struct iwl_cfg {
#define IWL_CFG_ANY (~0)
+#define IWL_CFG_MAC_TYPE_PU 0x31
+#define IWL_CFG_MAC_TYPE_PNJ 0x32
+#define IWL_CFG_MAC_TYPE_TH 0x32
+#define IWL_CFG_MAC_TYPE_QU 0x33
+#define IWL_CFG_MAC_TYPE_QUZ 0x35
+#define IWL_CFG_MAC_TYPE_QNJ 0x36
+
+#define IWL_CFG_RF_TYPE_TH 0x105
+#define IWL_CFG_RF_TYPE_TH1 0x108
+#define IWL_CFG_RF_TYPE_JF2 0x105
+#define IWL_CFG_RF_TYPE_JF1 0x108
+
+#define IWL_CFG_RF_ID_TH 0x1
+#define IWL_CFG_RF_ID_TH1 0x1
+#define IWL_CFG_RF_ID_JF 0x3
+#define IWL_CFG_RF_ID_JF1 0x6
+#define IWL_CFG_RF_ID_JF1_DIV 0xA
+
+#define IWL_CFG_NO_160 0x1
+#define IWL_CFG_160 0x0
+
+#define IWL_CFG_CORES_BT 0x0
+#define IWL_CFG_CORES_BT_GNSS 0x5
+
+#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
+#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
+#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
+
struct iwl_dev_info {
u16 device;
u16 subdevice;
+ u16 mac_type;
+ u16 rf_type;
+ u8 mac_step;
+ u8 rf_id;
+ u8 no_160;
+ u8 cores;
const struct iwl_cfg *cfg;
const char *name;
};
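
The IWL_SUBDEVICE_* accessors slice the PCI subsystem device ID into the match keys of struct iwl_dev_info. A worked decode using the mask/shift pairs above; subdevice 0x0230 is just an illustrative value:

	/* Sketch: decoding a subsystem ID into dev_info match keys. */
	static void example_decode_subdevice(void)
	{
		u16 subdevice = 0x0230;	/* illustrative value */
		u16 rf_id  = IWL_SUBDEVICE_RF_ID(subdevice);	/* (0x0230 & 0x00F0) >> 4  == 0x3, IWL_CFG_RF_ID_JF */
		u16 no_160 = IWL_SUBDEVICE_NO_160(subdevice);	/* (0x0230 & 0x0200) >> 9  == 0x1, IWL_CFG_NO_160 */
		u16 cores  = IWL_SUBDEVICE_CORES(subdevice);	/* (0x0230 & 0x1C00) >> 10 == 0x0, IWL_CFG_CORES_BT */
		/* rf_id, no_160 and cores would feed the iwl_dev_info lookup */
	}
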
@@ -465,8 +504,31 @@ struct iwl_dev_info {
* This list declares the config structures for all devices.
*/
extern const struct iwl_cfg_trans_params iwl9000_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl9560_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_qnj_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
+extern const char iwl9162_name[];
+extern const char iwl9260_name[];
+extern const char iwl9260_1_name[];
+extern const char iwl9270_name[];
+extern const char iwl9461_name[];
+extern const char iwl9462_name[];
+extern const char iwl9560_name[];
+extern const char iwl9162_160_name[];
extern const char iwl9260_160_name[];
+extern const char iwl9270_160_name[];
+extern const char iwl9461_160_name[];
+extern const char iwl9462_160_name[];
extern const char iwl9560_160_name[];
+extern const char iwl9260_killer_1550_name[];
+extern const char iwl9560_killer_1550i_name[];
+extern const char iwl9560_killer_1550s_name[];
+extern const char iwl_ax200_name[];
+extern const char iwl_ax200_killer_1650w_name[];
+extern const char iwl_ax200_killer_1650x_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
@@ -533,38 +595,15 @@ extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8265_2ac_cfg;
extern const struct iwl_cfg iwl8275_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
-extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
-extern const struct iwl_cfg iwl9260_2ac_160_cfg;
-extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
-extern const struct iwl_cfg iwl9270_2ac_cfg;
-extern const struct iwl_cfg iwl9460_2ac_cfg;
-extern const struct iwl_cfg iwl9560_2ac_cfg;
-extern const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
-extern const struct iwl_cfg iwl9560_2ac_160_cfg;
-extern const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
-extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
-extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
-extern const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
-extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
-extern const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
+extern const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg;
+extern const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg;
+extern const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg;
+extern const struct iwl_cfg iwl9560_qnj_b0_jf_b0_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
-extern const struct iwl_cfg iwl9560_2ac_160_cfg_soc;
-extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
-extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
-extern const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc;
-extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc;
-extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
-extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
-extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
-extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
-extern const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk;
-extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
-extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
-extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
@@ -578,22 +617,8 @@ extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg killer1650x_2ax_cfg;
extern const struct iwl_cfg killer1650w_2ax_cfg;
-extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
-extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
-extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;
-extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0;
-extern const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0;
-extern const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0;
-extern const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0;
-extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0;
-extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0;
-extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0;
-extern const struct iwl_cfg iwl22000_2ax_cfg_jf;
-extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
-extern const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0;
-extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 4208e720f6e6..bf2f00b89214 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -236,6 +236,12 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
return -EINVAL;
}
+ if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
+ !trans->ops->read_config32) {
+ IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
+ return -EOPNOTSUPP;
+ }
+
active_reg = &trans->dbg.active_regions[id];
if (*active_reg) {
IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 0481796f75bc..ff52e69c1c80 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1715,6 +1715,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
.bt_coex_active = true,
.power_level = IWL_POWER_INDEX_1,
.uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
+ .enable_ini = true,
/* the rest are 0 by default */
};
IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1837,7 +1838,7 @@ MODULE_PARM_DESC(uapsd_disable,
module_param_named(enable_ini, iwlwifi_mod_params.enable_ini,
bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_ini,
- "Enable debug INI TLV FW debug infrastructure (default: 0");
+ "Enable debug INI TLV FW debug infrastructure (default: true");
/*
* set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index bab0999f002c..9e9810d2b262 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -684,7 +684,9 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
},
};
-static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
+static void iwl_init_he_hw_capab(struct iwl_trans *trans,
+ struct iwl_nvm_data *data,
+ struct ieee80211_supported_band *sband,
u8 tx_chains, u8 rx_chains)
{
sband->iftype_data = iwl_he_capa;
@@ -728,7 +730,7 @@ static void iwl_init_sbands(struct iwl_trans *trans,
tx_chains, rx_chains);
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
- iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+ iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
sband = &data->bands[NL80211_BAND_5GHZ];
sband->band = NL80211_BAND_5GHZ;
@@ -743,7 +745,7 @@ static void iwl_init_sbands(struct iwl_trans *trans,
tx_chains, rx_chains);
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
- iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+ iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
if (n_channels != n_used)
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 7b3b1f4c99b4..bba527b339b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -544,6 +544,8 @@ struct iwl_trans_rxq_dma_data {
* @read_mem: read device's SRAM in DWORD
* @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
* will be zeroed.
+ * @read_config32: read a u32 value from the device's config space at
+ * the given offset.
* @configure: configure parameters required by the transport layer from
* the op_mode. May be called several times before start_fw, can't be
* called after that.
@@ -614,6 +616,7 @@ struct iwl_trans_ops {
void *buf, int dwords);
int (*write_mem)(struct iwl_trans *trans, u32 addr,
const void *buf, int dwords);
+ int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
void (*configure)(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 22a32eb10f01..122ca7624073 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1709,6 +1709,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
};
int ret, len;
size_t query_len, matches_len;
+ int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw);
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
@@ -1720,11 +1721,11 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
query_len = sizeof(struct iwl_scan_offload_profiles_query);
matches_len = sizeof(struct iwl_scan_offload_profile_match) *
- IWL_SCAN_MAX_PROFILES;
+ max_profiles;
} else {
query_len = sizeof(struct iwl_scan_offload_profiles_query_v1);
matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) *
- IWL_SCAN_MAX_PROFILES;
+ max_profiles;
}
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 190cf15b825c..3beef8d077b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -461,6 +461,8 @@ static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf,
desc += rs_pretty_print_rate(buff + desc, bufsz - desc,
lq_sta->last_rate_n_flags);
+ if (desc < bufsz - 1)
+ buff[desc++] = '\n';
mutex_unlock(&mvm->mutex);
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
@@ -1013,6 +1015,8 @@ static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
(int)(ARRAY_SIZE(stats->last_rates) - i));
pos += rs_pretty_print_rate(pos, endpos - pos,
stats->last_rates[idx]);
+ if (pos < endpos - 1)
+ *pos++ = '\n';
}
spin_unlock_bh(&mvm->drv_stats_lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 6e1ea921c299..9e21f5e5d364 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -278,6 +278,10 @@ iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
return -EINVAL;
}
+ /* non-EDCA-based measurements must use HE preamble */
+ if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
+ *format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;
+
*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
@@ -349,6 +353,11 @@ iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
FTM_PUT_FLAG(ALGO_LR);
else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
FTM_PUT_FLAG(ALGO_FFT);
+
+ if (peer->ftm.trigger_based)
+ FTM_PUT_FLAG(TB);
+ else if (peer->ftm.non_trigger_based)
+ FTM_PUT_FLAG(NON_TB);
}
static int
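
The two new branches map cfg80211's trigger_based/non_trigger_based request bits straight onto the firmware AP flags defined in fw/api/location.h above. FTM_PUT_FLAG() itself is outside this hunk; it presumably token-pastes the flag name along these lines, with 'target' being the per-AP command entry in the enclosing function's scope (reconstructed, not shown in this patch):

	/* Sketch: likely shape of FTM_PUT_FLAG (reconstructed). */
	#define FTM_PUT_FLAG(flag)	(target->initiator_ap_flags |= \
					 cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
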
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index e3eb812e0248..05a06f88db6c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -73,6 +73,7 @@
#include "fw/api/datapath.h"
#include "fw/api/phy.h"
#include "fw/api/config.h"
+#include "fw/api/soc.h"
#include "fw/api/alive.h"
#include "fw/api/binding.h"
#include "fw/api/cmdhdr.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 98263cd37944..a4038f289ab3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -89,6 +87,36 @@ struct iwl_mvm_alive_data {
u32 scd_base_addr;
};
+/* set device type and latency */
+static int iwl_set_soc_latency(struct iwl_mvm *mvm)
+{
+ struct iwl_soc_configuration_cmd cmd = {};
+ int ret;
+
+ /*
+ * In VER_1 of this command, the discrete value is considered
+ * an integer; in VER_2, it's a bitmask. Since we have only 2
+ * values in VER_1, this is backwards-compatible with VER_2,
+ * as long as we don't set any other bits.
+ */
+ if (!mvm->trans->trans_cfg->integrated)
+ cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE);
+
+ if (iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_REQ_UMAC) >= 2 &&
+ (mvm->trans->trans_cfg->low_latency_xtal))
+ cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
+
+ cmd.latency = cpu_to_le32(mvm->trans->trans_cfg->xtal_latency);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SOC_CONFIGURATION_CMD,
+ SYSTEM_GROUP, 0), 0,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to set soc latency: %d\n", ret);
+ return ret;
+}
+
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
@@ -544,7 +572,8 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
/* set flags extra PHY configuration flags from the device's cfg */
- phy_cfg_cmd.phy_cfg |= cpu_to_le32(mvm->cfg->extra_phy_cfg_flags);
+ phy_cfg_cmd.phy_cfg |=
+ cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags);
phy_cfg_cmd.calib_control.event_trigger =
mvm->fw->default_calib[ucode_type].event_trigger;
@@ -1110,6 +1139,13 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
+ ret = iwl_set_soc_latency(mvm);
+ if (ret)
+ goto error;
+ }
+
/* Init RSS configuration */
if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
ret = iwl_configure_rxq(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 02df603b6400..7aa1350b093e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -193,6 +193,8 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
.non_asap = 1,
.request_lci = 1,
.request_civicloc = 1,
+ .trigger_based = 1,
+ .non_trigger_based = 1,
.max_bursts_exponent = -1, /* all supported */
.max_ftms_per_burst = 0, /* no limits */
.bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
@@ -201,7 +203,8 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
BIT(NL80211_CHAN_WIDTH_80),
.preambles = BIT(NL80211_PREAMBLE_LEGACY) |
BIT(NL80211_PREAMBLE_HT) |
- BIT(NL80211_PREAMBLE_VHT),
+ BIT(NL80211_PREAMBLE_VHT) |
+ BIT(NL80211_PREAMBLE_HE),
},
};
@@ -614,7 +617,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->max_sched_scan_reqs = 1;
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
- hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+ hw->wiphy->max_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw);
/* we create the 802.11 header and zero length SSID IE. */
hw->wiphy->max_sched_scan_ie_len =
SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
@@ -702,7 +705,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
- mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
+ mvm->wowlan.max_nd_match_sets =
+ iwl_umac_scan_get_max_profiles(mvm->fw);
hw->wiphy->wowlan = &mvm->wowlan;
}
#endif
@@ -2019,7 +2023,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
.sta_id = sta_id,
.tid_limit = IWL_MAX_TID_COUNT,
- .bss_color = vif->bss_conf.bss_color,
+ .bss_color = vif->bss_conf.he_bss_color.color,
.htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
.frame_time_rts_th =
cpu_to_le16(vif->bss_conf.frame_time_rts_th),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index d6ecc2d2bf21..afcf2b98a9cb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -2147,4 +2147,11 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
iwl_mvm_get_ctrl_pos(chandef));
}
+static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
+{
+ u8 ver = iwl_mvm_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
+ SCAN_OFFLOAD_UPDATE_PROFILES_CMD);
+ return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
+ IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;
+}
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index ca99a9c4f70e..15d11fb72aca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -416,8 +416,7 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
return IEEE80211_MAX_MPDU_LEN_VHT_7991;
default:
return IEEE80211_MAX_MPDU_LEN_VHT_3895;
- }
-
+ }
} else if (ht_cap->ht_supported) {
if (ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU)
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 1a990ed9c3ca..c1aba2bf73cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -3697,7 +3697,7 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
!(rate & RATE_MCS_HE_MSK)) {
int index = iwl_hwrate_to_plcp_idx(rate);
- return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
+ return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps",
rs_pretty_ant(ant),
index == IWL_RATE_INVALID ? "BAD" :
iwl_rate_mcs[index].mbps);
@@ -3740,7 +3740,7 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
}
return scnprintf(buf, bufsz,
- "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
+ "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s",
rate, type, rs_pretty_ant(ant), bw, mcs, nss,
(rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
(rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
@@ -3888,6 +3888,8 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
desc += scnprintf(buff + desc, bufsz - desc,
" rate[%d] 0x%X ", i, r);
desc += rs_pretty_print_rate(buff + desc, bufsz - desc, r);
+ if (desc < bufsz - 1)
+ buff[desc++] = '\n';
}
ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 3b263c81bcae..7a6ad1ff7055 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -91,8 +91,14 @@
#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
/* number of scan channels */
#define IWL_SCAN_NUM_CHANNELS 112
-/* adaptive dwell default number of APs override */
-#define IWL_SCAN_ADWELL_DEFAULT_N_APS_OVERRIDE 10
+/* adaptive dwell number of APs override mask for p2p friendly GO */
+#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT BIT(20)
+/* adaptive dwell number of APs override mask for social channels */
+#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT BIT(21)
+/* adaptive dwell number of APs override for p2p friendly GO channels */
+#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
+/* adaptive dwell number of APs override for social channels */
+#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
struct iwl_mvm_scan_timing_params {
u32 suspend_time;
@@ -588,11 +594,15 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req)
{
struct iwl_scan_offload_profile *profile;
- struct iwl_scan_offload_profile_cfg *profile_cfg;
+ struct iwl_scan_offload_profile_cfg_v1 *profile_cfg_v1;
struct iwl_scan_offload_blacklist *blacklist;
+ struct iwl_scan_offload_profile_cfg_data *data;
+ int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw);
+ int profile_cfg_size = sizeof(*data) +
+ sizeof(*profile) * max_profiles;
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
- .len[1] = sizeof(*profile_cfg),
+ .len[1] = profile_cfg_size,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
.dataflags[1] = IWL_HCMD_DFL_NOCOPY,
};
@@ -600,7 +610,7 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
int i;
int ret;
- if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
+ if (WARN_ON(req->n_match_sets > max_profiles))
return -EIO;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
@@ -612,27 +622,37 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
if (!blacklist)
return -ENOMEM;
- profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
- if (!profile_cfg) {
+ profile_cfg_v1 = kzalloc(profile_cfg_size, GFP_KERNEL);
+ if (!profile_cfg_v1) {
ret = -ENOMEM;
goto free_blacklist;
}
cmd.data[0] = blacklist;
cmd.len[0] = sizeof(*blacklist) * blacklist_len;
- cmd.data[1] = profile_cfg;
+ cmd.data[1] = profile_cfg_v1;
+
+ /* if max_profile is MAX_PROFILES_V2, we have the new API */
+ if (max_profiles == IWL_SCAN_MAX_PROFILES_V2) {
+ struct iwl_scan_offload_profile_cfg *profile_cfg =
+ (struct iwl_scan_offload_profile_cfg *)profile_cfg_v1;
+
+ data = &profile_cfg->data;
+ } else {
+ data = &profile_cfg_v1->data;
+ }
/* No blacklist configuration */
+ data->num_profiles = req->n_match_sets;
+ data->active_clients = SCAN_CLIENT_SCHED_SCAN;
+ data->pass_match = SCAN_CLIENT_SCHED_SCAN;
+ data->match_notify = SCAN_CLIENT_SCHED_SCAN;
- profile_cfg->num_profiles = req->n_match_sets;
- profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
- profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
- profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
- profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
+ data->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
for (i = 0; i < req->n_match_sets; i++) {
- profile = &profile_cfg->profiles[i];
+ profile = &profile_cfg_v1->profiles[i];
profile->ssid_index = i;
/* Support any cipher and auth algorithm */
profile->unicast_cipher = 0xff;
@@ -645,7 +665,7 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
ret = iwl_mvm_send_cmd(mvm, &cmd);
- kfree(profile_cfg);
+ kfree(profile_cfg_v1);
free_blacklist:
kfree(blacklist);
@@ -1529,14 +1549,19 @@ static int iwl_mvm_scan_ch_and_band_to_idx(u8 channel_id, u8 band)
return -EINVAL;
}
+static const u8 p2p_go_friendly_chs[] = {
+ 36, 40, 44, 48, 149, 153, 157, 161, 165,
+};
+
+static const u8 social_chs[] = {
+ 1, 6, 11
+};
+
static void iwl_mvm_scan_ch_add_n_aps_override(enum nl80211_iftype vif_type,
u8 ch_id, u8 band, u8 *ch_bitmap,
size_t bitmap_n_entries)
{
int i;
- static const u8 p2p_go_friendly_chs[] = {
- 36, 40, 44, 48, 149, 153, 157, 161, 165,
- };
if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
return;
@@ -1561,6 +1586,35 @@ static void iwl_mvm_scan_ch_add_n_aps_override(enum nl80211_iftype vif_type,
}
}
+static u32 iwl_mvm_scan_ch_n_aps_flag(enum nl80211_iftype vif_type, u8 ch_id)
+{
+ int i;
+ u32 flags = 0;
+
+ if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
+ if (p2p_go_friendly_chs[i] == ch_id) {
+ flags |= IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT;
+ break;
+ }
+ }
+
+ if (flags)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(social_chs); i++) {
+ if (social_chs[i] == ch_id) {
+ flags |= IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT;
+ break;
+ }
+ }
+
+out:
+ return flags;
+}
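
The override slots pair with the per-channel flag bits documented in fw/api/scan.h above: BIT(20) selects n_aps_override[0] (P2P-GO-friendly channels, 10 APs) and BIT(21) selects n_aps_override[1] (social channels, 2 APs). A worked example against the helper just added:

	/* Sketch: channel 6 is in social_chs[], so for a P2P device the
	 * helper returns BIT(21) and the firmware applies
	 * n_aps_override[1] == IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS (2). */
	u32 flags = iwl_mvm_scan_ch_n_aps_flag(NL80211_IFTYPE_P2P_DEVICE, 6);
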
+
static void
iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
struct ieee80211_channel **channels,
@@ -1615,6 +1669,30 @@ iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm,
}
}
+static void
+iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm,
+ struct ieee80211_channel **channels,
+ struct iwl_scan_channel_params_v6 *cp,
+ int n_channels, u32 flags,
+ enum nl80211_iftype vif_type)
+{
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ enum nl80211_band band = channels[i]->band;
+ struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
+ u32 n_aps_flag =
+ iwl_mvm_scan_ch_n_aps_flag(vif_type,
+ channels[i]->hw_value);
+
+ cfg->flags = cpu_to_le32(flags | n_aps_flag);
+ cfg->v2.channel_num = channels[i]->hw_value;
+ cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band);
+ cfg->v2.iter_count = 1;
+ cfg->v2.iter_interval = 0;
+ }
+}
+
static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
@@ -1915,7 +1993,7 @@ iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
{
cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
cp->count = params->n_channels;
- cp->num_of_aps_override = IWL_SCAN_ADWELL_DEFAULT_N_APS_OVERRIDE;
+ cp->num_of_aps_override = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
iwl_mvm_umac_scan_cfg_channels_v4(mvm, params->channels, cp,
params->n_channels,
@@ -1923,6 +2001,23 @@ iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
vif->type);
}
+static void
+iwl_mvm_scan_umac_fill_ch_p_v6(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_channel_params_v6 *cp,
+ u32 channel_cfg_flags)
+{
+ cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
+ cp->count = params->n_channels;
+ cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
+ cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
+
+ iwl_mvm_umac_scan_cfg_channels_v6(mvm, params->channels, cp,
+ params->n_channels,
+ channel_cfg_flags,
+ vif->type);
+}
static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_scan_params *params, int type,
@@ -1990,6 +2085,40 @@ static int iwl_mvm_scan_umac_v13(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return 0;
}
+static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params, int type,
+ int uid)
+{
+ struct iwl_scan_req_umac_v14 *cmd = mvm->scan_cmd;
+ struct iwl_scan_req_params_v14 *scan_p = &cmd->scan_params;
+ int ret;
+ u16 gen_flags;
+ u32 bitmap_ssid = 0;
+
+ mvm->scan_uid_status[uid] = type;
+
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->uid = cpu_to_le32(uid);
+
+ gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
+ iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
+ &scan_p->general_params,
+ gen_flags);
+
+ ret = iwl_mvm_fill_scan_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
+ if (ret)
+ return ret;
+
+ iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
+ &bitmap_ssid);
+ iwl_mvm_scan_umac_fill_ch_p_v6(mvm, params, vif,
+ &scan_p->channel_params, bitmap_ssid);
+
+ return 0;
+}
+
static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
{
return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
@@ -2105,6 +2234,7 @@ struct iwl_scan_umac_handler {
static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
/* set the newest version first to shorten the list traverse time */
+ IWL_SCAN_UMAC_HANDLER(14),
IWL_SCAN_UMAC_HANDLER(13),
IWL_SCAN_UMAC_HANDLER(12),
};
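
IWL_SCAN_UMAC_HANDLER(14) registers the new request version in the dispatch table, pairing the version number with the matching iwl_mvm_scan_umac_v<N> function. The macro itself is outside this hunk; its likely expansion (reconstructed) is:

	/* Sketch: likely shape of the handler-table entry macro. */
	#define IWL_SCAN_UMAC_HANDLER(_ver) {			\
		.version = _ver,				\
		.handler = iwl_mvm_scan_umac_v##_ver,		\
	}
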
@@ -2463,6 +2593,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
static int iwl_scan_req_umac_get_size(u8 scan_ver)
{
switch (scan_ver) {
+ IWL_SCAN_REQ_UMAC_HANDLE_SIZE(14);
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(13);
IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index 9da0dae78510..368b9d117f73 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -162,7 +162,9 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
* capabilities of the AP station, and choose the watermark accordingly.
*/
if (sta) {
- if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
+ if (sta->ht_cap.ht_supported ||
+ sta->vht_cap.vht_supported ||
+ sta->he_cap.has_he) {
switch (sta->rx_nss) {
case 1:
watermark = SF_W_MARK_SISO;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index f441b20e1642..6744c0281ffb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -513,442 +513,26 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-
- {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
-
- {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)},
+ {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9560_trans_cfg)},
+ {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9560_trans_cfg)},
+ {IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_trans_cfg)},
+ {IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_trans_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_trans_cfg)},
- {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
-
- {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-
- {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
-
- {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_160_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_160_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_160_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_160_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_160_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
- {IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
-
- {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-
- {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-
- {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-
- {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
-
- {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
-
- {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x0030, iwl9560_2ac_cfg_qnj_jf_b0)},
-
-/* 22000 Series */
- {IWL_PCI_DEVICE(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0244, iwl_ax101_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x02F0, 0x4244, iwl_ax101_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0244, iwl_ax101_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x06F0, 0x4244, iwl_ax101_cfg_quz_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0000, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0040, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0044, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0310, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x1080, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x2720, 0x2074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x4070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x4244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x4244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x43F0, 0x4244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4244, iwl_ax101_cfg_qu_hr)},
-
- {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
+/* Qu devices */
+ {IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
+ {IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
+ {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_trans_cfg)},
+ {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_trans_cfg)},
+
+ {IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)},
+
+ {IWL_PCI_DEVICE(0x2720, PCI_ANY_ID, iwl_qnj_trans_cfg)},
+
+ {IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_trans_cfg)},
{IWL_PCI_DEVICE(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0)},
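/*
 * The hunk above collapses every per-subsystem-ID row into a single
 * PCI_ANY_ID wildcard row per device ID, whose driver_data names only
 * a transport config (iwl9000_trans_cfg and friends); the full device
 * config and marketing name are resolved later against
 * iwl_dev_info_table.  For reference, a minimal sketch of what
 * IWL_PCI_DEVICE() expands to (the macro lives elsewhere in drv.c;
 * the exact fields here are recalled rather than quoted, so treat
 * them as an assumption):
 */
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
	.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
	.driver_data = (kernel_ulong_t)&(cfg)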
@@ -971,31 +555,398 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
-#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
- { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
- .name = _name }
+#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
+ _rf_id, _no_160, _cores, _cfg, _name) \
+ { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
+ .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, \
+ .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
+ .mac_step = _mac_step }
+
+#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
+ _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
+ _cfg, _name)
static const struct iwl_dev_info iwl_dev_info_table[] = {
#if IS_ENABLED(CONFIG_IWLMVM)
- IWL_DEV_INFO(0x2526, 0x0010, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x0014, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x0018, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x001C, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x4010, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x4018, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x401C, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x6010, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x6014, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x8014, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0x8010, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0xA014, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0xE010, iwl9260_2ac_160_cfg, iwl9260_160_name),
- IWL_DEV_INFO(0x2526, 0xE014, iwl9260_2ac_160_cfg, iwl9260_160_name),
-
- IWL_DEV_INFO(0x2526, 0x0030, iwl9560_2ac_160_cfg, iwl9560_160_name),
- IWL_DEV_INFO(0x2526, 0x0038, iwl9560_2ac_160_cfg, iwl9560_160_name),
- IWL_DEV_INFO(0x2526, 0x003C, iwl9560_2ac_160_cfg, iwl9560_160_name),
- IWL_DEV_INFO(0x2526, 0x4030, iwl9560_2ac_160_cfg, iwl9560_160_name),
+/* 9000 */
+ IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name),
+ IWL_DEV_INFO(0x2526, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
+ IWL_DEV_INFO(0x2526, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
+ IWL_DEV_INFO(0x30DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
+ IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
+ IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
+ IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
+
+ IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
+
+/* AX200 */
+ IWL_DEV_INFO(0x2723, 0x1653, iwl_ax200_cfg_cc, iwl_ax200_killer_1650w_name),
+ IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name),
+ IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name),
+
+/* Qu with Hr */
+ IWL_DEV_INFO(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x43F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0xA0F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x0244, iwl_ax101_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x4244, iwl_ax101_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x0244, iwl_ax101_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x06F0, 0x4244, iwl_ax101_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x34F0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
+
+ IWL_DEV_INFO(0x3DF0, 0x0044, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x0244, iwl_ax101_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+ IWL_DEV_INFO(0x3DF0, 0x4244, iwl_ax101_cfg_qu_hr, NULL),
+
+ IWL_DEV_INFO(0x2720, 0x0000, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0040, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0044, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0070, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0074, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0078, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x007C, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0244, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0310, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x0A10, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x1080, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x1651, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x1652, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x2074, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x4070, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+ IWL_DEV_INFO(0x2720, 0x4244, iwl22000_2ax_cfg_qnj_hr_b0, NULL),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_2ac_cfg_soc, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_2ac_cfg_soc, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_2ac_cfg_soc, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_2ac_cfg_soc, iwl9462_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_2ac_cfg_soc, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_2ac_cfg_soc, iwl9560_name),
+
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9461_160_name),
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9461_name),
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9462_160_name),
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9462_name),
+
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9560_160_name),
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9560_name),
+
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_CORES_BT_GNSS,
+ iwl9260_2ac_cfg, iwl9270_160_name),
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS,
+ iwl9260_2ac_cfg, iwl9270_name),
+
+ _IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9162_160_name),
+ _IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9162_name),
+
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9260_160_name),
+ _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9260_2ac_cfg, iwl9260_name),
+
+ /* Qu with Jf */
+ /* Qu B step */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9462_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9560_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
+
+ /* Qu C step */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9462_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9560_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
+ IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name),
+
+ /* QuZ */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9462_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9560_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
+ IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name),
+
+ /* QnJ */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9462_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9560_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
+ IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+ iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
#endif /* CONFIG_IWLMVM */
};
@@ -1031,16 +982,34 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* the trans_cfg should never change, so set it now */
iwl_trans->trans_cfg = trans;
+ iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
+
for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {
const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
-
- if ((dev_info->device == IWL_CFG_ANY ||
+ if ((dev_info->device == (u16)IWL_CFG_ANY ||
dev_info->device == pdev->device) &&
- (dev_info->subdevice == IWL_CFG_ANY ||
- dev_info->subdevice == pdev->subsystem_device)) {
+ (dev_info->subdevice == (u16)IWL_CFG_ANY ||
+ dev_info->subdevice == pdev->subsystem_device) &&
+ (dev_info->mac_type == (u16)IWL_CFG_ANY ||
+ dev_info->mac_type ==
+ CSR_HW_REV_TYPE(iwl_trans->hw_rev)) &&
+ (dev_info->mac_step == (u8)IWL_CFG_ANY ||
+ dev_info->mac_step ==
+ CSR_HW_REV_STEP(iwl_trans->hw_rev)) &&
+ (dev_info->rf_type == (u16)IWL_CFG_ANY ||
+ dev_info->rf_type ==
+ CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id)) &&
+ (dev_info->rf_id == (u8)IWL_CFG_ANY ||
+ dev_info->rf_id ==
+ IWL_SUBDEVICE_RF_ID(pdev->subsystem_device)) &&
+ (dev_info->no_160 == (u8)IWL_CFG_ANY ||
+ dev_info->no_160 ==
+ IWL_SUBDEVICE_NO_160(pdev->subsystem_device)) &&
+ (dev_info->cores == (u8)IWL_CFG_ANY ||
+ dev_info->cores ==
+ IWL_SUBDEVICE_CORES(pdev->subsystem_device))) {
iwl_trans->cfg = dev_info->cfg;
iwl_trans->name = dev_info->name;
- goto found;
}
}
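/*
 * A self-contained sketch of the lookup the loop above implements,
 * reduced from eight match fields to three.  IWL_CFG_ANY is a wildcard
 * sentinel wider than the struct fields, hence the (u16)/(u8) casts in
 * the real code.  Note that the patch also drops the old "goto found",
 * so every row is visited and the last matching row takes effect,
 * letting a later, more specific row refine an earlier generic match.
 * Names and values below are illustrative only:
 */
#include <stdint.h>
#include <stdio.h>

#define CFG_ANY 0xFFFF

struct dev_info {
	uint16_t device;
	uint16_t subdevice;
	uint16_t mac_type;
	const char *name;
};

/* generic row first; the later row refines it for one subdevice */
static const struct dev_info table[] = {
	{ 0x2526, CFG_ANY, CFG_ANY, "generic 9260-family device" },
	{ 0x2526, 0x1550,  CFG_ANY, "Killer Wireless-AC 1550" },
};

static const char *lookup(uint16_t dev, uint16_t subdev, uint16_t mac)
{
	const char *name = NULL;
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		const struct dev_info *d = &table[i];

		if ((d->device == CFG_ANY || d->device == dev) &&
		    (d->subdevice == CFG_ANY || d->subdevice == subdev) &&
		    (d->mac_type == CFG_ANY || d->mac_type == mac))
			name = d->name;	/* no break: last match wins */
	}
	return name;
}

int main(void)
{
	printf("%s\n", lookup(0x2526, 0x1550, 0x42)); /* Killer row */
	printf("%s\n", lookup(0x2526, 0x0034, 0x42)); /* generic row */
	return 0;
}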
@@ -1062,8 +1031,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
iwl_trans->cfg = cfg_7265d;
- iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
-
if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) {
iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
@@ -1092,9 +1059,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
iwl_trans->cfg = &iwl_ax101_cfg_qu_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
- iwl_trans->cfg = &iwl22000_2ax_cfg_jf;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
IWL_ERR(iwl_trans, "RF ID HRCDB is not supported\n");
return -EINVAL;
@@ -1103,59 +1067,31 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id));
return -EINVAL;
}
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
- u32 hw_status;
-
- hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS);
- if (CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_B_STEP)
- iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
- else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
- CSR_HW_RF_STEP(iwl_trans->hw_rf_id) ==
- SILICON_A_STEP)
- iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
}
/*
* This is a hack to switch from Qu B0 to Qu C0. We need to
- * do this for all cfgs that use Qu B0. All this code is in
- * urgent need for a refactor, but for now this is the easiest
- * thing to do to support Qu C-step.
+ * do this for all cfgs that use Qu B0, except for those using
+ * Jf, which have already been moved to the new table. The
+ * rest must be removed once we convert Qu with Hr as well.
*/
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
- if (cfg == &iwl_ax101_cfg_qu_hr)
+ if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
- else if (cfg == &iwl_ax201_cfg_qu_hr)
+ else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
- else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
- iwl_trans->cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0;
- else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
- iwl_trans->cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0;
- else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
- iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
- else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
- iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
- else if (cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
+ else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
- else if (cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
+ else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
}
/* same thing for QuZ... */
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
- if (cfg == &iwl_ax101_cfg_qu_hr)
- cfg = &iwl_ax101_cfg_quz_hr;
- else if (cfg == &iwl_ax201_cfg_qu_hr)
- cfg = &iwl_ax201_cfg_quz_hr;
- else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
- else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
- else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
- cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
- else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
- cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
+ if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
+ iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
+ else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
+ iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
}
#endif
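/*
 * The Qu C0 and QuZ hunks above do more than reword: now that the
 * config is resolved into iwl_trans->cfg by the device-info table,
 * reassigning the local 'cfg' pointer would no longer propagate
 * anywhere, so the checks are rewritten to compare and assign
 * iwl_trans->cfg directly.  A reduced sketch of the pattern (all
 * names hypothetical):
 */
struct cfg { int id; };
struct trans { const struct cfg *cfg; };

static const struct cfg base_cfg = { 1 };
static const struct cfg quz_cfg = { 2 };

static void probe_sketch(struct trans *trans, int is_quz)
{
	const struct cfg *cfg = &base_cfg;

	trans->cfg = cfg;		/* resolved early, as the table does */
	if (is_quz)
		cfg = &quz_cfg;		/* no effect: only the local changes */
	if (is_quz)
		trans->cfg = &quz_cfg;	/* the patch's form: effective */
}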
@@ -1166,7 +1102,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!iwl_trans->cfg)
iwl_trans->cfg = cfg;
-found:
/* if we don't have a name yet, copy name from the old cfg */
if (!iwl_trans->name)
iwl_trans->name = iwl_trans->cfg->name;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 72f144c3a46e..595e6873d56e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -718,7 +718,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 427fcea5cb2d..8c29071cb415 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1043,7 +1043,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
RFH_GEN_CFG_SERVICE_DMA_SNOOP |
RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
- trans->cfg->integrated ?
+ trans->trans_cfg->integrated ?
RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
RFH_GEN_CFG_RB_CHUNK_SIZE_128));
/* Enable the relevant rx queues */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 38d8fe21690a..e4cbd8daa7c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1817,7 +1817,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
iwl_trans_pcie_sw_reset(trans);
if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
- trans->cfg->integrated) {
+ trans->trans_cfg->integrated) {
err = iwl_pcie_gen2_force_power_gating(trans);
if (err)
return err;
@@ -2206,6 +2206,13 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
return ret;
}
+static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val)
+{
+ return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
+ ofs, val);
+}
+
static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
unsigned long txqs,
bool freeze)
@@ -3380,6 +3387,7 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
.write_prph = iwl_trans_pcie_write_prph, \
.read_mem = iwl_trans_pcie_read_mem, \
.write_mem = iwl_trans_pcie_write_mem, \
+ .read_config32 = iwl_trans_pcie_read_config32, \
.configure = iwl_trans_pcie_configure, \
.set_pmi = iwl_trans_pcie_set_pmi, \
.sw_reset = iwl_trans_pcie_sw_reset, \
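/*
 * The trans.c hunk above adds a read_config32 transport op that simply
 * forwards to pci_read_config_dword(), so layers above the PCIe
 * transport can read PCI config space without reaching for the
 * pci_dev.  A sketch of how a caller would consume it, assuming a thin
 * inline wrapper in iwl-trans.h (the wrapper shown here is an
 * assumption, not quoted from the tree):
 */
static inline int iwl_trans_read_config32(struct iwl_trans *trans,
					  u32 ofs, u32 *val)
{
	return trans->ops->read_config32(trans, ofs, val);
}

static void example_dump_ids(struct iwl_trans *trans)
{
	u32 id;

	/* dword 0 of config space holds the vendor/device IDs */
	if (!iwl_trans_read_config32(trans, 0, &id))
		IWL_INFO(trans, "PCI vendor/device: 0x%08x\n", id);
}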
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 2f69a6469fe7..4582d418ba4d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1287,7 +1287,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
 * need to be reclaimed. As a result, some free space forms. If there is
* enough free space (> low mark), wake the stack that feeds us.
*/
-void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
+static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id];
diff --git a/drivers/net/wireless/intersil/hostap/hostap_common.h b/drivers/net/wireless/intersil/hostap/hostap_common.h
index 22543538239b..dd29a8e8d349 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_common.h
+++ b/drivers/net/wireless/intersil/hostap/hostap_common.h
@@ -322,7 +322,7 @@ struct prism2_download_param {
u32 addr; /* wlan card address */
u32 len;
void __user *ptr; /* pointer to data in user space */
- } data[0];
+ } data[];
};
#define PRISM2_MAX_DOWNLOAD_AREA_LEN 131072
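/*
 * The data[0] -> data[] changes in this and the following headers swap
 * the old GNU zero-length-array idiom for a C99 flexible array member;
 * struct layout and sizing are unchanged, but compilers and
 * bounds-checking tools can now diagnose misuse.  A sketch of the
 * usual allocation pattern (hypothetical struct, mirroring the ones
 * above):
 */
#include <linux/overflow.h>
#include <linux/slab.h>

struct blob {
	u32 len;
	u8 data[];		/* flexible array: must be last member */
};

static struct blob *blob_alloc(u32 len)
{
	/* struct_size() computes sizeof(*b) + len with overflow checks */
	struct blob *b = kzalloc(struct_size(b, data, len), GFP_KERNEL);

	if (b)
		b->len = len;
	return b;
}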
diff --git a/drivers/net/wireless/intersil/hostap/hostap_download.c b/drivers/net/wireless/intersil/hostap/hostap_download.c
index 8722000b6c27..7c6a5a6d1d45 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_download.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_download.c
@@ -232,11 +232,11 @@ static int prism2_download_aux_dump_proc_open(struct inode *inode, struct file *
return ret;
}
-static const struct file_operations prism2_download_aux_dump_proc_fops = {
- .open = prism2_download_aux_dump_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_private,
+static const struct proc_ops prism2_download_aux_dump_proc_ops = {
+ .proc_open = prism2_download_aux_dump_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release_private,
};
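/*
 * Since v5.6, procfs entries register a dedicated struct proc_ops
 * rather than reusing struct file_operations; the seq_file helpers
 * plug in unchanged under the proc_-prefixed members.  Registration
 * then looks roughly like this (entry name, mode, and parent are
 * illustrative, not taken from this driver):
 */
static void example_register(struct proc_dir_entry *dir)
{
	proc_create("aux_dump", 0444, dir,
		    &prism2_download_aux_dump_proc_ops);
}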
diff --git a/drivers/net/wireless/intersil/hostap/hostap_wlan.h b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
index 487883fbb58c..dd2603d9b5d3 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
@@ -615,7 +615,7 @@ struct prism2_download_data {
u32 addr; /* wlan card address */
u32 len;
u8 *data; /* allocated data */
- } data[0];
+ } data[];
};
diff --git a/drivers/net/wireless/intersil/orinoco/fw.c b/drivers/net/wireless/intersil/orinoco/fw.c
index 400a35217644..015af782881b 100644
--- a/drivers/net/wireless/intersil/orinoco/fw.c
+++ b/drivers/net/wireless/intersil/orinoco/fw.c
@@ -49,7 +49,7 @@ struct orinoco_fw_header {
__le32 pdr_offset; /* Offset to PDR data from eof header */
__le32 pri_offset; /* Offset to primary plug data */
__le32 compat_offset; /* Offset to compatibility data*/
- char signature[0]; /* FW signature length headersize-20 */
+ char signature[]; /* FW signature length headersize-20 */
} __packed;
/* Check the range of various header entries. Return a pointer to a
diff --git a/drivers/net/wireless/intersil/orinoco/hermes.h b/drivers/net/wireless/intersil/orinoco/hermes.h
index 121fdd8e5da2..9f668185b7d2 100644
--- a/drivers/net/wireless/intersil/orinoco/hermes.h
+++ b/drivers/net/wireless/intersil/orinoco/hermes.h
@@ -341,7 +341,7 @@ struct agere_ext_scan_info {
__le64 timestamp;
__le16 beacon_interval;
__le16 capabilities;
- u8 data[0];
+ u8 data[];
} __packed;
#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000)
diff --git a/drivers/net/wireless/intersil/orinoco/hermes_dld.c b/drivers/net/wireless/intersil/orinoco/hermes_dld.c
index 4a10b7aca043..dbeadfcfefe2 100644
--- a/drivers/net/wireless/intersil/orinoco/hermes_dld.c
+++ b/drivers/net/wireless/intersil/orinoco/hermes_dld.c
@@ -64,7 +64,7 @@
struct dblock {
__le32 addr; /* adapter address where to write the block */
__le16 len; /* length of the data only, in bytes */
- char data[0]; /* data to be written */
+ char data[]; /* data to be written */
} __packed;
/*
@@ -76,7 +76,7 @@ struct pdr {
__le32 id; /* record ID */
__le32 addr; /* adapter address where to write the data */
__le32 len; /* expected length of the data, in bytes */
- char next[0]; /* next PDR starts here */
+ char next[]; /* next PDR starts here */
} __packed;
/*
@@ -87,7 +87,7 @@ struct pdr {
struct pdi {
__le16 len; /* length of ID and data, in words */
__le16 id; /* record ID */
- char data[0]; /* plug data */
+ char data[]; /* plug data */
} __packed;
/*** FW data block access functions ***/
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 0e42de291803..651c676b5506 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -202,7 +202,7 @@ struct ezusb_packet {
__le16 crc; /* CRC up to here */
__le16 hermes_len;
__le16 hermes_rid;
- u8 data[0];
+ u8 data[];
} __packed;
/* Table of devices that work or may work with this driver */
diff --git a/drivers/net/wireless/intersil/p54/eeprom.h b/drivers/net/wireless/intersil/p54/eeprom.h
index b8f46883a292..1d0aaf54389a 100644
--- a/drivers/net/wireless/intersil/p54/eeprom.h
+++ b/drivers/net/wireless/intersil/p54/eeprom.h
@@ -24,7 +24,7 @@
struct pda_entry {
__le16 len; /* includes both code and data */
__le16 code;
- u8 data[0];
+ u8 data[];
} __packed;
struct eeprom_pda_wrap {
@@ -32,7 +32,7 @@ struct eeprom_pda_wrap {
__le16 pad;
__le16 len;
__le32 arm_opcode;
- u8 data[0];
+ u8 data[];
} __packed;
struct p54_iq_autocal_entry {
@@ -87,7 +87,7 @@ struct pda_pa_curve_data {
u8 channels;
u8 points_per_channel;
u8 padding;
- u8 data[0];
+ u8 data[];
} __packed;
struct pda_rssi_cal_ext_entry {
@@ -119,7 +119,7 @@ struct pda_custom_wrapper {
__le16 entry_size;
__le16 offset;
__le16 len;
- u8 data[0];
+ u8 data[];
} __packed;
/*
diff --git a/drivers/net/wireless/intersil/p54/lmac.h b/drivers/net/wireless/intersil/p54/lmac.h
index e00761536cfc..8adde6ba35ab 100644
--- a/drivers/net/wireless/intersil/p54/lmac.h
+++ b/drivers/net/wireless/intersil/p54/lmac.h
@@ -81,7 +81,7 @@ struct p54_hdr {
__le16 type; /* enum p54_control_frame_types */
u8 rts_tries;
u8 tries;
- u8 data[0];
+ u8 data[];
} __packed;
#define GET_REQ_ID(skb) \
@@ -176,7 +176,7 @@ struct p54_rx_data {
u8 rssi_raw;
__le32 tsf32;
__le32 unalloc0;
- u8 align[0];
+ u8 align[];
} __packed;
enum p54_trap_type {
@@ -267,7 +267,7 @@ struct p54_tx_data {
} __packed normal;
} __packed;
u8 unalloc2[2];
- u8 align[0];
+ u8 align[];
} __packed;
/* unit is ms */
diff --git a/drivers/net/wireless/intersil/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h
index 0a9c1a19380f..3356ea708d81 100644
--- a/drivers/net/wireless/intersil/p54/p54.h
+++ b/drivers/net/wireless/intersil/p54/p54.h
@@ -126,7 +126,7 @@ struct p54_cal_database {
size_t entry_size;
size_t offset;
size_t len;
- u8 data[0];
+ u8 data[];
};
#define EEPROM_READBACK_LEN 0x3fc
diff --git a/drivers/net/wireless/intersil/prism54/oid_mgt.c b/drivers/net/wireless/intersil/prism54/oid_mgt.c
index 5705ad925a51..9fd307ca4b6d 100644
--- a/drivers/net/wireless/intersil/prism54/oid_mgt.c
+++ b/drivers/net/wireless/intersil/prism54/oid_mgt.c
@@ -780,17 +780,17 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
{
switch (isl_oid[n].flags & OID_FLAG_TYPE) {
case OID_TYPE_U32:
- return snprintf(str, PRIV_STR_SIZE, "%u\n", r->u);
+ return scnprintf(str, PRIV_STR_SIZE, "%u\n", r->u);
case OID_TYPE_BUFFER:{
struct obj_buffer *buff = r->ptr;
- return snprintf(str, PRIV_STR_SIZE,
+ return scnprintf(str, PRIV_STR_SIZE,
"size=%u\naddr=0x%X\n", buff->size,
buff->addr);
}
break;
case OID_TYPE_BSS:{
struct obj_bss *bss = r->ptr;
- return snprintf(str, PRIV_STR_SIZE,
+ return scnprintf(str, PRIV_STR_SIZE,
"age=%u\nchannel=%u\n"
"capinfo=0x%X\nrates=0x%X\n"
"basic_rates=0x%X\n", bss->age,
@@ -801,9 +801,9 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
case OID_TYPE_BSSLIST:{
struct obj_bsslist *list = r->ptr;
int i, k;
- k = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", list->nr);
+ k = scnprintf(str, PRIV_STR_SIZE, "nr=%u\n", list->nr);
for (i = 0; i < list->nr; i++)
- k += snprintf(str + k, PRIV_STR_SIZE - k,
+ k += scnprintf(str + k, PRIV_STR_SIZE - k,
"bss[%u] :\nage=%u\nchannel=%u\n"
"capinfo=0x%X\nrates=0x%X\n"
"basic_rates=0x%X\n",
@@ -819,23 +819,23 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
struct obj_frequencies *freq = r->ptr;
int i, t;
printk("nr : %u\n", freq->nr);
- t = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", freq->nr);
+ t = scnprintf(str, PRIV_STR_SIZE, "nr=%u\n", freq->nr);
for (i = 0; i < freq->nr; i++)
- t += snprintf(str + t, PRIV_STR_SIZE - t,
+ t += scnprintf(str + t, PRIV_STR_SIZE - t,
"mhz[%u]=%u\n", i, freq->mhz[i]);
return t;
}
break;
case OID_TYPE_MLME:{
struct obj_mlme *mlme = r->ptr;
- return snprintf(str, PRIV_STR_SIZE,
+ return scnprintf(str, PRIV_STR_SIZE,
"id=0x%X\nstate=0x%X\ncode=0x%X\n",
mlme->id, mlme->state, mlme->code);
}
break;
case OID_TYPE_MLMEEX:{
struct obj_mlmeex *mlme = r->ptr;
- return snprintf(str, PRIV_STR_SIZE,
+ return scnprintf(str, PRIV_STR_SIZE,
"id=0x%X\nstate=0x%X\n"
"code=0x%X\nsize=0x%X\n", mlme->id,
mlme->state, mlme->code, mlme->size);
@@ -843,7 +843,7 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
break;
case OID_TYPE_ATTACH:{
struct obj_attachment *attach = r->ptr;
- return snprintf(str, PRIV_STR_SIZE,
+ return scnprintf(str, PRIV_STR_SIZE,
"id=%d\nsize=%d\n",
attach->id,
attach->size);
@@ -851,7 +851,7 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
break;
case OID_TYPE_SSID:{
struct obj_ssid *ssid = r->ptr;
- return snprintf(str, PRIV_STR_SIZE,
+ return scnprintf(str, PRIV_STR_SIZE,
"length=%u\noctets=%.*s\n",
ssid->length, ssid->length,
ssid->octets);
@@ -860,13 +860,13 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
case OID_TYPE_KEY:{
struct obj_key *key = r->ptr;
int t, i;
- t = snprintf(str, PRIV_STR_SIZE,
+ t = scnprintf(str, PRIV_STR_SIZE,
"type=0x%X\nlength=0x%X\nkey=0x",
key->type, key->length);
for (i = 0; i < key->length; i++)
- t += snprintf(str + t, PRIV_STR_SIZE - t,
+ t += scnprintf(str + t, PRIV_STR_SIZE - t,
"%02X:", key->key[i]);
- t += snprintf(str + t, PRIV_STR_SIZE - t, "\n");
+ t += scnprintf(str + t, PRIV_STR_SIZE - t, "\n");
return t;
}
break;
@@ -874,11 +874,11 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
case OID_TYPE_ADDR:{
unsigned char *buff = r->ptr;
int t, i;
- t = snprintf(str, PRIV_STR_SIZE, "hex data=");
+ t = scnprintf(str, PRIV_STR_SIZE, "hex data=");
for (i = 0; i < isl_oid[n].size; i++)
- t += snprintf(str + t, PRIV_STR_SIZE - t,
+ t += scnprintf(str + t, PRIV_STR_SIZE - t,
"%02X:", buff[i]);
- t += snprintf(str + t, PRIV_STR_SIZE - t, "\n");
+ t += scnprintf(str + t, PRIV_STR_SIZE - t, "\n");
return t;
}
break;
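The snprintf-to-scnprintf conversion matters most in the chained calls above: snprintf() returns the length that would have been written, so once the buffer fills, the running offset (str + k) walks past the end of the buffer and the remaining-size argument (PRIV_STR_SIZE - k) goes negative. scnprintf() returns the number of bytes actually written, which keeps the offset in bounds. A user-space sketch of the same semantics (scnprintf is a kernel helper; this stand-in is hypothetical):

#include <stdarg.h>
#include <stdio.h>

/* mimics the kernel's scnprintf(): returns bytes actually written,
 * excluding the trailing NUL, never more than size - 1 */
static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int i;

        if (size == 0)
                return 0;
        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);
        if (i < 0)
                return 0;
        return (size_t)i < size ? i : (int)(size - 1);
}

With this return convention, an accumulation loop like the ones in mgt_response_to_str() can safely do t += scnprintf_like(str + t, SIZE - t, ...): t can reach SIZE - 1 but never exceed it, so the size argument stays positive.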
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 03738107fd10..7fe8207db6ae 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -4,7 +4,7 @@
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
*/
/*
@@ -33,6 +33,9 @@
#include <net/netns/generic.h>
#include <linux/rhashtable.h>
#include <linux/nospec.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
#include "mac80211_hwsim.h"
#define WARN_QUEUE 100
@@ -300,14 +303,12 @@ static struct net_device *hwsim_mon; /* global monitor netdev */
.band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_freq), \
- .max_power = 20, \
}
#define CHAN5G(_freq) { \
.band = NL80211_BAND_5GHZ, \
.center_freq = (_freq), \
.hw_value = (_freq), \
- .max_power = 20, \
}
static const struct ieee80211_channel hwsim_channels_2ghz[] = {
@@ -615,14 +616,14 @@ static const struct genl_multicast_group hwsim_mcgrps[] = {
/* MAC80211_HWSIM netlink policy */
static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
- [HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
- [HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
+ [HWSIM_ATTR_ADDR_RECEIVER] = NLA_POLICY_ETH_ADDR_COMPAT,
+ [HWSIM_ATTR_ADDR_TRANSMITTER] = NLA_POLICY_ETH_ADDR_COMPAT,
[HWSIM_ATTR_FRAME] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
[HWSIM_ATTR_FLAGS] = { .type = NLA_U32 },
[HWSIM_ATTR_RX_RATE] = { .type = NLA_U32 },
[HWSIM_ATTR_SIGNAL] = { .type = NLA_U32 },
- [HWSIM_ATTR_TX_INFO] = { .type = NLA_UNSPEC,
+ [HWSIM_ATTR_TX_INFO] = { .type = NLA_BINARY,
.len = IEEE80211_TX_MAX_RATES *
sizeof(struct hwsim_tx_rate)},
[HWSIM_ATTR_COOKIE] = { .type = NLA_U64 },
@@ -632,15 +633,61 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 },
[HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG },
[HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG },
+ [HWSIM_ATTR_USE_CHANCTX] = { .type = NLA_FLAG },
[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE] = { .type = NLA_FLAG },
[HWSIM_ATTR_RADIO_NAME] = { .type = NLA_STRING },
[HWSIM_ATTR_NO_VIF] = { .type = NLA_FLAG },
[HWSIM_ATTR_FREQ] = { .type = NLA_U32 },
- [HWSIM_ATTR_PERM_ADDR] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
+ [HWSIM_ATTR_TX_INFO_FLAGS] = { .type = NLA_BINARY },
+ [HWSIM_ATTR_PERM_ADDR] = NLA_POLICY_ETH_ADDR_COMPAT,
[HWSIM_ATTR_IFTYPE_SUPPORT] = { .type = NLA_U32 },
[HWSIM_ATTR_CIPHER_SUPPORT] = { .type = NLA_BINARY },
};
+#if IS_REACHABLE(CONFIG_VIRTIO)
+
+/* MAC80211_HWSIM virtio queues */
+static struct virtqueue *hwsim_vqs[HWSIM_NUM_VQS];
+static bool hwsim_virtio_enabled;
+static spinlock_t hwsim_virtio_lock;
+
+static void hwsim_virtio_rx_work(struct work_struct *work);
+static DECLARE_WORK(hwsim_virtio_rx, hwsim_virtio_rx_work);
+
+static int hwsim_tx_virtio(struct mac80211_hwsim_data *data,
+ struct sk_buff *skb)
+{
+ struct scatterlist sg[1];
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&hwsim_virtio_lock, flags);
+ if (!hwsim_virtio_enabled) {
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ sg_init_one(sg, skb->head, skb_end_offset(skb));
+ err = virtqueue_add_outbuf(hwsim_vqs[HWSIM_VQ_TX], sg, 1, skb,
+ GFP_ATOMIC);
+ if (err)
+ goto out_free;
+ virtqueue_kick(hwsim_vqs[HWSIM_VQ_TX]);
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+ return 0;
+
+out_free:
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+ nlmsg_free(skb);
+ return err;
+}
+#else
+/* cause a linker error if this ends up being needed */
+extern int hwsim_tx_virtio(struct mac80211_hwsim_data *data,
+ struct sk_buff *skb);
+#define hwsim_virtio_enabled false
+#endif
+
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct ieee80211_channel *chan);
@@ -1140,8 +1187,14 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
goto nla_put_failure;
genlmsg_end(skb, msg_head);
- if (hwsim_unicast_netgroup(data, skb, dst_portid))
- goto err_free_txskb;
+
+ if (hwsim_virtio_enabled) {
+ if (hwsim_tx_virtio(data, skb))
+ goto err_free_txskb;
+ } else {
+ if (hwsim_unicast_netgroup(data, skb, dst_portid))
+ goto err_free_txskb;
+ }
/* Enqueue the packet */
skb_queue_tail(&data->pending, my_skb);
@@ -1443,7 +1496,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
/* wmediumd mode check */
_portid = READ_ONCE(data->wmediumd);
- if (_portid)
+ if (_portid || hwsim_virtio_enabled)
return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
/* NO wmediumd detected, perfect medium simulation */
@@ -1549,7 +1602,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
mac80211_hwsim_monitor_rx(hw, skb, chan);
- if (_pid)
+ if (_pid || hwsim_virtio_enabled)
return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
mac80211_hwsim_tx_frame_no_nl(hw, skb, chan);
@@ -1595,6 +1648,11 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
mac80211_hwsim_tx_frame(hw, skb,
rcu_dereference(vif->chanctx_conf)->def.chan);
+ while ((skb = ieee80211_get_buffered_bc(hw, vif)) != NULL) {
+ mac80211_hwsim_tx_frame(hw, skb,
+ rcu_dereference(vif->chanctx_conf)->def.chan);
+ }
+
if (vif->csa_active && ieee80211_csa_is_complete(vif))
ieee80211_csa_finish(vif);
}
@@ -2925,11 +2983,15 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
ieee80211_hw_set(hw, TDLS_WIDER_BW);
if (rctbl)
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_AP_UAPSD |
@@ -2940,6 +3002,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
NL80211_FEATURE_DYNAMIC_SMPS |
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION);
hw->wiphy->interface_modes = param->iftypes;
@@ -3285,11 +3348,14 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
if (!data2)
goto out;
- if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup)
- goto out;
+ if (!hwsim_virtio_enabled) {
+ if (hwsim_net_get_netgroup(genl_info_net(info)) !=
+ data2->netgroup)
+ goto out;
- if (info->snd_portid != data2->wmediumd)
- goto out;
+ if (info->snd_portid != data2->wmediumd)
+ goto out;
+ }
/* look for the skb matching the cookie passed back from user */
skb_queue_walk_safe(&data2->pending, skb, tmp) {
@@ -3379,11 +3445,14 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
if (!data2)
goto out;
- if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup)
- goto out;
+ if (!hwsim_virtio_enabled) {
+ if (hwsim_net_get_netgroup(genl_info_net(info)) !=
+ data2->netgroup)
+ goto out;
- if (info->snd_portid != data2->wmediumd)
- goto out;
+ if (info->snd_portid != data2->wmediumd)
+ goto out;
+ }
/* check if radio is configured properly */
@@ -3924,6 +3993,229 @@ static void hwsim_exit_netlink(void)
genl_unregister_family(&hwsim_genl_family);
}
+#if IS_REACHABLE(CONFIG_VIRTIO)
+static void hwsim_virtio_tx_done(struct virtqueue *vq)
+{
+ unsigned int len;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwsim_virtio_lock, flags);
+ while ((skb = virtqueue_get_buf(vq, &len)))
+ nlmsg_free(skb);
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+}
+
+static int hwsim_virtio_handle_cmd(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh;
+ struct genlmsghdr *gnlh;
+ struct nlattr *tb[HWSIM_ATTR_MAX + 1];
+ struct genl_info info = {};
+ int err;
+
+ nlh = nlmsg_hdr(skb);
+ gnlh = nlmsg_data(nlh);
+ err = genlmsg_parse(nlh, &hwsim_genl_family, tb, HWSIM_ATTR_MAX,
+ hwsim_genl_policy, NULL);
+ if (err) {
+ pr_err_ratelimited("hwsim: genlmsg_parse returned %d\n", err);
+ return err;
+ }
+
+ info.attrs = tb;
+
+ switch (gnlh->cmd) {
+ case HWSIM_CMD_FRAME:
+ hwsim_cloned_frame_received_nl(skb, &info);
+ break;
+ case HWSIM_CMD_TX_INFO_FRAME:
+ hwsim_tx_info_frame_received_nl(skb, &info);
+ break;
+ default:
+ pr_err_ratelimited("hwsim: invalid cmd: %d\n", gnlh->cmd);
+ return -EPROTO;
+ }
+ return 0;
+}
+
+static void hwsim_virtio_rx_work(struct work_struct *work)
+{
+ struct virtqueue *vq;
+ unsigned int len;
+ struct sk_buff *skb;
+ struct scatterlist sg[1];
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwsim_virtio_lock, flags);
+ if (!hwsim_virtio_enabled)
+ goto out_unlock;
+
+ skb = virtqueue_get_buf(hwsim_vqs[HWSIM_VQ_RX], &len);
+ if (!skb)
+ goto out_unlock;
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+
+ skb->data = skb->head;
+ skb_set_tail_pointer(skb, len);
+ hwsim_virtio_handle_cmd(skb);
+
+ spin_lock_irqsave(&hwsim_virtio_lock, flags);
+ if (!hwsim_virtio_enabled) {
+ nlmsg_free(skb);
+ goto out_unlock;
+ }
+ vq = hwsim_vqs[HWSIM_VQ_RX];
+ sg_init_one(sg, skb->head, skb_end_offset(skb));
+ err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_KERNEL);
+ if (WARN(err, "virtqueue_add_inbuf returned %d\n", err))
+ nlmsg_free(skb);
+ else
+ virtqueue_kick(vq);
+ schedule_work(&hwsim_virtio_rx);
+
+out_unlock:
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+}
+
+static void hwsim_virtio_rx_done(struct virtqueue *vq)
+{
+ schedule_work(&hwsim_virtio_rx);
+}
+
+static int init_vqs(struct virtio_device *vdev)
+{
+ vq_callback_t *callbacks[HWSIM_NUM_VQS] = {
+ [HWSIM_VQ_TX] = hwsim_virtio_tx_done,
+ [HWSIM_VQ_RX] = hwsim_virtio_rx_done,
+ };
+ const char *names[HWSIM_NUM_VQS] = {
+ [HWSIM_VQ_TX] = "tx",
+ [HWSIM_VQ_RX] = "rx",
+ };
+
+ return virtio_find_vqs(vdev, HWSIM_NUM_VQS,
+ hwsim_vqs, callbacks, names, NULL);
+}
+
+static int fill_vq(struct virtqueue *vq)
+{
+ int i, err;
+ struct sk_buff *skb;
+ struct scatterlist sg[1];
+
+ for (i = 0; i < virtqueue_get_vring_size(vq); i++) {
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ sg_init_one(sg, skb->head, skb_end_offset(skb));
+ err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_KERNEL);
+ if (err) {
+ nlmsg_free(skb);
+ return err;
+ }
+ }
+ virtqueue_kick(vq);
+ return 0;
+}
+
+static void remove_vqs(struct virtio_device *vdev)
+{
+ int i;
+
+ vdev->config->reset(vdev);
+
+ for (i = 0; i < ARRAY_SIZE(hwsim_vqs); i++) {
+ struct virtqueue *vq = hwsim_vqs[i];
+ struct sk_buff *skb;
+
+ while ((skb = virtqueue_detach_unused_buf(vq)))
+ nlmsg_free(skb);
+ }
+
+ vdev->config->del_vqs(vdev);
+}
+
+static int hwsim_virtio_probe(struct virtio_device *vdev)
+{
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwsim_virtio_lock, flags);
+ if (hwsim_virtio_enabled) {
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+ return -EEXIST;
+ }
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+
+ err = init_vqs(vdev);
+ if (err)
+ return err;
+
+ err = fill_vq(hwsim_vqs[HWSIM_VQ_RX]);
+ if (err)
+ goto out_remove;
+
+ spin_lock_irqsave(&hwsim_virtio_lock, flags);
+ hwsim_virtio_enabled = true;
+ spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
+
+ schedule_work(&hwsim_virtio_rx);
+ return 0;
+
+out_remove:
+ remove_vqs(vdev);
+ return err;
+}
+
+static void hwsim_virtio_remove(struct virtio_device *vdev)
+{
+ hwsim_virtio_enabled = false;
+
+ cancel_work_sync(&hwsim_virtio_rx);
+
+ remove_vqs(vdev);
+}
+
+/* MAC80211_HWSIM virtio device id table */
+static const struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_MAC80211_HWSIM, VIRTIO_DEV_ANY_ID },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+static struct virtio_driver virtio_hwsim = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = hwsim_virtio_probe,
+ .remove = hwsim_virtio_remove,
+};
+
+static int hwsim_register_virtio_driver(void)
+{
+ spin_lock_init(&hwsim_virtio_lock);
+
+ return register_virtio_driver(&virtio_hwsim);
+}
+
+static void hwsim_unregister_virtio_driver(void)
+{
+ unregister_virtio_driver(&virtio_hwsim);
+}
+#else
+static inline int hwsim_register_virtio_driver(void)
+{
+ return 0;
+}
+
+static inline void hwsim_unregister_virtio_driver(void)
+{
+}
+#endif
+
static int __init init_mac80211_hwsim(void)
{
int i, err;
@@ -3952,10 +4244,14 @@ static int __init init_mac80211_hwsim(void)
if (err)
goto out_unregister_driver;
+ err = hwsim_register_virtio_driver();
+ if (err)
+ goto out_exit_netlink;
+
hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
if (IS_ERR(hwsim_class)) {
err = PTR_ERR(hwsim_class);
- goto out_exit_netlink;
+ goto out_exit_virtio;
}
for (i = 0; i < radios; i++) {
@@ -4067,6 +4363,8 @@ out_free_mon:
free_netdev(hwsim_mon);
out_free_radios:
mac80211_hwsim_free();
+out_exit_virtio:
+ hwsim_unregister_virtio_driver();
out_exit_netlink:
hwsim_exit_netlink();
out_unregister_driver:
@@ -4083,6 +4381,7 @@ static void __exit exit_mac80211_hwsim(void)
{
pr_debug("mac80211_hwsim: unregister radios\n");
+ hwsim_unregister_virtio_driver();
hwsim_exit_netlink();
mac80211_hwsim_free();
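The virtio plumbing added above follows the standard virtqueue buffer lifecycle: the driver hands a buffer to the device with virtqueue_add_outbuf() or virtqueue_add_inbuf() plus virtqueue_kick(), then reclaims it with virtqueue_get_buf() from the queue's callback once the device has consumed or filled it. A condensed sketch of the TX half of that cycle, assuming kernel context, a previously initialized vq, and with error handling trimmed (free_buffer() is a hypothetical stand-in):

struct scatterlist sg[1];
unsigned int len;
void *buf;      /* message to send, e.g. an skb's data area */

/* TX: hand a filled buffer to the device */
sg_init_one(sg, buf, buf_len);
virtqueue_add_outbuf(vq, sg, 1, buf, GFP_ATOMIC);
virtqueue_kick(vq);

/* later, in the queue's done callback: reclaim consumed buffers */
while ((buf = virtqueue_get_buf(vq, &len)))
        free_buffer(buf);       /* hwsim uses nlmsg_free() on skbs */

The RX side inverts this: fill_vq() pre-posts empty buffers with virtqueue_add_inbuf(), and each one that comes back via virtqueue_get_buf() carries a complete generic netlink message that hwsim_virtio_handle_cmd() re-parses with genlmsg_parse() against the same hwsim_genl_policy used for the netlink path, then re-posts to the ring.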
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index a85bc7c5c030..28ade92adcb4 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -3,6 +3,7 @@
* mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
+ * Copyright (C) 2020 Intel Corporation
*/
#ifndef __MAC80211_HWSIM_H
@@ -245,4 +246,24 @@ struct hwsim_tx_rate_flag {
s8 idx;
u16 flags;
} __packed;
+
+/**
+ * DOC: Frame transmission support over virtio
+ *
+ * Frame transmission is also supported over virtio to allow communication
+ * with external entities.
+ */
+
+/**
+ * enum hwsim_vqs - queues for virtio frame transmission
+ *
+ * @HWSIM_VQ_TX: send frames to external entity
+ * @HWSIM_VQ_RX: receive frames and transmission info reports
+ * @HWSIM_NUM_VQS: enum limit
+ */
+enum {
+ HWSIM_VQ_TX,
+ HWSIM_VQ_RX,
+ HWSIM_NUM_VQS,
+};
#endif /* __MAC80211_HWSIM_H */
diff --git a/drivers/net/wireless/marvell/libertas/host.h b/drivers/net/wireless/marvell/libertas/host.h
index a4fc3f79bb17..dfa22468b14a 100644
--- a/drivers/net/wireless/marvell/libertas/host.h
+++ b/drivers/net/wireless/marvell/libertas/host.h
@@ -461,7 +461,7 @@ struct cmd_ds_802_11_scan {
uint8_t bsstype;
uint8_t bssid[ETH_ALEN];
- uint8_t tlvbuffer[0];
+ uint8_t tlvbuffer[];
} __packed;
struct cmd_ds_802_11_scan_rsp {
@@ -469,7 +469,7 @@ struct cmd_ds_802_11_scan_rsp {
__le16 bssdescriptsize;
uint8_t nr_sets;
- uint8_t bssdesc_and_tlvbuffer[0];
+ uint8_t bssdesc_and_tlvbuffer[];
} __packed;
struct cmd_ds_802_11_get_log {
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 30f1025ecb9b..acf61b93b782 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -103,7 +103,7 @@ MODULE_FIRMWARE("sd8688.bin");
struct if_sdio_packet {
struct if_sdio_packet *next;
u16 nb;
- u8 buffer[0] __attribute__((aligned(4)));
+ u8 buffer[] __aligned(4);
};
struct if_sdio_card {
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index d07fe82c557e..cd9f8ecf171f 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -35,7 +35,7 @@
struct if_spi_packet {
struct list_head list;
u16 blen;
- u8 buffer[0] __attribute__((aligned(4)));
+ u8 buffer[] __aligned(4);
};
struct if_spi_card {
@@ -235,8 +235,9 @@ static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
spi_message_add_tail(&dummy_trans, &m);
} else {
/* Busy-wait while the SPU fills the FIFO */
- reg_trans.delay_usecs =
+ reg_trans.delay.value =
DIV_ROUND_UP((100 + (delay * 10)), 1000);
+ reg_trans.delay.unit = SPI_DELAY_UNIT_USECS;
}
/* read in data */
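The delay_usecs change above tracks the SPI core's move to a typed struct spi_delay, which pairs a value with an explicit unit instead of hard-coding microseconds. A minimal sketch of the new form (field names as in linux/spi/spi.h; the buffer and values are invented):

struct spi_transfer t = {
        .tx_buf = cmd_buf,
        .len = sizeof(cmd_buf),
        .delay = {
                .value = 10,                    /* wait 10 ... */
                .unit = SPI_DELAY_UNIT_USECS,   /* ... microseconds */
        },
};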
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.h b/drivers/net/wireless/marvell/libertas/if_usb.h
index 8dc14bec3e16..7d0daeb33c3f 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.h
+++ b/drivers/net/wireless/marvell/libertas/if_usb.h
@@ -91,7 +91,7 @@ struct fwheader {
struct fwdata {
struct fwheader hdr;
__le32 seqnum;
- uint8_t data[0];
+ uint8_t data[];
};
/* fwsyncheader */
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.h b/drivers/net/wireless/marvell/libertas_tf/if_usb.h
index 585ad36f9055..f6dd7373b09e 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.h
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.h
@@ -81,7 +81,7 @@ struct fwheader {
struct fwdata {
struct fwheader hdr;
__le32 seqnum;
- uint8_t data[0];
+ uint8_t data[];
};
/** fwsyncheader */
diff --git a/drivers/net/wireless/marvell/mwifiex/11ac.c b/drivers/net/wireless/marvell/mwifiex/11ac.c
index 59d23fb2365f..756f019ef28a 100644
--- a/drivers/net/wireless/marvell/mwifiex/11ac.c
+++ b/drivers/net/wireless/marvell/mwifiex/11ac.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11ac
+ * NXP Wireless LAN device driver: 802.11ac
*
- * Copyright (C) 2013-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11ac.h b/drivers/net/wireless/marvell/mwifiex/11ac.h
index 1ca92c7a8a4a..29e83468cf3f 100644
--- a/drivers/net/wireless/marvell/mwifiex/11ac.h
+++ b/drivers/net/wireless/marvell/mwifiex/11ac.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11ac
+ * NXP Wireless LAN device driver: 802.11ac
*
- * Copyright (C) 2013-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index 238accfe4f41..d2ee6469e67b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11h
+ * NXP Wireless LAN device driver: 802.11h
*
- * Copyright (C) 2013-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index e435f801bc91..6696bce56178 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n
+ * NXP Wireless LAN device driver: 802.11n
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.h b/drivers/net/wireless/marvell/mwifiex/11n.h
index 33268ce2cd82..83a88eecbda6 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n
+ * NXP Wireless LAN device driver: 802.11n
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index 088612438530..46f41dbcf30d 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n Aggregation
+ * NXP Wireless LAN device driver: 802.11n Aggregation
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.h b/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
index 8279b159da7c..382c1265c441 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n Aggregation
+ * NXP Wireless LAN device driver: 802.11n Aggregation
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 05a3c61ac603..0bdafe9f66db 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
+ * NXP Wireless LAN device driver: 802.11n RX Re-ordering
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
index 22d991f514c8..465f244b3636 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
+ * NXP Wireless LAN device driver: 802.11n RX Re-ordering
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index d89684168500..1566d2197906 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: CFG80211
+ * NXP Wireless LAN device driver: CFG80211
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
@@ -3052,7 +3052,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT;
- dev->hard_header_len += MWIFIEX_MIN_DATA_HEADER_LEN;
+ dev->needed_headroom = MWIFIEX_MIN_DATA_HEADER_LEN;
dev->ethtool_ops = &mwifiex_ethtool_ops;
mdev_priv = netdev_priv(dev);
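The switch from hard_header_len to needed_headroom above reflects their different contracts: hard_header_len describes a real link-layer header that the core builds and expects to be present, while needed_headroom only asks callers to reserve extra space that the driver itself fills in later (here, mwifiex's own interface descriptor). TX paths account for both when sizing buffers; a hedged sketch of the usual reservation pattern, with payload names invented:

/* sketch: reserving device headroom before building a packet;
 * LL_RESERVED_SPACE() covers hard_header_len + needed_headroom */
unsigned int headroom = LL_RESERVED_SPACE(dev);
struct sk_buff *skb = alloc_skb(headroom + payload_len, GFP_KERNEL);

if (!skb)
        return -ENOMEM;
skb_reserve(skb, headroom);
skb_put_data(skb, payload, payload_len);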
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.h b/drivers/net/wireless/marvell/mwifiex/cfg80211.h
index 908367857d58..530a63f13f14 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.h
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: CFG80211
+ * NXP Wireless LAN device driver: CFG80211
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index f1522fb1c1e8..fb91ecfc5546 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: Channel, Frequence and Power
+ * NXP Wireless LAN device driver: Channel, Frequency and Power
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index e8788c35a453..7e4b8cd52605 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: commands and events
+ * NXP Wireless LAN device driver: commands and events
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 8ab114cf3467..dded92db1f37 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: debugfs
+ * NXP Wireless LAN device driver: debugfs
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h
index 46696ea0b23e..6bd23c9b1eed 100644
--- a/drivers/net/wireless/marvell/mwifiex/decl.h
+++ b/drivers/net/wireless/marvell/mwifiex/decl.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: generic data structures and APIs
+ * NXP Wireless LAN device driver: generic data structures and APIs
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/ethtool.c b/drivers/net/wireless/marvell/mwifiex/ethtool.c
index 58400c69ab26..9bdad3f59039 100644
--- a/drivers/net/wireless/marvell/mwifiex/ethtool.c
+++ b/drivers/net/wireless/marvell/mwifiex/ethtool.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: ethtool
+ * NXP Wireless LAN device driver: ethtool
*
- * Copyright (C) 2013-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 1fb76d2f5d3f..a415d73a73e6 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: Firmware specific macros & structures
+ * NXP Wireless LAN device driver: Firmware specific macros & structures
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
@@ -846,7 +846,7 @@ struct mwifiex_ie_types_random_mac {
struct mwifiex_ietypes_chanstats {
struct mwifiex_ie_types_header header;
- struct mwifiex_fw_chan_stats chanstats[0];
+ struct mwifiex_fw_chan_stats chanstats[];
} __packed;
struct mwifiex_ie_types_wildcard_ssid_params {
@@ -1082,7 +1082,7 @@ struct host_cmd_ds_get_hw_spec {
__le32 reserved_6;
__le32 dot_11ac_dev_cap;
__le32 dot_11ac_mcs_support;
- u8 tlvs[0];
+ u8 tlvs[];
} __packed;
struct host_cmd_ds_802_11_rssi_info {
@@ -1140,7 +1140,7 @@ struct ieee_types_assoc_rsp {
__le16 cap_info_bitmap;
__le16 status_code;
__le16 a_id;
- u8 ie_buffer[0];
+ u8 ie_buffer[];
} __packed;
struct host_cmd_ds_802_11_associate_rsp {
@@ -1455,7 +1455,7 @@ struct host_cmd_ds_chan_rpt_event {
__le32 result;
__le64 start_tsf;
__le32 duration;
- u8 tlvbuf[0];
+ u8 tlvbuf[];
} __packed;
struct host_cmd_sdio_sp_rx_aggr_cfg {
@@ -1625,7 +1625,7 @@ struct host_cmd_ds_802_11_bg_scan_config {
__le32 reserved2;
__le32 report_condition;
__le16 reserved3;
- u8 tlv[0];
+ u8 tlv[];
} __packed;
struct host_cmd_ds_802_11_bg_scan_query {
@@ -1720,7 +1720,7 @@ struct mwifiex_ie_types_sta_info {
struct host_cmd_ds_sta_list {
__le16 sta_count;
- u8 tlv[0];
+ u8 tlv[];
} __packed;
struct mwifiex_ie_types_pwr_capability {
@@ -1743,7 +1743,7 @@ struct mwifiex_ie_types_wmm_param_set {
struct mwifiex_ie_types_mgmt_frame {
struct mwifiex_ie_types_header header;
__le16 frame_control;
- u8 frame_contents[0];
+ u8 frame_contents[];
};
struct mwifiex_ie_types_wmm_queue_status {
@@ -1861,7 +1861,7 @@ struct mwifiex_ie_types_2040bssco {
struct mwifiex_ie_types_extcap {
struct mwifiex_ie_types_header header;
- u8 ext_capab[0];
+ u8 ext_capab[];
} __packed;
struct host_cmd_ds_mem_access {
@@ -1918,12 +1918,12 @@ struct mwifiex_assoc_event {
__le16 frame_control;
__le16 cap_info;
__le16 listen_interval;
- u8 data[0];
+ u8 data[];
} __packed;
struct host_cmd_ds_sys_config {
__le16 action;
- u8 tlv[0];
+ u8 tlv[];
};
struct host_cmd_11ac_vht_cfg {
@@ -1956,7 +1956,7 @@ struct host_cmd_tlv_gwk_cipher {
struct host_cmd_tlv_passphrase {
struct mwifiex_ie_types_header header;
- u8 passphrase[0];
+ u8 passphrase[];
} __packed;
struct host_cmd_tlv_wep_key {
@@ -1978,12 +1978,12 @@ struct host_cmd_tlv_encrypt_protocol {
struct host_cmd_tlv_ssid {
struct mwifiex_ie_types_header header;
- u8 ssid[0];
+ u8 ssid[];
} __packed;
struct host_cmd_tlv_rates {
struct mwifiex_ie_types_header header;
- u8 rates[0];
+ u8 rates[];
} __packed;
struct mwifiex_ie_types_bssid_list {
@@ -2100,13 +2100,13 @@ struct mwifiex_fw_mef_entry {
u8 mode;
u8 action;
__le16 exprsize;
- u8 expr[0];
+ u8 expr[];
} __packed;
struct host_cmd_ds_mef_cfg {
__le32 criteria;
__le16 num_entries;
- struct mwifiex_fw_mef_entry mef_entry[0];
+ struct mwifiex_fw_mef_entry mef_entry[];
} __packed;
#define CONNECTION_TYPE_INFRA 0
@@ -2169,7 +2169,7 @@ struct mwifiex_radar_det_event {
struct mwifiex_ie_types_multi_chan_info {
struct mwifiex_ie_types_header header;
__le16 status;
- u8 tlv_buffer[0];
+ u8 tlv_buffer[];
} __packed;
struct mwifiex_ie_types_mc_group_info {
@@ -2185,7 +2185,7 @@ struct mwifiex_ie_types_mc_group_info {
u8 usb_ep_num;
} hid_num;
u8 intf_num;
- u8 bss_type_numlist[0];
+ u8 bss_type_numlist[];
} __packed;
struct meas_rpt_map {
@@ -2250,13 +2250,13 @@ struct coalesce_receive_filt_rule {
u8 num_of_fields;
u8 pkt_type;
__le16 max_coalescing_delay;
- struct coalesce_filt_field_param params[0];
+ struct coalesce_filt_field_param params[];
} __packed;
struct host_cmd_ds_coalesce_cfg {
__le16 action;
__le16 num_of_rules;
- struct coalesce_receive_filt_rule rule[0];
+ struct coalesce_receive_filt_rule rule[];
} __packed;
struct host_cmd_ds_multi_chan_policy {
@@ -2295,7 +2295,7 @@ struct host_cmd_ds_pkt_aggr_ctrl {
struct host_cmd_ds_sta_configure {
__le16 action;
- u8 tlv_buffer[0];
+ u8 tlv_buffer[];
} __packed;
struct host_cmd_ds_command {
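With the TLV buffers above converted to flexible array members, allocation sizes are cleanest to compute with the kernel's struct_size() helper from linux/overflow.h, which does the multiply-and-add with overflow checking. A sketch against one of the structs above (tlv_len and n_stas are hypothetical values from the caller):

struct host_cmd_ds_sta_list *list;

/* struct_size() == sizeof(*list) + tlv_len, overflow-checked */
list = kzalloc(struct_size(list, tlv, tlv_len), GFP_KERNEL);
if (!list)
        return -ENOMEM;
list->sta_count = cpu_to_le16(n_stas);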
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 580387f9f12a..811abe963af2 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -1,11 +1,11 @@
/*
- * Marvell Wireless LAN device driver: management IE handling- setting and
+ * NXP Wireless LAN device driver: management IE handling - setting and
* deleting IE.
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 1aa93e7e9835..82d69bc3aaaf 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: HW/FW Initialization
+ * NXP Wireless LAN device driver: HW/FW Initialization
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index 0dd592ea6e83..3db449efa167 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: ioctl data structures & APIs
+ * NXP Wireless LAN device driver: ioctl data structures & APIs
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index d87aeff70cef..5934f7147547 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: association and ad-hoc start/join
+ * NXP Wireless LAN device driver: association and ad-hoc start/join
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 7d94695e7961..529099137644 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: major functions
+ * NXP Wireless LAN device driver: major functions
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index fa5634af40f7..afaffc325452 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: major data structures and prototypes
+ * NXP Wireless LAN device driver: major data structures and prototypes
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index fc1706d0647d..87b4ccca4b9a 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: PCIE specific handling
+ * NXP Wireless LAN device driver: PCIE specific handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index f7ce9b6db6b4..fc59b522f670 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -3,10 +3,10 @@
* @brief This file contains definitions for PCI-E interface.
* driver.
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index a7968a84aaf8..ff932627a46c 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: scan ioctl and command handling
+ * NXP Wireless LAN device driver: scan ioctl and command handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index fec38b6e86ff..6a2dcb01caf4 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: SDIO specific handling
+ * NXP Wireless LAN device driver: SDIO specific handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index f672bdf52cc1..71cd8629b28e 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: SDIO specific definitions
+ * NXP Wireless LAN device driver: SDIO specific definitions
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 4ed10cf82f9a..0bd93f26bd7f 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station command handling
+ * NXP Wireless LAN device driver: station command handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 20c206da0631..f21660149f58 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station command response handling
+ * NXP Wireless LAN device driver: station command response handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 5fdffb114913..bc79ca4cb803 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station event handling
+ * NXP Wireless LAN device driver: station event handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index fbfa0b15d0c8..653f9e094256 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: functions for station ioctl
+ * NXP Wireless LAN device driver: functions for station ioctl
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index 52a2ce2e78b0..0d2adf887900 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station RX data handling
+ * NXP Wireless LAN device driver: station RX data handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index 37c24b95e642..241305377e20 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: station TX data handling
+ * NXP Wireless LAN device driver: station TX data handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index f8f282ce39bd..97bb87c3676b 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -1,9 +1,10 @@
-/* Marvell Wireless LAN device driver: TDLS handling
+/*
+ * NXP Wireless LAN device driver: TDLS handling
*
- * Copyright (C) 2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available on the worldwide web at
diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c
index e3c1446dd847..a8479b879382 100644
--- a/drivers/net/wireless/marvell/mwifiex/txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/txrx.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: generic TX/RX data handling
+ * NXP Wireless LAN device driver: generic TX/RX data handling
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 0939a8c8f3ab..b48a85d791f6 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: AP specific command handling
+ * NXP Wireless LAN device driver: AP specific command handling
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index 86bfa1b9ef9d..9121447e2701 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: AP event handling
+ * NXP Wireless LAN device driver: AP event handling
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 354b09c5e8dc..77c8595f84f8 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: AP TX and RX data handling
+ * NXP Wireless LAN device driver: AP TX and RX data handling
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index c2365eeb7016..6f3cfde4654c 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: USB specific handling
+ * NXP Wireless LAN device driver: USB specific handling
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.h b/drivers/net/wireless/marvell/mwifiex/usb.h
index 37abd228a84f..d822ec15b7e6 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.h
+++ b/drivers/net/wireless/marvell/mwifiex/usb.h
@@ -1,10 +1,10 @@
/*
* This file contains definitions for mwifiex USB interface driver.
*
- * Copyright (C) 2012-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 3b0d31827681..de89a1e710b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: utility functions
+ * NXP Wireless LAN device driver: utility functions
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/util.h b/drivers/net/wireless/marvell/mwifiex/util.h
index 7cafcecd7b85..44aa80eb7827 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.h
+++ b/drivers/net/wireless/marvell/mwifiex/util.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: utility functions
+ * NXP Wireless LAN device driver: utility functions
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 132f9e8ed68c..a06fff199ea3 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: WMM
+ * NXP Wireless LAN device driver: WMM
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.h b/drivers/net/wireless/marvell/mwifiex/wmm.h
index 38f09762bd2f..04d7da95e307 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.h
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.h
@@ -1,10 +1,10 @@
/*
- * Marvell Wireless LAN device driver: WMM
+ * NXP Wireless LAN device driver: WMM
*
- * Copyright (C) 2011-2014, Marvell International Ltd.
+ * Copyright 2011-2020 NXP
*
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * This software file (the "File") is distributed by NXP
+ * under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index d55f229abeea..47fb4b3ea004 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -592,7 +592,7 @@ struct mwl8k_cmd_pkt {
__u8 seq_num;
__u8 macid;
__le16 result;
- char payload[0];
+ char payload[];
} __packed;
/*
@@ -806,7 +806,7 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
struct mwl8k_dma_data {
__le16 fwlen;
struct ieee80211_hdr wh;
- char data[0];
+ char data[];
} __packed;
/* Routines to add/remove DMA header from skb. */
@@ -2955,7 +2955,7 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
struct mwl8k_cmd_set_beacon {
struct mwl8k_cmd_pkt header;
__le16 beacon_len;
- __u8 beacon[0];
+ __u8 beacon[];
};
static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
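
[ The payload[0] -> payload[] changes above are the tree-wide conversion
from GCC zero-length arrays to C99 flexible array members. A minimal
sketch of how such a trailing-payload struct is sized and allocated;
struct cmd_pkt and cmd_pkt_alloc() are illustrative names, not mwl8k
API, while struct_size() is the kernel's overflow-safe helper:

#include <asm/byteorder.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct cmd_pkt {
	__le16 len;
	char payload[];		/* flexible array member, must be last */
};

static struct cmd_pkt *cmd_pkt_alloc(size_t payload_len)
{
	/* struct_size() checks sizeof(*pkt) + payload_len for overflow,
	 * which an open-coded addition does not */
	struct cmd_pkt *pkt = kzalloc(struct_size(pkt, payload, payload_len),
				      GFP_KERNEL);

	if (pkt)
		pkt->len = cpu_to_le16(payload_len);
	return pkt;
} ]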
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index 99bbc74acda8..d7a1ddc9e407 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
- tx.o agg-rx.o mcu.o airtime.o
+ tx.o agg-rx.o mcu.o
mt76-$(CONFIG_PCI) += pci.o
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 59c187898132..f77f03530259 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -4,7 +4,13 @@
*/
#include "mt76.h"
-#define REORDER_TIMEOUT (HZ / 10)
+static unsigned long mt76_aggr_tid_to_timeo(u8 tidno)
+{
+	/* Voice traffic (AC_VO) always runs without aggregation, so it
+	 * needs no special handling. AC_BE/AC_BK use tids 0-3; for any
+	 * other tid, use a smaller timeout.
+	 */
+ return HZ / (tidno >= 4 ? 25 : 10);
+}
static void
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
@@ -71,7 +77,8 @@ mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
nframes--;
status = (struct mt76_rx_status *)skb->cb;
if (!time_after(jiffies,
- status->reorder_time + REORDER_TIMEOUT))
+ status->reorder_time +
+ mt76_aggr_tid_to_timeo(tid->num)))
continue;
mt76_rx_aggr_release_frames(tid, frames, status->seqno);
@@ -101,7 +108,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
if (nframes)
ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
- REORDER_TIMEOUT);
+ mt76_aggr_tid_to_timeo(tid->num));
mt76_rx_complete(dev, &frames, NULL);
rcu_read_unlock();
@@ -225,7 +232,7 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
mt76_rx_aggr_release_head(tid, frames);
ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
- REORDER_TIMEOUT);
+ mt76_aggr_tid_to_timeo(tid->num));
out:
spin_unlock_bh(&tid->lock);
@@ -245,6 +252,7 @@ int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
tid->dev = dev;
tid->head = ssn;
tid->size = size;
+ tid->num = tidno;
INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
spin_lock_init(&tid->lock);
@@ -268,6 +276,7 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
if (!skb)
continue;
+ tid->reorder_buf[i] = NULL;
tid->nframes--;
dev_kfree_skb(skb);
}
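
[ The fixed REORDER_TIMEOUT becomes a per-tid lookup. A standalone
userspace sketch of the resulting values, assuming an illustrative
CONFIG_HZ and the tid/AC mapping described in the comment above:

#include <stdio.h>

#define HZ 250	/* illustrative CONFIG_HZ value */

static unsigned long aggr_tid_to_timeo(unsigned int tidno)
{
	/* tids 0-3 (AC_BE/AC_BK) keep HZ/10 = 100 ms; higher tids
	 * (video) drop to HZ/25 = 40 ms */
	return HZ / (tidno >= 4 ? 25 : 10);
}

int main(void)
{
	printf("tid 0 -> %lu jiffies (100 ms)\n", aggr_tid_to_timeo(0));
	printf("tid 5 -> %lu jiffies (40 ms)\n", aggr_tid_to_timeo(5));
	return 0;
} ]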
diff --git a/drivers/net/wireless/mediatek/mt76/airtime.c b/drivers/net/wireless/mediatek/mt76/airtime.c
deleted file mode 100644
index a4a785467748..000000000000
--- a/drivers/net/wireless/mediatek/mt76/airtime.c
+++ /dev/null
@@ -1,326 +0,0 @@
-// SPDX-License-Identifier: ISC
-/*
- * Copyright (C) 2019 Felix Fietkau <nbd@nbd.name>
- */
-
-#include "mt76.h"
-
-#define AVG_PKT_SIZE 1024
-
-/* Number of bits for an average sized packet */
-#define MCS_NBITS (AVG_PKT_SIZE << 3)
-
-/* Number of symbols for a packet with (bps) bits per symbol */
-#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))
-
-/* Transmission time (1024 usec) for a packet containing (syms) * symbols */
-#define MCS_SYMBOL_TIME(sgi, syms) \
- (sgi ? \
- ((syms) * 18 * 1024 + 4 * 1024) / 5 : /* syms * 3.6 us */ \
- ((syms) * 1024) << 2 /* syms * 4 us */ \
- )
-
-/* Transmit duration for the raw data part of an average sized packet */
-#define MCS_DURATION(streams, sgi, bps) \
- MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
-
-#define BW_20 0
-#define BW_40 1
-#define BW_80 2
-
-/*
- * Define group sort order: HT40 -> SGI -> #streams
- */
-#define MT_MAX_STREAMS 4
-#define MT_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */
-#define MT_VHT_STREAM_GROUPS 6 /* BW(=3) * SGI(=2) */
-
-#define MT_HT_GROUPS_NB (MT_MAX_STREAMS * \
- MT_HT_STREAM_GROUPS)
-#define MT_VHT_GROUPS_NB (MT_MAX_STREAMS * \
- MT_VHT_STREAM_GROUPS)
-#define MT_GROUPS_NB (MT_HT_GROUPS_NB + \
- MT_VHT_GROUPS_NB)
-
-#define MT_HT_GROUP_0 0
-#define MT_VHT_GROUP_0 (MT_HT_GROUP_0 + MT_HT_GROUPS_NB)
-
-#define MCS_GROUP_RATES 10
-
-#define HT_GROUP_IDX(_streams, _sgi, _ht40) \
- MT_HT_GROUP_0 + \
- MT_MAX_STREAMS * 2 * _ht40 + \
- MT_MAX_STREAMS * _sgi + \
- _streams - 1
-
-#define _MAX(a, b) (((a)>(b))?(a):(b))
-
-#define GROUP_SHIFT(duration) \
- _MAX(0, 16 - __builtin_clz(duration))
-
-/* MCS rate information for an MCS group */
-#define __MCS_GROUP(_streams, _sgi, _ht40, _s) \
- [HT_GROUP_IDX(_streams, _sgi, _ht40)] = { \
- .shift = _s, \
- .duration = { \
- MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234) >> _s, \
- MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) >> _s \
- } \
-}
-
-#define MCS_GROUP_SHIFT(_streams, _sgi, _ht40) \
- GROUP_SHIFT(MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26))
-
-#define MCS_GROUP(_streams, _sgi, _ht40) \
- __MCS_GROUP(_streams, _sgi, _ht40, \
- MCS_GROUP_SHIFT(_streams, _sgi, _ht40))
-
-#define VHT_GROUP_IDX(_streams, _sgi, _bw) \
- (MT_VHT_GROUP_0 + \
- MT_MAX_STREAMS * 2 * (_bw) + \
- MT_MAX_STREAMS * (_sgi) + \
- (_streams) - 1)
-
-#define BW2VBPS(_bw, r3, r2, r1) \
- (_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1)
-
-#define __VHT_GROUP(_streams, _sgi, _bw, _s) \
- [VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \
- .shift = _s, \
- .duration = { \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 117, 54, 26)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 234, 108, 52)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 351, 162, 78)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 468, 216, 104)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 702, 324, 156)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 936, 432, 208)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1053, 486, 234)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1170, 540, 260)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1404, 648, 312)) >> _s, \
- MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 1560, 720, 346)) >> _s \
- } \
-}
-
-#define VHT_GROUP_SHIFT(_streams, _sgi, _bw) \
- GROUP_SHIFT(MCS_DURATION(_streams, _sgi, \
- BW2VBPS(_bw, 117, 54, 26)))
-
-#define VHT_GROUP(_streams, _sgi, _bw) \
- __VHT_GROUP(_streams, _sgi, _bw, \
- VHT_GROUP_SHIFT(_streams, _sgi, _bw))
-
-struct mcs_group {
- u8 shift;
- u16 duration[MCS_GROUP_RATES];
-};
-
-static const struct mcs_group airtime_mcs_groups[] = {
- MCS_GROUP(1, 0, BW_20),
- MCS_GROUP(2, 0, BW_20),
- MCS_GROUP(3, 0, BW_20),
- MCS_GROUP(4, 0, BW_20),
-
- MCS_GROUP(1, 1, BW_20),
- MCS_GROUP(2, 1, BW_20),
- MCS_GROUP(3, 1, BW_20),
- MCS_GROUP(4, 1, BW_20),
-
- MCS_GROUP(1, 0, BW_40),
- MCS_GROUP(2, 0, BW_40),
- MCS_GROUP(3, 0, BW_40),
- MCS_GROUP(4, 0, BW_40),
-
- MCS_GROUP(1, 1, BW_40),
- MCS_GROUP(2, 1, BW_40),
- MCS_GROUP(3, 1, BW_40),
- MCS_GROUP(4, 1, BW_40),
-
- VHT_GROUP(1, 0, BW_20),
- VHT_GROUP(2, 0, BW_20),
- VHT_GROUP(3, 0, BW_20),
- VHT_GROUP(4, 0, BW_20),
-
- VHT_GROUP(1, 1, BW_20),
- VHT_GROUP(2, 1, BW_20),
- VHT_GROUP(3, 1, BW_20),
- VHT_GROUP(4, 1, BW_20),
-
- VHT_GROUP(1, 0, BW_40),
- VHT_GROUP(2, 0, BW_40),
- VHT_GROUP(3, 0, BW_40),
- VHT_GROUP(4, 0, BW_40),
-
- VHT_GROUP(1, 1, BW_40),
- VHT_GROUP(2, 1, BW_40),
- VHT_GROUP(3, 1, BW_40),
- VHT_GROUP(4, 1, BW_40),
-
- VHT_GROUP(1, 0, BW_80),
- VHT_GROUP(2, 0, BW_80),
- VHT_GROUP(3, 0, BW_80),
- VHT_GROUP(4, 0, BW_80),
-
- VHT_GROUP(1, 1, BW_80),
- VHT_GROUP(2, 1, BW_80),
- VHT_GROUP(3, 1, BW_80),
- VHT_GROUP(4, 1, BW_80),
-};
-
-static u32
-mt76_calc_legacy_rate_duration(const struct ieee80211_rate *rate, bool short_pre,
- int len)
-{
- u32 duration;
-
- switch (rate->hw_value >> 8) {
- case MT_PHY_TYPE_CCK:
- duration = 144 + 48; /* preamble + PLCP */
- if (short_pre)
- duration >>= 1;
-
- duration += 10; /* SIFS */
- break;
- case MT_PHY_TYPE_OFDM:
-		duration = 20 + 16; /* preamble + SIFS */
- break;
- default:
- WARN_ON_ONCE(1);
- return 0;
- }
-
- len <<= 3;
- duration += (len * 10) / rate->bitrate;
-
- return duration;
-}
-
-u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
- int len)
-{
- struct ieee80211_supported_band *sband;
- const struct ieee80211_rate *rate;
- bool sgi = status->enc_flags & RX_ENC_FLAG_SHORT_GI;
- bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
- int bw, streams;
- u32 duration;
- int group, idx;
-
- switch (status->bw) {
- case RATE_INFO_BW_20:
- bw = BW_20;
- break;
- case RATE_INFO_BW_40:
- bw = BW_40;
- break;
- case RATE_INFO_BW_80:
- bw = BW_80;
- break;
- default:
- WARN_ON_ONCE(1);
- return 0;
- }
-
- switch (status->encoding) {
- case RX_ENC_LEGACY:
- if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
- return 0;
-
- sband = dev->hw->wiphy->bands[status->band];
- if (!sband || status->rate_idx >= sband->n_bitrates)
- return 0;
-
- rate = &sband->bitrates[status->rate_idx];
-
- return mt76_calc_legacy_rate_duration(rate, sp, len);
- case RX_ENC_VHT:
- streams = status->nss;
- idx = status->rate_idx;
- group = VHT_GROUP_IDX(streams, sgi, bw);
- break;
- case RX_ENC_HT:
- streams = ((status->rate_idx >> 3) & 3) + 1;
- idx = status->rate_idx & 7;
- group = HT_GROUP_IDX(streams, sgi, bw);
- break;
- default:
- WARN_ON_ONCE(1);
- return 0;
- }
-
- if (WARN_ON_ONCE(streams > 4))
- return 0;
-
- duration = airtime_mcs_groups[group].duration[idx];
- duration <<= airtime_mcs_groups[group].shift;
- duration *= len;
- duration /= AVG_PKT_SIZE;
- duration /= 1024;
-
- duration += 36 + (streams << 2);
-
- return duration;
-}
-
-u32 mt76_calc_tx_airtime(struct mt76_dev *dev, struct ieee80211_tx_info *info,
- int len)
-{
- struct mt76_rx_status stat = {
- .band = info->band,
- };
- u32 duration = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
- struct ieee80211_tx_rate *rate = &info->status.rates[i];
- u32 cur_duration;
-
- if (rate->idx < 0 || !rate->count)
- break;
-
- if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- stat.bw = RATE_INFO_BW_80;
- else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- stat.bw = RATE_INFO_BW_40;
- else
- stat.bw = RATE_INFO_BW_20;
-
- stat.enc_flags = 0;
- if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- stat.enc_flags |= RX_ENC_FLAG_SHORTPRE;
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- stat.enc_flags |= RX_ENC_FLAG_SHORT_GI;
-
- stat.rate_idx = rate->idx;
- if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- stat.encoding = RX_ENC_VHT;
- stat.rate_idx = ieee80211_rate_get_vht_mcs(rate);
- stat.nss = ieee80211_rate_get_vht_nss(rate);
- } else if (rate->flags & IEEE80211_TX_RC_MCS) {
- stat.encoding = RX_ENC_HT;
- } else {
- stat.encoding = RX_ENC_LEGACY;
- }
-
- cur_duration = mt76_calc_rx_airtime(dev, &stat, len);
- duration += cur_duration * rate->count;
- }
-
- return duration;
-}
-EXPORT_SYMBOL_GPL(mt76_calc_tx_airtime);
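
[ airtime.c goes away because mac80211 exports an equivalent helper;
the mac80211.c hunk below switches mt76_airtime_report() over to
ieee80211_calc_rx_airtime(). For reference, a worked userspace sketch
of the legacy OFDM branch the deleted code implemented, assuming
ieee80211_rate::bitrate units of 100 kbit/s (hence the * 10):

#include <stdio.h>

static unsigned int ofdm_duration_us(unsigned int len, unsigned int bitrate)
{
	/* 20 us preamble + 16 us SIFS, then payload bits over the rate */
	return 20 + 16 + (len * 8 * 10) / bitrate;
}

int main(void)
{
	/* 1024-byte frame at 54 Mbit/s: 36 + 8192/54 = 187 us */
	printf("%u us\n", ofdm_duration_us(1024, 540));
	return 0;
} ]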
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 1847f55e199b..75e659774e07 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -132,6 +132,11 @@ mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
writel(q->ndesc, &q->regs->ring_size);
q->head = readl(&q->regs->dma_idx);
q->tail = q->head;
+}
+
+static void
+mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
writel(q->head, &q->regs->cpu_idx);
}
@@ -141,7 +146,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
struct mt76_sw_queue *sq = &dev->q_tx[qid];
struct mt76_queue *q = sq->q;
struct mt76_queue_entry entry;
- unsigned int n_swq_queued[4] = {};
+ unsigned int n_swq_queued[8] = {};
unsigned int n_queued = 0;
bool wake = false;
int i, last;
@@ -178,15 +183,25 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
spin_lock_bh(&q->lock);
q->queued -= n_queued;
- for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
+ for (i = 0; i < 4; i++) {
if (!n_swq_queued[i])
continue;
dev->q_tx[i].swq_queued -= n_swq_queued[i];
}
- if (flush)
+ /* ext PHY */
+ for (i = 0; i < 4; i++) {
+		if (!n_swq_queued[4 + i])
+ continue;
+
+ dev->q_tx[__MT_TXQ_MAX + i].swq_queued -= n_swq_queued[4 + i];
+ }
+
+ if (flush) {
mt76_dma_sync_idx(dev, q);
+ mt76_dma_kick_queue(dev, q);
+ }
wake = wake && q->stopped &&
qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
@@ -238,7 +253,9 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
if (!q->queued)
return NULL;
- if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+ if (flush)
+ q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+ else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
return NULL;
q->tail = (q->tail + 1) % q->ndesc;
@@ -247,12 +264,6 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
return mt76_dma_get_buf(dev, q, idx, len, info, more);
}
-static void
-mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
-{
- writel(q->head, &q->regs->cpu_idx);
-}
-
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
@@ -261,10 +272,13 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct mt76_queue_buf buf;
dma_addr_t addr;
+ if (q->queued + 1 >= q->ndesc - 1)
+ goto error;
+
addr = dma_map_single(dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev, addr)))
- return -ENOMEM;
+ goto error;
buf.addr = addr;
buf.len = skb->len;
@@ -275,6 +289,10 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
spin_unlock_bh(&q->lock);
return 0;
+
+error:
+ dev_kfree_skb(skb);
+ return -ENOMEM;
}
static int
@@ -286,6 +304,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct mt76_tx_info tx_info = {
.skb = skb,
};
+ struct ieee80211_hw *hw;
int len, n = 0, ret = -ENOMEM;
struct mt76_queue_entry e;
struct mt76_txwi_cache *t;
@@ -295,7 +314,8 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
t = mt76_get_txwi(dev);
if (!t) {
- ieee80211_free_txskb(dev->hw, skb);
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_free_txskb(hw, skb);
return -ENOMEM;
}
txwi = mt76_get_txwi_ptr(dev, t);
@@ -427,7 +447,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
int i;
for (i = 0; i < q->ndesc; i++)
- q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+ q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
mt76_dma_rx_cleanup(dev, q);
mt76_dma_sync_idx(dev, q);
@@ -531,6 +551,7 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
dev = container_of(napi->dev, struct mt76_dev, napi_dev);
qid = napi - dev->napi;
+ local_bh_disable();
rcu_read_lock();
do {
@@ -540,6 +561,7 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
} while (cur && done < budget);
rcu_read_unlock();
+ local_bh_enable();
if (done < budget && napi_complete(napi))
dev->drv->rx_poll_complete(dev, qid);
@@ -558,7 +580,6 @@ mt76_dma_init(struct mt76_dev *dev)
netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
64);
mt76_dma_rx_fill(dev, &dev->q_rx[i]);
- skb_queue_head_init(&dev->rx_skb[i]);
napi_enable(&dev->napi[i]);
}
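
[ Two behavioural notes on the dma.c changes: tx enqueue now refuses to
fill the ring completely (a ring where head == tail means "empty" can
hold at most ndesc - 1 entries), and the skb is now freed on failure
rather than left to the caller. A sketch of the occupancy check, under
the same one-slot-reserved convention:

static bool ring_can_enqueue(unsigned int queued, unsigned int ndesc)
{
	/* mirrors "q->queued + 1 >= q->ndesc - 1" rejecting the enqueue */
	return queued + 1 < ndesc - 1;
} ]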
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 804224e81103..c236e303ccfd 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -64,6 +64,16 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
goto out_put_node;
}
+ if (of_property_read_bool(dev->dev->of_node, "big-endian")) {
+ u8 *data = (u8 *)dev->eeprom.data;
+ int i;
+
+		/* convert the EEPROM data to little endian */
+ for (i = 0; i < round_down(len, 2); i += 2)
+ put_unaligned_le16(get_unaligned_be16(&data[i]),
+ &data[i]);
+ }
+
out_put_node:
of_node_put(np);
return ret;
@@ -77,13 +87,11 @@ mt76_eeprom_override(struct mt76_dev *dev)
{
#ifdef CONFIG_OF
struct device_node *np = dev->dev->of_node;
- const u8 *mac;
-
- if (!np)
- return;
+ const u8 *mac = NULL;
- mac = of_get_mac_address(np);
- if (!IS_ERR(mac))
+ if (np)
+ mac = of_get_mac_address(np);
+ if (!IS_ERR_OR_NULL(mac))
ether_addr_copy(dev->macaddr, mac);
#endif
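
[ The new "big-endian" device-tree property byte-swaps the EEPROM image
into little-endian 16-bit words in place. A portable sketch of the same
transform; the kernel version uses get_unaligned_be16() and
put_unaligned_le16(), and eeprom_be16_to_le16() is an illustrative name:

#include <stddef.h>
#include <stdint.h>

static void eeprom_be16_to_le16(uint8_t *data, size_t len)
{
	size_t i;

	/* converting each big-endian 16-bit word to little endian is a
	 * plain byte swap; a trailing odd byte is left untouched, as
	 * with round_down(len, 2) above */
	for (i = 0; i + 1 < len; i += 2) {
		uint8_t tmp = data[i];

		data[i] = data[i + 1];
		data[i + 1] = tmp;
	}
} ]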
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 96018fd65779..f44f99184c10 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -121,7 +121,7 @@ static void mt76_init_stream_cap(struct mt76_dev *dev,
bool vht)
{
struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
- int i, nstream = hweight8(dev->antenna_mask);
+ int i, nstream = hweight8(dev->phy.antenna_mask);
struct ieee80211_sta_vht_cap *vht_cap;
u16 mcs_map = 0;
@@ -156,9 +156,9 @@ static void mt76_init_stream_cap(struct mt76_dev *dev,
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht)
{
if (dev->cap.has_2ghz)
- mt76_init_stream_cap(dev, &dev->sband_2g.sband, false);
+ mt76_init_stream_cap(dev, &dev->phy.sband_2g.sband, false);
if (dev->cap.has_5ghz)
- mt76_init_stream_cap(dev, &dev->sband_5g.sband, vht);
+ mt76_init_stream_cap(dev, &dev->phy.sband_5g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
@@ -187,8 +187,6 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
sband->n_channels = n_chan;
sband->bitrates = rates;
sband->n_bitrates = n_rates;
- dev->chandef.chan = &sband->channels[0];
- dev->chan_state = &msband->chan[0];
ht_cap = &sband->ht_cap;
ht_cap->ht_supported = true;
@@ -223,9 +221,9 @@ static int
mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates,
int n_rates)
{
- dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->sband_2g.sband;
+ dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->phy.sband_2g.sband;
- return mt76_init_sband(dev, &dev->sband_2g,
+ return mt76_init_sband(dev, &dev->phy.sband_2g,
mt76_channels_2ghz,
ARRAY_SIZE(mt76_channels_2ghz),
rates, n_rates, false);
@@ -235,18 +233,19 @@ static int
mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates,
int n_rates, bool vht)
{
- dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->sband_5g.sband;
+ dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->phy.sband_5g.sband;
- return mt76_init_sband(dev, &dev->sband_5g,
+ return mt76_init_sband(dev, &dev->phy.sband_5g,
mt76_channels_5ghz,
ARRAY_SIZE(mt76_channels_5ghz),
rates, n_rates, vht);
}
static void
-mt76_check_sband(struct mt76_dev *dev, int band)
+mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
+ enum nl80211_band band)
{
- struct ieee80211_supported_band *sband = dev->hw->wiphy->bands[band];
+ struct ieee80211_supported_band *sband = &msband->sband;
bool found = false;
int i;
@@ -261,20 +260,145 @@ mt76_check_sband(struct mt76_dev *dev, int band)
break;
}
- if (found)
+ if (found) {
+ phy->chandef.chan = &sband->channels[0];
+ phy->chan_state = &msband->chan[0];
return;
+ }
sband->n_channels = 0;
- dev->hw->wiphy->bands[band] = NULL;
+ phy->hw->wiphy->bands[band] = NULL;
}
+static void
+mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
+{
+ struct wiphy *wiphy = hw->wiphy;
+
+ SET_IEEE80211_DEV(hw, dev->dev);
+ SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+ wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
+
+ wiphy->available_antennas_tx = dev->phy.antenna_mask;
+ wiphy->available_antennas_rx = dev->phy.antenna_mask;
+
+ hw->txq_data_size = sizeof(struct mt76_txq);
+
+ if (!hw->max_tx_fragments)
+ hw->max_tx_fragments = 16;
+
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, AP_LINK_PS);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+
+ wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC);
+}
+
+struct mt76_phy *
+mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
+ const struct ieee80211_ops *ops)
+{
+ struct ieee80211_hw *hw;
+ struct mt76_phy *phy;
+ unsigned int phy_size, chan_size;
+ unsigned int size_2g, size_5g;
+ void *priv;
+
+ phy_size = ALIGN(sizeof(*phy), 8);
+ chan_size = sizeof(dev->phy.sband_2g.chan[0]);
+ size_2g = ALIGN(ARRAY_SIZE(mt76_channels_2ghz) * chan_size, 8);
+ size_5g = ALIGN(ARRAY_SIZE(mt76_channels_5ghz) * chan_size, 8);
+
+ size += phy_size + size_2g + size_5g;
+ hw = ieee80211_alloc_hw(size, ops);
+ if (!hw)
+ return NULL;
+
+ phy = hw->priv;
+ phy->dev = dev;
+ phy->hw = hw;
+
+ mt76_phy_init(dev, hw);
+
+ priv = hw->priv + phy_size;
+
+ phy->sband_2g = dev->phy.sband_2g;
+ phy->sband_2g.chan = priv;
+ priv += size_2g;
+
+ phy->sband_5g = dev->phy.sband_5g;
+ phy->sband_5g.chan = priv;
+ priv += size_5g;
+
+ phy->priv = priv;
+
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
+
+ mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
+ mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(mt76_alloc_phy);
+
+int
+mt76_register_phy(struct mt76_phy *phy)
+{
+ int ret;
+
+ ret = ieee80211_register_hw(phy->hw);
+ if (ret)
+ return ret;
+
+ phy->dev->phy2 = phy;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_register_phy);
+
+void
+mt76_unregister_phy(struct mt76_phy *phy)
+{
+ struct mt76_dev *dev = phy->dev;
+
+ dev->phy2 = NULL;
+ mt76_tx_status_check(dev, NULL, true);
+ ieee80211_unregister_hw(phy->hw);
+}
+EXPORT_SYMBOL_GPL(mt76_unregister_phy);
+
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
const struct mt76_driver_ops *drv_ops)
{
struct ieee80211_hw *hw;
+ struct mt76_phy *phy;
struct mt76_dev *dev;
+ int i;
hw = ieee80211_alloc_hw(size, ops);
if (!hw)
@@ -285,6 +409,10 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
dev->dev = pdev;
dev->drv = drv_ops;
+ phy = &dev->phy;
+ phy->dev = dev;
+ phy->hw = hw;
+
spin_lock_init(&dev->rx_lock);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->cc_lock);
@@ -292,6 +420,15 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
init_waitqueue_head(&dev->tx_wait);
skb_queue_head_init(&dev->status_list);
+ skb_queue_head_init(&dev->mcu.res_q);
+ init_waitqueue_head(&dev->mcu.wait);
+ mutex_init(&dev->mcu.mutex);
+
+ INIT_LIST_HEAD(&dev->txwi_cache);
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
+ skb_queue_head_init(&dev->rx_skb[i]);
+
tasklet_init(&dev->tx_tasklet, mt76_tx_tasklet, (unsigned long)dev);
return dev;
@@ -302,51 +439,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
struct ieee80211_rate *rates, int n_rates)
{
struct ieee80211_hw *hw = dev->hw;
- struct wiphy *wiphy = hw->wiphy;
+ struct mt76_phy *phy = &dev->phy;
int ret;
dev_set_drvdata(dev->dev, dev);
-
- INIT_LIST_HEAD(&dev->txwi_cache);
-
- SET_IEEE80211_DEV(hw, dev->dev);
- SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
-
- wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
-
- wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
- wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
-
- wiphy->available_antennas_tx = dev->antenna_mask;
- wiphy->available_antennas_rx = dev->antenna_mask;
-
- hw->txq_data_size = sizeof(struct mt76_txq);
- hw->max_tx_fragments = 16;
-
- ieee80211_hw_set(hw, SIGNAL_DBM);
- ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
- ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
- ieee80211_hw_set(hw, AMPDU_AGGREGATION);
- ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
- ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
- ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
- ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
- ieee80211_hw_set(hw, TX_AMSDU);
- ieee80211_hw_set(hw, TX_FRAG_LIST);
- ieee80211_hw_set(hw, MFP_CAPABLE);
- ieee80211_hw_set(hw, AP_LINK_PS);
- ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
- ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
- ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
-
- wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
- wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_ADHOC);
+ mt76_phy_init(dev, hw);
if (dev->cap.has_2ghz) {
ret = mt76_init_sband_2g(dev, rates, n_rates);
@@ -360,9 +457,9 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
return ret;
}
- wiphy_read_of_freq_limits(dev->hw->wiphy);
- mt76_check_sband(dev, NL80211_BAND_2GHZ);
- mt76_check_sband(dev, NL80211_BAND_5GHZ);
+ wiphy_read_of_freq_limits(hw->wiphy);
+ mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
+ mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
ret = mt76_led_init(dev);
@@ -394,7 +491,10 @@ EXPORT_SYMBOL_GPL(mt76_free_device);
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
- if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_phy *phy = mt76_dev_phy(dev, status->ext_phy);
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
dev_kfree_skb(skb);
return;
}
@@ -403,13 +503,16 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(mt76_rx);
-bool mt76_has_tx_pending(struct mt76_dev *dev)
+bool mt76_has_tx_pending(struct mt76_phy *phy)
{
+ struct mt76_dev *dev = phy->dev;
struct mt76_queue *q;
- int i;
+ int i, offset;
- for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
- q = dev->q_tx[i].q;
+ offset = __MT_TXQ_MAX * (phy != &dev->phy);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++) {
+ q = dev->q_tx[offset + i].q;
if (q && q->queued)
return true;
}
@@ -419,37 +522,45 @@ bool mt76_has_tx_pending(struct mt76_dev *dev)
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
static struct mt76_channel_state *
-mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
+mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
{
struct mt76_sband *msband;
int idx;
if (c->band == NL80211_BAND_2GHZ)
- msband = &dev->sband_2g;
+ msband = &phy->sband_2g;
else
- msband = &dev->sband_5g;
+ msband = &phy->sband_5g;
idx = c - &msband->sband.channels[0];
return &msband->chan[idx];
}
+static void
+mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
+{
+ struct mt76_channel_state *state = phy->chan_state;
+
+ state->cc_active += ktime_to_us(ktime_sub(time,
+ phy->survey_time));
+ phy->survey_time = time;
+}
+
void mt76_update_survey(struct mt76_dev *dev)
{
- struct mt76_channel_state *state = dev->chan_state;
ktime_t cur_time;
- if (!test_bit(MT76_STATE_RUNNING, &dev->state))
- return;
-
if (dev->drv->update_survey)
dev->drv->update_survey(dev);
cur_time = ktime_get_boottime();
- state->cc_active += ktime_to_us(ktime_sub(cur_time,
- dev->survey_time));
- dev->survey_time = cur_time;
+ mt76_update_survey_active_time(&dev->phy, cur_time);
+ if (dev->phy2)
+ mt76_update_survey_active_time(dev->phy2, cur_time);
if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
+ struct mt76_channel_state *state = dev->phy.chan_state;
+
spin_lock_bh(&dev->cc_lock);
state->cc_bss_rx += dev->cur_cc_bss_rx;
dev->cur_cc_bss_rx = 0;
@@ -458,31 +569,33 @@ void mt76_update_survey(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
-void mt76_set_channel(struct mt76_dev *dev)
+void mt76_set_channel(struct mt76_phy *phy)
{
- struct ieee80211_hw *hw = dev->hw;
+ struct mt76_dev *dev = phy->dev;
+ struct ieee80211_hw *hw = phy->hw;
struct cfg80211_chan_def *chandef = &hw->conf.chandef;
bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
int timeout = HZ / 5;
- wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), timeout);
+ wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(dev);
- dev->chandef = *chandef;
- dev->chan_state = mt76_channel_state(dev, chandef->chan);
+ phy->chandef = *chandef;
+ phy->chan_state = mt76_channel_state(phy, chandef->chan);
if (!offchannel)
- dev->main_chan = chandef->chan;
+ phy->main_chan = chandef->chan;
- if (chandef->chan != dev->main_chan)
- memset(dev->chan_state, 0, sizeof(*dev->chan_state));
+ if (chandef->chan != phy->main_chan)
+ memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
struct mt76_sband *sband;
struct ieee80211_channel *chan;
struct mt76_channel_state *state;
@@ -492,10 +605,10 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
if (idx == 0 && dev->drv->update_survey)
mt76_update_survey(dev);
- sband = &dev->sband_2g;
+ sband = &phy->sband_2g;
if (idx >= sband->sband.n_channels) {
idx -= sband->sband.n_channels;
- sband = &dev->sband_5g;
+ sband = &phy->sband_5g;
}
if (idx >= sband->sband.n_channels) {
@@ -504,13 +617,16 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
}
chan = &sband->sband.channels[idx];
- state = mt76_channel_state(dev, chan);
+ state = mt76_channel_state(phy, chan);
memset(survey, 0, sizeof(*survey));
survey->channel = chan;
survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
survey->filled |= dev->drv->survey_flags;
- if (chan == dev->main_chan) {
+ if (state->noise)
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+
+ if (chan == phy->main_chan) {
survey->filled |= SURVEY_INFO_IN_USE;
if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
@@ -520,6 +636,7 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
survey->time_busy = div_u64(state->cc_busy, 1000);
survey->time_rx = div_u64(state->cc_rx, 1000);
survey->time = div_u64(state->cc_active, 1000);
+ survey->noise = state->noise;
spin_lock_bh(&dev->cc_lock);
survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
@@ -555,8 +672,12 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
EXPORT_SYMBOL(mt76_wcid_key_setup);
-static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
+static void
+mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
+ struct ieee80211_hw **hw,
+ struct ieee80211_sta **sta)
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct mt76_rx_status mstat;
@@ -581,7 +702,8 @@ static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
memcpy(status->chain_signal, mstat.chain_signal,
sizeof(mstat.chain_signal));
- return wcid_to_sta(mstat.wcid);
+ *sta = wcid_to_sta(mstat.wcid);
+ *hw = mt76_phy_hw(dev, mstat.ext_phy);
}
static int
@@ -628,10 +750,18 @@ mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
int len)
{
struct mt76_wcid *wcid = status->wcid;
+ struct ieee80211_rx_status info = {
+ .enc_flags = status->enc_flags,
+ .rate_idx = status->rate_idx,
+ .encoding = status->encoding,
+ .band = status->band,
+ .nss = status->nss,
+ .bw = status->bw,
+ };
struct ieee80211_sta *sta;
u32 airtime;
- airtime = mt76_calc_rx_airtime(dev, status, len);
+ airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
spin_lock(&dev->cc_lock);
dev->cur_cc_bss_rx += airtime;
spin_unlock(&dev->cc_lock);
@@ -707,12 +837,14 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_sta *sta;
+ struct ieee80211_hw *hw;
struct mt76_wcid *wcid = status->wcid;
bool ps;
int i;
+ hw = mt76_phy_hw(dev, status->ext_phy);
if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
- sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL);
+ sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
if (sta)
wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
}
@@ -770,7 +902,7 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
if (!skb_queue_empty(&mtxq->retry_q))
- ieee80211_schedule_txq(dev->hw, sta->txq[i]);
+ ieee80211_schedule_txq(hw, sta->txq[i]);
}
}
@@ -778,6 +910,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
struct napi_struct *napi)
{
struct ieee80211_sta *sta;
+ struct ieee80211_hw *hw;
struct sk_buff *skb;
spin_lock(&dev->rx_lock);
@@ -787,8 +920,8 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
continue;
}
- sta = mt76_rx_convert(skb);
- ieee80211_rx_napi(dev->hw, sta, skb, napi);
+ mt76_rx_convert(dev, skb, &hw, &sta);
+ ieee80211_rx_napi(hw, sta, skb, napi);
}
spin_unlock(&dev->rx_lock);
}
@@ -812,7 +945,7 @@ EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
static int
mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta, bool ext_phy)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
int ret;
@@ -837,6 +970,9 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
}
ewma_signal_init(&wcid->rssi);
+ if (ext_phy)
+ mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
+ wcid->ext_phy = ext_phy;
rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
out:
@@ -851,9 +987,6 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
int i, idx = wcid->idx;
- rcu_assign_pointer(dev->wcid[idx], NULL);
- synchronize_rcu();
-
for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
mt76_rx_aggr_stop(dev, wcid, i);
@@ -863,7 +996,8 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
mt76_tx_status_check(dev, wcid, true);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
mt76_txq_remove(dev, sta->txq[i]);
- mt76_wcid_free(dev->wcid_mask, idx);
+ mt76_wcid_mask_clear(dev->wcid_mask, idx);
+ mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
@@ -881,11 +1015,13 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE)
- return mt76_sta_add(dev, vif, sta);
+ return mt76_sta_add(dev, vif, sta, ext_phy);
if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
@@ -900,30 +1036,27 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
+void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
+
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm)
{
- struct mt76_dev *dev = hw->priv;
- int n_chains = hweight8(dev->antenna_mask);
+ struct mt76_phy *phy = hw->priv;
+ int n_chains = hweight8(phy->antenna_mask);
+ int delta = mt76_tx_power_nss_delta(n_chains);
- *dbm = DIV_ROUND_UP(dev->txpower_cur, 2);
-
- /* convert from per-chain power to combined
- * output power
- */
- switch (n_chains) {
- case 4:
- *dbm += 6;
- break;
- case 3:
- *dbm += 4;
- break;
- case 2:
- *dbm += 3;
- break;
- default:
- break;
- }
+ *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
return 0;
}
@@ -1005,11 +1138,11 @@ int mt76_get_rate(struct mt76_dev *dev,
int i, offset = 0, len = sband->n_bitrates;
if (cck) {
- if (sband == &dev->sband_5g.sband)
+ if (sband == &dev->phy.sband_5g.sband)
return 0;
idx &= ~BIT(2); /* short preamble */
- } else if (sband == &dev->sband_2g.sband) {
+ } else if (sband == &dev->phy.sband_2g.sband) {
offset = 4;
}
@@ -1025,27 +1158,28 @@ EXPORT_SYMBOL_GPL(mt76_get_rate);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
- set_bit(MT76_SCANNING, &dev->state);
+ set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
- clear_bit(MT76_SCANNING, &dev->state);
+ clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
mutex_lock(&dev->mutex);
- *tx_ant = dev->antenna_mask;
- *rx_ant = dev->antenna_mask;
+ *tx_ant = phy->antenna_mask;
+ *rx_ant = phy->antenna_mask;
mutex_unlock(&dev->mutex);
return 0;
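
[ The open-coded per-chain-to-combined power switch in mt76_get_txpower()
is replaced by mt76_tx_power_nss_delta() (added in the mt76.h hunk
below), which works in the same half-dB units as txpower_cur. A worked
userspace sketch of the new math:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int combined_txpower_dbm(int txpower_half_db, int nss)
{
	/* half-dB deltas for 1-4 spatial streams: 0, 3, 4.5, 6 dB */
	static const int nss_delta[4] = { 0, 6, 9, 12 };

	return DIV_ROUND_UP(txpower_half_db + nss_delta[nss - 1], 2);
}

int main(void)
{
	/* 23 dBm per chain (46 half-dB) on 2 chains -> 26 dBm combined */
	printf("%d dBm\n", combined_txpower_dbm(46, 2));
	return 0;
} ]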
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
index 2a976688804d..4048f446e3ee 100644
--- a/drivers/net/wireless/mediatek/mt76/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -9,14 +9,16 @@ struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
int data_len, int tail_len)
{
+ int length = head_len + data_len + tail_len;
struct sk_buff *skb;
- skb = alloc_skb(head_len + data_len + tail_len,
- GFP_KERNEL);
+ skb = alloc_skb(length, GFP_KERNEL);
if (!skb)
return NULL;
+ memset(skb->head, 0, length);
skb_reserve(skb, head_len);
+
if (data && data_len)
skb_put_data(skb, data, data_len);
@@ -24,7 +26,6 @@ mt76_mcu_msg_alloc(const void *data, int head_len,
}
EXPORT_SYMBOL_GPL(mt76_mcu_msg_alloc);
-/* mmio */
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires)
{
@@ -34,16 +35,17 @@ struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
return NULL;
timeout = expires - jiffies;
- wait_event_timeout(dev->mmio.mcu.wait,
- !skb_queue_empty(&dev->mmio.mcu.res_q),
+ wait_event_timeout(dev->mcu.wait,
+ (!skb_queue_empty(&dev->mcu.res_q) ||
+ test_bit(MT76_MCU_RESET, &dev->phy.state)),
timeout);
- return skb_dequeue(&dev->mmio.mcu.res_q);
+ return skb_dequeue(&dev->mcu.res_q);
}
EXPORT_SYMBOL_GPL(mt76_mcu_get_response);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb)
{
- skb_queue_tail(&dev->mmio.mcu.res_q, skb);
- wake_up(&dev->mmio.mcu.wait);
+ skb_queue_tail(&dev->mcu.res_q, skb);
+ wake_up(&dev->mcu.wait);
}
EXPORT_SYMBOL_GPL(mt76_mcu_rx_event);
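
[ mt76_mcu_msg_alloc() now zeroes the whole allocation before reserving
headroom, so the head and tail padding handed to the bus glue can no
longer leak stale heap contents. The buffer layout as a plain-C sketch,
not the skb API; names are illustrative:

#include <stdlib.h>
#include <string.h>

struct msg_buf {
	unsigned char *head;	/* start of the zeroed allocation */
	unsigned char *data;	/* payload begins after the headroom */
	size_t data_len;
};

static int msg_buf_alloc(struct msg_buf *m, size_t head_len,
			 const void *data, size_t data_len, size_t tail_len)
{
	m->head = calloc(1, head_len + data_len + tail_len);
	if (!m->head)
		return -1;

	m->data = m->head + head_len;		/* skb_reserve() */
	if (data && data_len)
		memcpy(m->data, data, data_len);	/* skb_put_data() */
	m->data_len = data_len;
	return 0;
} ]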
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index 1c974df1fe25..7ead6620bb8b 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -94,9 +94,6 @@ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
dev->bus = &mt76_mmio_ops;
dev->mmio.regs = regs;
- skb_queue_head_init(&dev->mmio.mcu.res_q);
- init_waitqueue_head(&dev->mmio.mcu.wait);
spin_lock_init(&dev->mmio.irq_lock);
- mutex_init(&dev->mmio.mcu.mutex);
}
EXPORT_SYMBOL_GPL(mt76_mmio_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index fb077760347a..8e4759bc8f59 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -22,6 +22,7 @@
#define MT_SKB_HEAD_LEN 128
struct mt76_dev;
+struct mt76_phy;
struct mt76_wcid;
struct mt76_reg_pair {
@@ -138,6 +139,8 @@ struct mt76_sw_queue {
struct mt76_mcu_ops {
int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp);
+ int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, bool wait_resp);
int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
const struct mt76_reg_pair *rp, int len);
int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
@@ -177,6 +180,9 @@ enum mt76_wcid_flags {
#define MT76_N_WCIDS 128
+/* stored in ieee80211_tx_info::hw_queue */
+#define MT_TX_HW_QUEUE_EXT_PHY BIT(3)
+
DECLARE_EWMA(signal, 10, 8);
#define MT_WCID_TX_INFO_RATE GENMASK(15, 0)
@@ -196,6 +202,7 @@ struct mt76_wcid {
u8 hw_key_idx;
u8 sta:1;
+ u8 ext_phy:1;
u8 rx_check_pn;
u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
@@ -237,6 +244,8 @@ struct mt76_rx_tid {
u8 size;
u8 nframes;
+ u8 num;
+
u8 started:1, stopped:1, timer_pending:1;
struct sk_buff *reorder_buf[];
@@ -267,6 +276,7 @@ enum {
MT76_STATE_MCU_RUNNING,
MT76_SCANNING,
MT76_RESET,
+ MT76_MCU_RESET,
MT76_REMOVED,
MT76_READING_STATS,
};
@@ -279,6 +289,7 @@ struct mt76_hw_cap {
#define MT_DRV_TXWI_NO_FREE BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS BIT(1)
#define MT_DRV_SW_RX_AIRTIME BIT(2)
+#define MT_DRV_RX_DMA_HDR BIT(3)
struct mt76_driver_ops {
u32 drv_flags;
@@ -321,6 +332,8 @@ struct mt76_channel_state {
u64 cc_rx;
u64 cc_bss_rx;
u64 cc_tx;
+
+ s8 noise;
};
struct mt76_sband {
@@ -350,12 +363,15 @@ struct mt76_rate_power {
enum mt_vendor_req {
MT_VEND_DEV_MODE = 0x1,
MT_VEND_WRITE = 0x2,
+ MT_VEND_POWER_ON = 0x4,
MT_VEND_MULTI_WRITE = 0x6,
MT_VEND_MULTI_READ = 0x7,
MT_VEND_READ_EEPROM = 0x9,
MT_VEND_WRITE_FCE = 0x42,
MT_VEND_WRITE_CFG = 0x46,
MT_VEND_READ_CFG = 0x47,
+ MT_VEND_READ_EXT = 0x63,
+ MT_VEND_WRITE_EXT = 0x66,
};
enum mt76u_in_ep {
@@ -374,20 +390,26 @@ enum mt76u_out_ep {
__MT_EP_OUT_MAX,
};
+struct mt76_mcu {
+ struct mutex mutex;
+ u32 msg_seq;
+
+ struct sk_buff_head res_q;
+ wait_queue_head_t wait;
+};
+
#define MT_TX_SG_MAX_SIZE 8
-#define MT_RX_SG_MAX_SIZE 1
+#define MT_RX_SG_MAX_SIZE 4
#define MT_NUM_TX_ENTRIES 256
#define MT_NUM_RX_ENTRIES 128
#define MCU_RESP_URB_SIZE 1024
struct mt76_usb {
struct mutex usb_ctrl_mtx;
- union {
- u8 data[32];
- __le32 reg_val;
- };
+ u8 *data;
+ u16 data_len;
struct tasklet_struct rx_tasklet;
- struct workqueue_struct *stat_wq;
+ struct workqueue_struct *wq;
struct work_struct stat_work;
u8 out_ep[__MT_EP_OUT_MAX];
@@ -395,10 +417,7 @@ struct mt76_usb {
bool sg_en;
struct mt76u_mcu {
- struct mutex mutex;
u8 *data;
- u32 msg_seq;
-
/* multiple reads */
struct mt76_reg_pair *rp;
int rp_len;
@@ -408,14 +427,6 @@ struct mt76_usb {
};
struct mt76_mmio {
- struct mt76e_mcu {
- struct mutex mutex;
-
- wait_queue_head_t wait;
- struct sk_buff_head res_q;
-
- u32 msg_seq;
- } mcu;
void __iomem *regs;
spinlock_t irq_lock;
u32 irqmask;
@@ -433,6 +444,7 @@ struct mt76_rx_status {
u8 iv[6];
+ u8 ext_phy:1;
u8 aggr:1;
u8 tid;
u16 seqno;
@@ -449,12 +461,33 @@ struct mt76_rx_status {
s8 chain_signal[IEEE80211_MAX_CHAINS];
};
-struct mt76_dev {
+struct mt76_phy {
struct ieee80211_hw *hw;
+ struct mt76_dev *dev;
+ void *priv;
+
+ unsigned long state;
+
struct cfg80211_chan_def chandef;
struct ieee80211_channel *main_chan;
struct mt76_channel_state *chan_state;
+ ktime_t survey_time;
+
+ struct mt76_sband sband_2g;
+ struct mt76_sband sband_5g;
+
+ int txpower_cur;
+ u8 antenna_mask;
+};
+
+struct mt76_dev {
+ struct mt76_phy phy; /* must be first */
+
+ struct mt76_phy *phy2;
+
+ struct ieee80211_hw *hw;
+
spinlock_t lock;
spinlock_t cc_lock;
@@ -471,14 +504,15 @@ struct mt76_dev {
const struct mt76_mcu_ops *mcu_ops;
struct device *dev;
+ struct mt76_mcu mcu;
+
struct net_device napi_dev;
spinlock_t rx_lock;
struct napi_struct napi[__MT_RXQ_MAX];
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
- u32 ampdu_ref;
struct list_head txwi_cache;
- struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
+ struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4];
@@ -491,32 +525,25 @@ struct mt76_dev {
struct sk_buff_head status_list;
unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
+ unsigned long wcid_phy_mask[MT76_N_WCIDS / BITS_PER_LONG];
struct mt76_wcid global_wcid;
struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
u8 macaddr[ETH_ALEN];
u32 rev;
- unsigned long state;
u32 aggr_stats[32];
- u8 antenna_mask;
- u16 chainmask;
-
struct tasklet_struct pre_tbtt_tasklet;
int beacon_int;
u8 beacon_mask;
- struct mt76_sband sband_2g;
- struct mt76_sband sband_5g;
struct debugfs_blob_wrapper eeprom;
struct debugfs_blob_wrapper otp;
struct mt76_hw_cap cap;
struct mt76_rate_power rate_power;
- int txpower_conf;
- int txpower_cur;
enum nl80211_dfs_regions region;
@@ -529,8 +556,6 @@ struct mt76_dev {
u8 csa_complete;
- ktime_t survey_time;
-
u32 rxfilter;
union {
@@ -565,7 +590,9 @@ enum mt76_phy_type {
#define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_mcu_send_msg(dev, ...) (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
+
#define __mt76_mcu_send_msg(dev, ...) (dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
+#define __mt76_mcu_skb_send_msg(dev, ...) (dev)->mcu_ops->mcu_skb_send_msg((dev), __VA_ARGS__)
#define mt76_mcu_restart(dev, ...) (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...) (dev)->mcu_ops->mcu_restart((dev))
@@ -581,7 +608,17 @@ enum mt76_phy_type {
#define __mt76_rmw_field(_dev, _reg, _field, _val) \
__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
-#define mt76_hw(dev) (dev)->mt76.hw
+#define mt76_hw(dev) (dev)->mphy.hw
+
+static inline struct ieee80211_hw *
+mt76_wcid_hw(struct mt76_dev *dev, u8 wcid)
+{
+ if (wcid < MT76_N_WCIDS &&
+ mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
+ return dev->phy2->hw;
+
+ return dev->phy.hw;
+}
bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
int timeout);
@@ -624,6 +661,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
+void mt76_unregister_phy(struct mt76_phy *phy);
+
+struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
+ const struct ieee80211_ops *ops);
+int mt76_register_phy(struct mt76_phy *phy);
struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_queues_read(struct seq_file *s, void *data);
@@ -633,6 +675,20 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
+static inline struct mt76_phy *
+mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
+{
+ if (phy_ext && dev->phy2)
+ return dev->phy2;
+ return &dev->phy;
+}
+
+static inline struct ieee80211_hw *
+mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
+{
+ return mt76_dev_phy(dev, phy_ext)->hw;
+}
+
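The two helpers above are the heart of the dual-radio split: callers ask for a phy via the phy_ext flag and transparently fall back to the primary radio when no second phy is registered. A minimal usage sketch (illustrative function name, not part of the patch), given that per-band state such as survey_time now lives in struct mt76_phy:

/* sketch: select the phy first, then touch its per-band state */
static void mark_survey_example(struct mt76_dev *dev, bool ext_phy)
{
        struct mt76_phy *phy = mt76_dev_phy(dev, ext_phy);

        phy->survey_time = ktime_get_boottime();
}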
static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
@@ -701,24 +757,31 @@ static inline bool mt76_is_skb_pktid(u8 pktid)
return pktid >= MT_PACKET_ID_FIRST;
}
+static inline u8 mt76_tx_power_nss_delta(u8 nss)
+{
+ static const u8 nss_delta[4] = { 0, 6, 9, 12 };
+
+ return nss_delta[nss - 1];
+}
+
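mt76_tx_power_nss_delta() returns the combined-output gain from transmitting on multiple chains, expressed in the EEPROM's 0.5 dB units: 6 (3 dB) for two chains, 9 (4.5 dB) for three, 12 (6 dB) for four, i.e. roughly 10*log10(n). The delta is added while the value is still in half-dB units, before rounding to whole dBm, as in the mt7615 txpower rework further below. A minimal sketch (illustrative name; assumes 1 <= n_chains <= 4, which all current callers guarantee):

/* sketch: fold the per-stream delta into a half-dB EEPROM power value */
static int target_power_dbm_example(u8 eep_half_db, int n_chains)
{
        int delta = mt76_tx_power_nss_delta(n_chains);

        return DIV_ROUND_UP(eep_half_db + delta, 2);
}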
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
-void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
bool send_bar);
-void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid);
-void mt76_txq_schedule_all(struct mt76_dev *dev);
+void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
+void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_tasklet(unsigned long data);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
enum ieee80211_frame_release_type reason,
bool more_data);
-bool mt76_has_tx_pending(struct mt76_dev *dev);
-void mt76_set_channel(struct mt76_dev *dev);
+bool mt76_has_tx_pending(struct mt76_phy *phy);
+void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
@@ -752,8 +815,10 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
-int mt76_get_min_avg_rssi(struct mt76_dev *dev);
+int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm);
@@ -771,10 +836,22 @@ void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
-u32 mt76_calc_tx_airtime(struct mt76_dev *dev, struct ieee80211_tx_info *info,
- int len);
/* internal */
+static inline struct ieee80211_hw *
+mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hw *hw = dev->phy.hw;
+
+ if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
+ hw = dev->phy2->hw;
+
+ info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;
+
+ return hw;
+}
+
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
@@ -783,8 +860,6 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
-u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
- int len);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
@@ -804,7 +879,7 @@ static inline u8 q2ep(u8 qid)
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
- int timeout)
+ int timeout, int ep)
{
struct usb_interface *uintf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(uintf);
@@ -812,20 +887,23 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
unsigned int pipe;
if (actual_len)
- pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
+ pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
else
- pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
+ pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);
return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
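mt76u_bulk_msg() now takes the endpoint index from the caller instead of hard-coding the MCU command endpoints, so MCU traffic can be directed at a dedicated endpoint on hardware that provides one. A hedged usage sketch for reading a command response (function name and the 300 ms timeout are illustrative):

/* sketch: read an MCU response over the command-response IN endpoint */
static int read_mcu_resp_example(struct mt76_dev *dev, void *buf, int len)
{
        int actual_len;

        return mt76u_bulk_msg(dev, buf, len, &actual_len, 300,
                              MT_EP_IN_CMD_RESP);
}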
+int mt76u_skb_dma_info(struct sk_buff *skb, u32 info);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val);
-int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
void mt76u_deinit(struct mt76_dev *dev);
+int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
+ bool ext);
+int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
index e5af4f3389cc..60a996b63c0c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: ISC
#include "mt7603.h"
+#include "../trace.h"
void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
@@ -17,9 +18,11 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return IRQ_NONE;
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
intr &= dev->mt76.mmio.irqmask;
if (intr & MT_INT_MAC_IRQ3) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
index 47c85a9fac28..cc7c788abedd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
@@ -121,4 +121,8 @@ void mt7603_init_debugfs(struct mt7603_dev *dev)
mt7603_reset_read);
debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
mt7603_radio_read);
+ debugfs_create_u8("sensitivity_limit", 0600, dir,
+ &dev->sensitivity_limit);
+ debugfs_create_bool("dynamic_sensitivity", 0600, dir,
+ &dev->dynamic_sensitivity);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index a6ab73060aad..a08b85281170 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -30,6 +30,16 @@ mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
+ static const u8 tid_to_ac[8] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+ };
__le32 *txd = (__le32 *)skb->data;
struct ieee80211_hdr *hdr;
struct ieee80211_sta *sta;
@@ -38,7 +48,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
void *priv;
int idx;
u32 val;
- u8 tid;
+ u8 tid = 0;
if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
goto free;
@@ -56,15 +66,16 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
priv = msta = container_of(wcid, struct mt7603_sta, wcid);
val = le32_to_cpu(txd[0]);
- skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
-
val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
txd[0] = cpu_to_le32(val);
sta = container_of(priv, struct ieee80211_sta, drv_priv);
hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
- tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_TAG1D_MASK;
+ skb_set_queue_mapping(skb, tid_to_ac[tid]);
ieee80211_sta_set_buffered(sta, tid, true);
spin_lock_bh(&dev->ps_lock);
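The new tid_to_ac[] table is the standard 802.11 UP-to-AC mapping (TIDs 1/2 -> background, 0/3 -> best effort, 4/5 -> video, 6/7 -> voice), applied here because the loopback path can no longer trust the queue index carried in the TXD. cfg80211 exports the same table, so an equivalent form (assuming net/cfg80211.h is in scope) would be:

        skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid & 7]);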
@@ -210,7 +221,7 @@ int mt7603_dma_init(struct mt7603_dev *dev)
return ret;
ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
- MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
+ MT7603_MCU_RX_RING_SIZE, MT_RX_BUF_SIZE);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 0696dbf28c5b..f641a8b56b39 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -113,7 +113,7 @@ mt7603_dma_sched_init(struct mt7603_dev *dev)
static void
mt7603_phy_init(struct mt7603_dev *dev)
{
- int rx_chains = dev->mt76.antenna_mask;
+ int rx_chains = dev->mphy.antenna_mask;
int tx_chains = hweight8(rx_chains) - 1;
mt76_rmw(dev, MT_WF_RMAC_RMCR,
@@ -284,7 +284,7 @@ mt7603_init_hardware(struct mt7603_dev *dev)
mt76_wr(dev, MT_WPDMA_GLO_CFG, 0x52000850);
mt7603_mac_dma_start(dev);
dev->rxfilter = mt76_rr(dev, MT_WF_RFCR);
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
for (i = 0; i < MT7603_WTBL_SIZE; i++) {
mt76_wr(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY | MT_PSE_RTA_WRITE |
@@ -363,9 +363,9 @@ static void mt7603_led_set_config(struct mt76_dev *mt76, u8 delay_on,
mt76);
u32 val, addr;
- val = MT_LED_STATUS_DURATION(0xffff) |
- MT_LED_STATUS_OFF(delay_off) |
- MT_LED_STATUS_ON(delay_on);
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
addr = mt7603_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
mt76_wr(dev, addr, val);
@@ -493,12 +493,12 @@ mt7603_init_txpower(struct mt7603_dev *dev,
target_power += max_offset;
dev->tx_power_limit = target_power;
- dev->mt76.txpower_cur = target_power;
+ dev->mphy.txpower_cur = target_power;
target_power = DIV_ROUND_UP(target_power, 2);
/* add 3 dBm for 2SS devices (combined output) */
- if (dev->mt76.antenna_mask & BIT(1))
+ if (dev->mphy.antenna_mask & BIT(1))
target_power += 3;
for (i = 0; i < sband->n_channels; i++) {
@@ -535,11 +535,13 @@ int mt7603_register_device(struct mt7603_dev *dev)
(unsigned long)dev);
/* Check for 7688, which only has 1SS */
- dev->mt76.antenna_mask = 3;
+ dev->mphy.antenna_mask = 3;
if (mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4))
- dev->mt76.antenna_mask = 1;
+ dev->mphy.antenna_mask = 1;
dev->slottime = 9;
+ dev->sensitivity_limit = 28;
+ dev->dynamic_sensitivity = true;
ret = mt7603_init_hardware(dev);
if (ret)
@@ -557,6 +559,7 @@ int mt7603_register_device(struct mt7603_dev *dev)
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
@@ -564,7 +567,6 @@ int mt7603_register_device(struct mt7603_dev *dev)
dev->mt76.led_cdev.blink_set = mt7603_led_set_blink;
}
- wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy->reg_notifier = mt7603_regd_notifier;
ret = mt76_register_device(&dev->mt76, true, mt7603_rates,
@@ -573,7 +575,7 @@ int mt7603_register_device(struct mt7603_dev *dev)
return ret;
mt7603_init_debugfs(dev);
- mt7603_init_txpower(dev, &dev->mt76.sband_2g.sband);
+ mt7603_init_txpower(dev, &dev->mphy.sband_2g.sband);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 812d081ad943..39b7c5d6e6cd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -4,6 +4,7 @@
#include <linux/timekeeping.h>
#include "mt7603.h"
#include "mac.h"
+#include "../trace.h"
#define MT_PSE_PAGE_SIZE 128
@@ -53,7 +54,7 @@ void mt7603_mac_set_timing(struct mt7603_dev *dev)
int sifs;
u32 val;
- if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
sifs = 16;
else
sifs = 10;
@@ -456,7 +457,7 @@ void mt7603_mac_sta_poll(struct mt7603_dev *dev)
return;
spin_lock_bh(&dev->mt76.cc_lock);
- dev->mt76.chan_state->cc_tx += total_airtime;
+ dev->mphy.chan_state->cc_tx += total_airtime;
spin_unlock_bh(&dev->mt76.cc_lock);
}
@@ -502,7 +503,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
memset(status, 0, sizeof(*status));
i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
- sband = (i & 1) ? &dev->mt76.sband_5g.sband : &dev->mt76.sband_2g.sband;
+ sband = (i & 1) ? &dev->mphy.sband_5g.sband : &dev->mphy.sband_2g.sband;
i >>= 1;
idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
@@ -531,12 +532,12 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
/* all subframes of an A-MPDU have the same timestamp */
if (dev->rx_ampdu_ts != rxd[12]) {
- if (!++dev->mt76.ampdu_ref)
- dev->mt76.ampdu_ref++;
+ if (!++dev->ampdu_ref)
+ dev->ampdu_ref++;
}
dev->rx_ampdu_ts = rxd[12];
- status->ampdu_ref = dev->mt76.ampdu_ref;
+ status->ampdu_ref = dev->ampdu_ref;
}
remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
@@ -609,7 +610,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
status->rate_idx = i;
- status->chains = dev->mt76.antenna_mask;
+ status->chains = dev->mphy.antenna_mask;
status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
dev->rssi_offset[0];
status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
@@ -668,7 +669,7 @@ mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
*bw = 1;
} else {
const struct ieee80211_rate *r;
- int band = dev->mt76.chandef.chan->band;
+ int band = dev->mphy.chandef.chan->band;
u16 val;
nss = 1;
@@ -1156,10 +1157,10 @@ out:
cck = true;
/* fall through */
case MT_PHY_TYPE_OFDM:
- if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
- sband = &dev->mt76.sband_5g.sband;
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &dev->mphy.sband_5g.sband;
else
- sband = &dev->mt76.sband_2g.sband;
+ sband = &dev->mphy.sband_2g.sband;
final_rate &= GENMASK(5, 0);
final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
cck);
@@ -1193,6 +1194,8 @@ mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
if (pid < MT_PACKET_ID_FIRST)
return false;
+ trace_mac_txdone(mdev, sta->wcid.idx, pid);
+
mt76_tx_status_lock(mdev, &list);
skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
if (skb) {
@@ -1389,10 +1392,10 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
int i;
ieee80211_stop_queues(dev->mt76.hw);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
/* lock/unlock all queues to ensure that no tx is pending */
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
tasklet_disable(&dev->mt76.tx_tasklet);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
@@ -1426,7 +1429,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
mt7603_pse_client_reset(dev);
- for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
+ for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
@@ -1439,7 +1442,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
mt7603_irq_enable(dev, mask);
skip_dma_reset:
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
mutex_unlock(&dev->mt76.mutex);
tasklet_enable(&dev->mt76.tx_tasklet);
@@ -1456,7 +1459,7 @@ skip_dma_reset:
napi_schedule(&dev->mt76.napi[1]);
ieee80211_wake_queues(dev->mt76.hw);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
}
static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
@@ -1574,7 +1577,7 @@ void mt7603_update_channel(struct mt76_dev *mdev)
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct mt76_channel_state *state;
- state = mdev->chan_state;
+ state = mdev->phy.chan_state;
state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
}
@@ -1724,6 +1727,9 @@ mt7603_false_cca_check(struct mt7603_dev *dev)
int min_signal;
u32 val;
+ if (!dev->dynamic_sensitivity)
+ return;
+
val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);
@@ -1737,7 +1743,7 @@ mt7603_false_cca_check(struct mt7603_dev *dev)
mt7603_cca_stats_reset(dev);
- min_signal = mt76_get_min_avg_rssi(&dev->mt76);
+ min_signal = mt76_get_min_avg_rssi(&dev->mt76, false);
if (!min_signal) {
dev->sensitivity = 0;
dev->last_cca_adj = jiffies;
@@ -1747,7 +1753,8 @@ mt7603_false_cca_check(struct mt7603_dev *dev)
min_signal -= 15;
false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
- if (false_cca > 600) {
+ if (false_cca > 600 &&
+ dev->sensitivity < -100 + dev->sensitivity_limit) {
if (!dev->sensitivity)
dev->sensitivity = -92;
else
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 962e2822d19f..26cb711b465f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -15,8 +15,8 @@ mt7603_start(struct ieee80211_hw *hw)
mt7603_mac_reset_counters(dev);
mt7603_mac_start(dev);
- dev->mt76.survey_time = ktime_get_boottime();
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ dev->mphy.survey_time = ktime_get_boottime();
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
mt7603_mac_work(&dev->mt76.mac_work.work);
return 0;
@@ -27,7 +27,7 @@ mt7603_stop(struct ieee80211_hw *hw)
{
struct mt7603_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
cancel_delayed_work_sync(&dev->mt76.mac_work);
mt7603_mac_stop(dev);
}
@@ -143,16 +143,16 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
mt7603_beacon_set_timer(dev, -1, 0);
- mt76_set_channel(&dev->mt76);
+ mt76_set_channel(&dev->mphy);
mt7603_mac_stop(dev);
if (def->width == NL80211_CHAN_WIDTH_40)
bw = MT_BW_40;
- dev->mt76.chandef = *def;
+ dev->mphy.chandef = *def;
mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw);
ret = mt7603_mcu_set_channel(dev);
if (ret) {
@@ -176,9 +176,9 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
mt7603_mac_set_timing(dev);
mt7603_mac_start(dev);
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
msecs_to_jiffies(MT7603_WATCHDOG_TIME));
@@ -187,10 +187,10 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_READ_CLR_DIS);
mt76_set(dev, MT_MIB_CTL,
MT_MIB_CTL_CCA_NAV_TX | MT_MIB_CTL_PSCCA_TIME);
- mt76_rr(dev, MT_MIB_STAT_PSCCA);
+ mt76_rr(dev, MT_MIB_STAT_CCA);
mt7603_cca_stats_reset(dev);
- dev->mt76.survey_time = ktime_get_boottime();
+ dev->mphy.survey_time = ktime_get_boottime();
mt7603_init_edcca(dev);
@@ -642,7 +642,7 @@ mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
{
struct mt7603_dev *dev = hw->priv;
- dev->coverage_class = coverage_class;
+ dev->coverage_class = max_t(s16, coverage_class, 0);
mt7603_mac_set_timing(dev);
}
@@ -667,7 +667,7 @@ static void mt7603_tx(struct ieee80211_hw *hw,
wcid = &mvif->sta.wcid;
}
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
+ mt76_tx(&dev->mphy, control->sta, wcid, skb);
}
const struct ieee80211_ops mt7603_ops = {
@@ -680,6 +680,7 @@ const struct ieee80211_ops mt7603_ops = {
.configure_filter = mt7603_configure_filter,
.bss_info_changed = mt7603_bss_info_changed,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt7603_set_key,
.conf_tx = mt7603_conf_tx,
.sw_scan_start = mt76_sw_scan,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index 02b2bd60d04d..77985d81c447 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -22,12 +22,11 @@ __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb,
struct mt7603_mcu_txd *txd;
u8 seq;
- seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mcu.msg_seq & 0xf;
if (!seq)
- seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mcu.msg_seq & 0xf;
txd = (struct mt7603_mcu_txd *)skb_push(skb, hdrlen);
- memset(txd, 0, hdrlen);
txd->len = cpu_to_le16(skb->len);
if (cmd == -MCU_CMD_FW_SCATTER)
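The message sequence number is confined to 4 bits and skips the value 0 on wraparound; the rationale (an assumption, not stated in the patch) is that a sequence of 0 marks unsolicited firmware events, so command responses must never carry it. The rule in isolation, as a sketch:

/* sketch: 4-bit sequence counter that never yields 0 */
static u8 next_mcu_seq_example(u32 *counter)
{
        u8 seq = ++(*counter) & 0xf;

        if (!seq)
                seq = ++(*counter) & 0xf;
        return seq;
}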
@@ -67,7 +66,7 @@ mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
if (!skb)
return -ENOMEM;
- mutex_lock(&mdev->mmio.mcu.mutex);
+ mutex_lock(&mdev->mcu.mutex);
ret = __mt7603_mcu_msg_send(dev, skb, cmd, &seq);
if (ret)
@@ -97,7 +96,7 @@ mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
}
out:
- mutex_unlock(&mdev->mmio.mcu.mutex);
+ mutex_unlock(&mdev->mcu.mutex);
return ret;
}
@@ -277,7 +276,7 @@ int mt7603_mcu_init(struct mt7603_dev *dev)
void mt7603_mcu_exit(struct mt7603_dev *dev)
{
__mt76_mcu_restart(&dev->mt76);
- skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
}
int mt7603_mcu_set_eeprom(struct mt7603_dev *dev)
@@ -397,7 +396,7 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
u8 temp_comp_power[17];
u8 reserved;
} req = {
- .center_channel = dev->mt76.chandef.chan->hw_value,
+ .center_channel = dev->mphy.chandef.chan->hw_value,
#define EEP_VAL(n) ((u8 *)dev->mt76.eeprom.data)[n]
.tssi = EEP_VAL(MT_EE_NIC_CONF_1 + 1),
.temp_comp = EEP_VAL(MT_EE_NIC_CONF_1),
@@ -430,9 +429,9 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
int mt7603_mcu_set_channel(struct mt7603_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
struct ieee80211_hw *hw = mt76_hw(dev);
- int n_chains = hweight8(dev->mt76.antenna_mask);
+ int n_chains = hweight8(dev->mphy.antenna_mask);
struct {
u8 control_chan;
u8 center_chan;
@@ -452,7 +451,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
s8 tx_power;
int i, ret;
- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_40) {
+ if (dev->mphy.chandef.width == NL80211_CHAN_WIDTH_40) {
req.bw = MT_BW_40;
if (chandef->center_freq1 > chandef->chan->center_freq)
req.center_chan += 2;
@@ -461,11 +460,11 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
}
tx_power = hw->conf.power_level * 2;
- if (dev->mt76.antenna_mask == 3)
+ if (dev->mphy.antenna_mask == 3)
tx_power -= 6;
tx_power = min(tx_power, dev->tx_power_limit);
- dev->mt76.txpower_cur = tx_power;
+ dev->mphy.txpower_cur = tx_power;
for (i = 0; i < ARRAY_SIZE(req.txpower); i++)
req.txpower[i] = tx_power;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index ab54b0612e98..7fadf094e9be 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -15,6 +15,7 @@
#define MT7603_RATE_RETRY 2
+#define MT7603_MCU_RX_RING_SIZE 64
#define MT7603_RX_RING_SIZE 128
#define MT7603_FIRMWARE_E1 "mt7603_e1.bin"
@@ -98,7 +99,10 @@ enum mt7603_reset_cause {
};
struct mt7603_dev {
- struct mt76_dev mt76; /* must be first */
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
const struct mt76_bus_ops *bus_ops;
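The anonymous union relies on the layout rule used throughout this series: struct mt76_phy is the first member of struct mt76_dev (see the "must be first" comment in the mt76.h hunk), so dev->mt76.phy and dev->mphy name the same storage. A hedged compile-time guard one could add to document that assumption:

/* sketch: assert the aliasing assumption behind the union */
static inline void mt7603_layout_check_example(void)
{
        BUILD_BUG_ON(offsetof(struct mt76_dev, phy) != 0);
}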
@@ -115,6 +119,7 @@ struct mt7603_dev {
u32 false_cca_ofdm, false_cca_cck;
unsigned long last_cca_adj;
+ u32 ampdu_ref;
__le32 rx_ampdu_ts;
u8 rssi_offset[3];
@@ -137,7 +142,9 @@ struct mt7603_dev {
u8 ed_strict_mode;
u8 ed_strong_signal;
+ bool dynamic_sensitivity;
s8 sensitivity;
+ u8 sensitivity_limit;
u8 beacon_check;
u8 tx_hang_check;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
index 6e23ed3dfdff..6741e6907194 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
@@ -585,18 +585,9 @@ enum {
#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x10 + ((_n) * 8))
#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x14 + ((_n) * 8))
-#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24)
-#define MT_LED_STATUS_OFF(_v) (((_v) << \
- __ffs(MT_LED_STATUS_OFF_MASK)) & \
- MT_LED_STATUS_OFF_MASK)
-#define MT_LED_STATUS_ON_MASK GENMASK(23, 16)
-#define MT_LED_STATUS_ON(_v) (((_v) << \
- __ffs(MT_LED_STATUS_ON_MASK)) & \
- MT_LED_STATUS_ON_MASK)
-#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 0)
-#define MT_LED_STATUS_DURATION(_v) (((_v) << \
- __ffs(MT_LED_STATUS_DURATION_MASK)) &\
- MT_LED_STATUS_DURATION_MASK)
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 0)
#define MT_CLIENT_BASE_PHYS_ADDR 0x800c0000
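With the masks reduced to plain GENMASK() definitions, FIELD_PREP() from linux/bitfield.h reproduces exactly what the removed hand-rolled shift-and-mask macros did; FIELD_PREP(MT_LED_STATUS_OFF, v) expands to ((v) << 24) & GENMASK(31, 24). The LED status word from the init.c hunk above, as a standalone sketch:

static u32 led_status_word_example(u8 delay_on, u8 delay_off)
{
        return FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
               FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
               FIELD_PREP(MT_LED_STATUS_ON, delay_on);
}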
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
index 4cabba9aa2ea..6afd4aea67ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
@@ -11,3 +11,14 @@ config MT7615E
MU-MIMO up to 4 users/group and 160MHz channels.
To compile this driver as a module, choose M here.
+
+config MT7622_WMAC
+ bool "MT7622 (SoC) WMAC support"
+ depends on MT7615E
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select REGMAP
+ default y
+ help
+ This adds support for the built-in WMAC on MT7622 SoC devices,
+ which has the same feature set as MT7615 but is limited to
+ 2.4 GHz only.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
index 5aaac69849d6..5c6a220ed7e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
@@ -2,5 +2,8 @@
obj-$(CONFIG_MT7615E) += mt7615e.o
-mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
- debugfs.o
+CFLAGS_trace.o := -I$(src)
+
+mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o mmio.o \
+ debugfs.o trace.o
+mt7615e-$(CONFIG_MT7622_WMAC) += soc.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index f6b75f832e6a..b4d0795154e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -7,6 +7,9 @@ mt7615_radar_pattern_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
return mt7615_mcu_rdd_send_pattern(dev);
}
@@ -18,6 +21,9 @@ mt7615_scs_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
mt7615_mac_set_scs(dev, val);
return 0;
@@ -37,6 +43,84 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_scs, mt7615_scs_get,
mt7615_scs_set, "%lld\n");
static int
+mt7615_dbdc_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ if (val)
+ mt7615_register_ext_phy(dev);
+ else
+ mt7615_unregister_ext_phy(dev);
+
+ return 0;
+}
+
+static int
+mt7615_dbdc_get(void *data, u64 *val)
+{
+ struct mt7615_dev *dev = data;
+
+ *val = !!mt7615_ext_phy(dev);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_dbdc, mt7615_dbdc_get,
+ mt7615_dbdc_set, "%lld\n");
+
+static int
+mt7615_fw_debug_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ dev->fw_debug = val;
+ mt7615_mcu_fw_log_2_host(dev, dev->fw_debug ? 2 : 0);
+
+ return 0;
+}
+
+static int
+mt7615_fw_debug_get(void *data, u64 *val)
+{
+ struct mt7615_dev *dev = data;
+
+ *val = dev->fw_debug;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug, mt7615_fw_debug_get,
+ mt7615_fw_debug_set, "%lld\n");
+
+static int
+mt7615_reset_test_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+ struct sk_buff *skb;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
+ skb = alloc_skb(1, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, 1);
+ mt76_tx_queue_skb_raw(dev, 0, skb, 0);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_reset_test, NULL,
+ mt7615_reset_test_set, "%lld\n");
+
+static int
mt7615_ampdu_stat_read(struct seq_file *file, void *data)
{
struct mt7615_dev *dev = file->private;
@@ -74,15 +158,28 @@ static const struct file_operations fops_ampdu_stat = {
.release = single_release,
};
+static void
+mt7615_radio_read_phy(struct mt7615_phy *phy, struct seq_file *s)
+{
+ struct mt7615_dev *dev = dev_get_drvdata(s->private);
+ bool ext_phy = phy != &dev->phy;
+
+ if (!phy)
+ return;
+
+ seq_printf(s, "Radio %d sensitivity: ofdm=%d cck=%d\n", ext_phy,
+ phy->ofdm_sensitivity, phy->cck_sensitivity);
+ seq_printf(s, "Radio %d false CCA: ofdm=%d cck=%d\n", ext_phy,
+ phy->false_cca_ofdm, phy->false_cca_cck);
+}
+
static int
mt7615_radio_read(struct seq_file *s, void *data)
{
struct mt7615_dev *dev = dev_get_drvdata(s->private);
- seq_printf(s, "Sensitivity: ofdm=%d cck=%d\n",
- dev->ofdm_sensitivity, dev->cck_sensitivity);
- seq_printf(s, "False CCA: ofdm=%d cck=%d\n",
- dev->false_cca_ofdm, dev->false_cca_cck);
+ mt7615_radio_read_phy(&dev->phy, s);
+ mt7615_radio_read_phy(mt7615_ext_phy(dev), s);
return 0;
}
@@ -92,6 +189,9 @@ static int mt7615_read_temperature(struct seq_file *s, void *data)
struct mt7615_dev *dev = dev_get_drvdata(s->private);
int temp;
+ if (!mt7615_wait_for_mcu_init(dev))
+ return 0;
+
/* cpu */
temp = mt7615_mcu_get_temperature(dev, 0);
seq_printf(s, "Temperature: %d\n", temp);
@@ -164,12 +264,18 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
if (!dir)
return -ENOMEM;
- debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
- mt7615_queues_read);
+ if (is_mt7615(&dev->mt76))
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt7615_queues_read);
+ else
+ debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
+ mt76_queues_read);
debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
mt7615_queues_acq);
debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
debugfs_create_file("scs", 0600, dir, dev, &fops_scs);
+ debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc);
+ debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
mt7615_radio_read);
debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
@@ -184,6 +290,8 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
&dev->radar_pattern.power);
debugfs_create_file("radar_trigger", 0200, dir, dev,
&fops_radar_pattern);
+ debugfs_create_file("reset_test", 0200, dir, dev,
+ &fops_reset_test);
debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
mt7615_read_temperature);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index 285d4f1d6178..b19f208e3d54 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -12,47 +12,85 @@
#include "mac.h"
static int
-mt7615_init_tx_queues(struct mt7615_dev *dev, int n_desc)
+mt7615_init_tx_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
+ int idx, int n_desc)
{
- struct mt76_sw_queue *q;
struct mt76_queue *hwq;
- int err, i;
+ int err;
hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
if (!hwq)
return -ENOMEM;
- err = mt76_queue_alloc(dev, hwq, 0, n_desc, 0, MT_TX_RING_BASE);
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
if (err < 0)
return err;
- for (i = 0; i < MT_TXQ_MCU; i++) {
- q = &dev->mt76.q_tx[i];
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
- }
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
return 0;
}
static int
-mt7615_init_mcu_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
{
- struct mt76_queue *hwq;
- int err;
+ static const u8 wmm_queue_map[] = {
+ MT7622_TXQ_AC0,
+ MT7622_TXQ_AC1,
+ MT7622_TXQ_AC2,
+ MT7622_TXQ_AC3,
+ };
+ int ret;
+ int i;
- hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
- if (!hwq)
- return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[i],
+ wmm_queue_map[i],
+ MT7615_TX_RING_SIZE / 2);
+ if (ret)
+ return ret;
+ }
- err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
- if (err < 0)
- return err;
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+ MT7622_TXQ_MGMT, MT7615_TX_MGMT_RING_SIZE);
+ if (ret)
+ return ret;
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ MT7622_TXQ_MCU, MT7615_TX_MCU_RING_SIZE);
+ return ret;
+}
+
+static int
+mt7615_init_tx_queues(struct mt7615_dev *dev)
+{
+ struct mt76_sw_queue *q;
+ int ret, i;
+
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
+ MT7615_TXQ_FWDL,
+ MT7615_TX_FWDL_RING_SIZE);
+ if (ret)
+ return ret;
+
+ if (!is_mt7615(&dev->mt76))
+ return mt7622_init_tx_queues_multi(dev);
+
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[0], 0,
+ MT7615_TX_RING_SIZE);
+ if (ret)
+ return ret;
+ for (i = 1; i < MT_TXQ_MCU; i++) {
+ q = &dev->mt76.q_tx[i];
+ INIT_LIST_HEAD(&q->swq);
+ q->q = dev->mt76.q_tx[0].q;
+ }
+
+ ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ MT7615_TXQ_MCU,
+ MT7615_TX_MCU_RING_SIZE);
- return 0;
+ return ret;
}
@@ -63,8 +101,12 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
__le32 *rxd = (__le32 *)skb->data;
__le32 *end = (__le32 *)&skb->data[skb->len];
enum rx_pkt_type type;
+ u16 flag;
type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
+ if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
+ type = PKT_TYPE_NORMAL_MCU;
switch (type) {
case PKT_TYPE_TXS:
@@ -78,6 +120,7 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
case PKT_TYPE_RX_EVENT:
mt7615_mcu_rx_event(dev, skb);
break;
+ case PKT_TYPE_NORMAL_MCU:
case PKT_TYPE_NORMAL:
if (!mt7615_mac_fill_rx(dev, skb)) {
mt76_rx(&dev->mt76, q, skb);
@@ -90,25 +133,32 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
+static void
+mt7615_tx_cleanup(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
+ if (is_mt7615(&dev->mt76)) {
+ mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
+ } else {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ mt76_queue_tx_cleanup(dev, i, false);
+ }
+}
+
static int mt7615_poll_tx(struct napi_struct *napi, int budget)
{
- static const u8 queue_map[] = {
- MT_TXQ_MCU,
- MT_TXQ_BE
- };
struct mt7615_dev *dev;
- int i;
dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
- for (i = 0; i < ARRAY_SIZE(queue_map); i++)
- mt76_queue_tx_cleanup(dev, queue_map[i], false);
+ mt7615_tx_cleanup(dev);
if (napi_complete_done(napi, 0))
mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
- for (i = 0; i < ARRAY_SIZE(queue_map); i++)
- mt76_queue_tx_cleanup(dev, queue_map[i], false);
+ mt7615_tx_cleanup(dev);
mt7615_mac_sta_poll(dev);
@@ -117,8 +167,68 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
+static void mt7622_dma_sched_init(struct mt7615_dev *dev)
+{
+ u32 reg = mt7615_reg_map(dev, MT_DMASHDL_BASE);
+ int i;
+
+ mt76_rmw(dev, reg + MT_DMASHDL_PKT_MAX_SIZE,
+ MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
+
+ for (i = 0; i <= 5; i++)
+ mt76_wr(dev, reg + MT_DMASHDL_GROUP_QUOTA(i),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x10) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x800));
+
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(0), 0x42104210);
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(1), 0x42104210);
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(2), 0x5);
+ mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(3), 0);
+
+ mt76_wr(dev, reg + MT_DMASHDL_SCHED_SET0, 0x6012345f);
+ mt76_wr(dev, reg + MT_DMASHDL_SCHED_SET1, 0xedcba987);
+}
+
+static void mt7663_dma_sched_init(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE),
+ MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
+
+ /* enable refill control for groups 0, 1, 2, 4 and 5 */
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffc80000);
+ /* enable groups 0, 1, 2, 4, 5 and 15 */
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x70068037);
+
+ /* each group's min quota must be larger than PLE_PKT_MAX_SIZE_NUM */
+ for (i = 0; i < 5; i++)
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x40) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x800));
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(5)),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x40) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x40));
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(15)),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x20) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x20));
+
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x00050005);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(3)), 0);
+ /* map the ALTX0 and ALTX1 queue IDs to group 5 */
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6012345f);
+ mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987);
+}
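Each group quota register packs a minimum and a maximum page count side by side; a small illustrative helper (not part of the patch) showing the encoding used by all the quota writes above:

/* sketch: pack min/max page quotas for one DMASHDL group */
static u32 dmashdl_quota_example(u16 min, u16 max)
{
        return FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, min) |
               FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, max);
}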
+
int mt7615_dma_init(struct mt7615_dev *dev)
{
+ int rx_ring_size = MT7615_RX_RING_SIZE;
int ret;
mt76_dma_attach(&dev->mt76);
@@ -126,7 +236,6 @@ int mt7615_dma_init(struct mt7615_dev *dev)
mt76_wr(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE |
MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN |
- MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY |
MT_WPDMA_GLO_CFG_OMIT_TX_INFO);
mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
@@ -141,28 +250,22 @@ int mt7615_dma_init(struct mt7615_dev *dev)
mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_MULTI_DMA_EN, 0x3);
- mt76_wr(dev, MT_WPDMA_GLO_CFG1, 0x1);
- mt76_wr(dev, MT_WPDMA_TX_PRE_CFG, 0xf0000);
- mt76_wr(dev, MT_WPDMA_RX_PRE_CFG, 0xf7f0000);
- mt76_wr(dev, MT_WPDMA_ABT_CFG, 0x4000026);
- mt76_wr(dev, MT_WPDMA_ABT_CFG1, 0x18811881);
- mt76_set(dev, 0x7158, BIT(16));
- mt76_clear(dev, 0x7000, BIT(23));
- mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
-
- ret = mt7615_init_tx_queues(dev, MT7615_TX_RING_SIZE);
- if (ret)
- return ret;
+ if (is_mt7615(&dev->mt76)) {
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY);
+
+ mt76_wr(dev, MT_WPDMA_GLO_CFG1, 0x1);
+ mt76_wr(dev, MT_WPDMA_TX_PRE_CFG, 0xf0000);
+ mt76_wr(dev, MT_WPDMA_RX_PRE_CFG, 0xf7f0000);
+ mt76_wr(dev, MT_WPDMA_ABT_CFG, 0x4000026);
+ mt76_wr(dev, MT_WPDMA_ABT_CFG1, 0x18811881);
+ mt76_set(dev, 0x7158, BIT(16));
+ mt76_clear(dev, 0x7000, BIT(23));
+ }
- ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
- MT7615_TXQ_MCU,
- MT7615_TX_MCU_RING_SIZE);
- if (ret)
- return ret;
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
- ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
- MT7615_TXQ_FWDL,
- MT7615_TX_FWDL_RING_SIZE);
+ ret = mt7615_init_tx_queues(dev);
if (ret)
return ret;
@@ -173,9 +276,11 @@ int mt7615_dma_init(struct mt7615_dev *dev)
if (ret)
return ret;
+ if (!is_mt7615(&dev->mt76))
+ rx_ring_size /= 2;
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
- MT7615_RX_RING_SIZE, MT_RX_BUF_SIZE,
- MT_RX_RING_BASE);
+ rx_ring_size, MT_RX_BUF_SIZE, MT_RX_RING_BASE);
if (ret)
return ret;
@@ -199,7 +304,14 @@ int mt7615_dma_init(struct mt7615_dev *dev)
MT_WPDMA_GLO_CFG_RX_DMA_EN);
/* enable interrupts for TX/RX rings */
- mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
+ mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ MT_INT_MCU_CMD);
+
+ if (is_mt7622(&dev->mt76))
+ mt7622_dma_sched_init(dev);
+
+ if (is_mt7663(&dev->mt76))
+ mt7663_dma_sched_init(dev);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index 17e277bf39e0..dfa9a08b896d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -84,17 +84,30 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
switch (val) {
case 0x7615:
+ case 0x7622:
return 0;
default:
return -EINVAL;
}
}
-static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
+static void
+mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
{
- u8 *eeprom = dev->mt76.eeprom.data;
- u8 tx_mask, rx_mask, max_nss;
- u32 val;
+ u8 val, *eeprom = dev->mt76.eeprom.data;
+
+ if (is_mt7663(&dev->mt76)) {
+ /* dual band */
+ dev->mt76.cap.has_2ghz = true;
+ dev->mt76.cap.has_5ghz = true;
+ return;
+ }
+
+ if (is_mt7622(&dev->mt76)) {
+ /* 2GHz only */
+ dev->mt76.cap.has_2ghz = true;
+ return;
+ }
val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
eeprom[MT_EE_WIFI_CONF]);
@@ -110,23 +123,34 @@ static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
dev->mt76.cap.has_5ghz = true;
break;
}
+}
+
+static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 tx_mask;
- /* read tx-rx mask from eeprom */
- val = mt76_rr(dev, MT_TOP_STRAP_STA);
- max_nss = val & MT_TOP_3NSS ? 3 : 4;
+ mt7615_eeprom_parse_hw_band_cap(dev);
- rx_mask = FIELD_GET(MT_EE_NIC_CONF_RX_MASK,
- eeprom[MT_EE_NIC_CONF_0]);
- if (!rx_mask || rx_mask > max_nss)
- rx_mask = max_nss;
+ if (is_mt7663(&dev->mt76)) {
+ tx_mask = 2;
+ } else {
+ u8 max_nss;
+ u32 val;
- tx_mask = FIELD_GET(MT_EE_NIC_CONF_TX_MASK,
- eeprom[MT_EE_NIC_CONF_0]);
- if (!tx_mask || tx_mask > max_nss)
- tx_mask = max_nss;
+ /* read tx-rx mask from eeprom */
+ val = mt76_rr(dev, MT_TOP_STRAP_STA);
+ max_nss = val & MT_TOP_3NSS ? 3 : 4;
- dev->mt76.chainmask = tx_mask << 8 | rx_mask;
- dev->mt76.antenna_mask = BIT(tx_mask) - 1;
+ tx_mask = FIELD_GET(MT_EE_NIC_CONF_TX_MASK,
+ eeprom[MT_EE_NIC_CONF_0]);
+ if (!tx_mask || tx_mask > max_nss)
+ tx_mask = max_nss;
+ }
+
+ dev->chainmask = BIT(tx_mask) - 1;
+ dev->mphy.antenna_mask = dev->chainmask;
+ dev->phy.chainmask = dev->chainmask;
}
int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
@@ -209,6 +233,38 @@ static void mt7615_apply_cal_free_data(struct mt7615_dev *dev)
eeprom[ical_nocheck[i]] = otp[ical_nocheck[i]];
}
+static void mt7622_apply_cal_free_data(struct mt7615_dev *dev)
+{
+ static const u16 ical[] = {
+ 0x53, 0x54, 0x55, 0x56, 0xf4, 0xf7, 0x144, 0x156, 0x15b
+ };
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 *otp = dev->mt76.otp.data;
+ int i;
+
+ if (!otp)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ical); i++) {
+ if (!otp[ical[i]])
+ continue;
+
+ eeprom[ical[i]] = otp[ical[i]];
+ }
+}
+
+static void mt7615_cal_free_data(struct mt7615_dev *dev)
+{
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7622:
+ mt7622_apply_cal_free_data(dev);
+ break;
+ case 0x7615:
+ mt7615_apply_cal_free_data(dev);
+ break;
+ }
+}
+
int mt7615_eeprom_init(struct mt7615_dev *dev)
{
int ret;
@@ -222,7 +278,7 @@ int mt7615_eeprom_init(struct mt7615_dev *dev)
memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data,
MT7615_EEPROM_SIZE);
else
- mt7615_apply_cal_free_data(dev);
+ mt7615_cal_free_data(dev);
mt7615_eeprom_parse_hw_cap(dev);
memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
index c3bc69ac210e..8a2a64b7fcd3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
@@ -18,10 +18,13 @@ enum mt7615_eeprom_field {
MT_EE_TX1_5G_G0_TARGET_POWER = 0x098,
MT_EE_EXT_PA_2G_TARGET_POWER = 0x0f2,
MT_EE_EXT_PA_5G_TARGET_POWER = 0x0f3,
+ MT7663_EE_TX0_2G_TARGET_POWER = 0x123,
MT_EE_TX2_5G_G0_TARGET_POWER = 0x142,
MT_EE_TX3_5G_G0_TARGET_POWER = 0x16a,
- __MT_EE_MAX = 0x3bf
+ MT7615_EE_MAX = 0x3bf,
+ MT7622_EE_MAX = 0x3db,
+ MT7663_EE_MAX = 0x400,
};
#define MT_EE_NIC_CONF_TX_MASK GENMASK(7, 4)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 553bd4d988f7..03b1e56534d6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -4,6 +4,7 @@
* Author: Roy Luo <royluo@google.com>
* Ryder Lee <ryder.lee@mediatek.com>
* Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
*/
#include <linux/etherdevice.h>
@@ -13,55 +14,35 @@
static void mt7615_phy_init(struct mt7615_dev *dev)
{
- /* disable band 0 rf low power beacon mode */
- mt76_rmw(dev, MT_WF_PHY_WF2_RFCTRL0, MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN,
- MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
+ /* disable rf low power beacon mode */
+ mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(0), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
+ mt76_set(dev, MT_WF_PHY_WF2_RFCTRL0(1), MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
}
-static void mt7615_mac_init(struct mt7615_dev *dev)
+static void
+mt7615_init_mac_chain(struct mt7615_dev *dev, int chain)
{
u32 val, mask, set;
- int i;
- /* enable band 0/1 clk */
- mt76_set(dev, MT_CFG_CCR,
- MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN |
- MT_CFG_CCR_MAC_D1_1X_GC_EN | MT_CFG_CCR_MAC_D1_2X_GC_EN);
-
- val = mt76_rmw(dev, MT_TMAC_TRCR0,
- MT_TMAC_TRCR_CCA_SEL | MT_TMAC_TRCR_SEC_CCA_SEL,
- FIELD_PREP(MT_TMAC_TRCR_CCA_SEL, 2) |
- FIELD_PREP(MT_TMAC_TRCR_SEC_CCA_SEL, 0));
- mt76_wr(dev, MT_TMAC_TRCR1, val);
-
- val = MT_AGG_ACR_PKT_TIME_EN | MT_AGG_ACR_NO_BA_AR_RULE |
- FIELD_PREP(MT_AGG_ACR_CFEND_RATE, 0x49) | /* 24M */
- FIELD_PREP(MT_AGG_ACR_BAR_RATE, 0x4b); /* 6M */
- mt76_wr(dev, MT_AGG_ACR0, val);
- mt76_wr(dev, MT_AGG_ACR1, val);
-
- mt76_rmw_field(dev, MT_TMAC_CTCR0,
- MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
- mt76_rmw_field(dev, MT_TMAC_CTCR0,
- MT_TMAC_CTCR0_INS_DDLMT_DENSITY, 0x3);
- mt76_rmw(dev, MT_TMAC_CTCR0,
- MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
- MT_TMAC_CTCR0_INS_DDLMT_EN,
- MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
- MT_TMAC_CTCR0_INS_DDLMT_EN);
-
- mt7615_mcu_set_rts_thresh(dev, 0x92b);
- mt7615_mac_set_scs(dev, true);
+ if (!chain)
+ val = MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN;
+ else
+ val = MT_CFG_CCR_MAC_D1_1X_GC_EN | MT_CFG_CCR_MAC_D1_2X_GC_EN;
- mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
- MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
+ /* enable band 0/1 clk */
+ mt76_set(dev, MT_CFG_CCR, val);
- mt7615_mcu_init_mac(dev);
+ mt76_rmw(dev, MT_TMAC_TRCR(chain),
+ MT_TMAC_TRCR_CCA_SEL | MT_TMAC_TRCR_SEC_CCA_SEL,
+ FIELD_PREP(MT_TMAC_TRCR_CCA_SEL, 2) |
+ FIELD_PREP(MT_TMAC_TRCR_SEC_CCA_SEL, 0));
- mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP |
- FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072));
+ mt76_wr(dev, MT_AGG_ACR(chain),
+ MT_AGG_ACR_PKT_TIME_EN | MT_AGG_ACR_NO_BA_AR_RULE |
+ FIELD_PREP(MT_AGG_ACR_CFEND_RATE, MT7615_CFEND_RATE_DEFAULT) |
+ FIELD_PREP(MT_AGG_ACR_BAR_RATE, MT7615_BAR_RATE_DEFAULT));
- mt76_wr(dev, MT_AGG_ARUCR,
+ mt76_wr(dev, MT_AGG_ARUCR(chain),
FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), 2) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), 2) |
@@ -71,7 +52,7 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), 1));
- mt76_wr(dev, MT_AGG_ARDCR,
+ mt76_wr(dev, MT_AGG_ARDCR(chain),
FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), MT7615_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), MT7615_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7615_RATE_RETRY - 1) |
@@ -81,12 +62,6 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7615_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1));
- mt76_wr(dev, MT_AGG_ARCR,
- (FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
- MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
- FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
- FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
-
mask = MT_DMA_RCFR0_MCU_RX_MGMT |
MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR |
MT_DMA_RCFR0_MCU_RX_CTL_BAR |
@@ -95,8 +70,36 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
MT_DMA_RCFR0_RX_DROPPED_MCAST;
set = FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_UCAST, 2) |
FIELD_PREP(MT_DMA_RCFR0_RX_DROPPED_MCAST, 2);
- mt76_rmw(dev, MT_DMA_BN0RCFR0, mask, set);
- mt76_rmw(dev, MT_DMA_BN1RCFR0, mask, set);
+ mt76_rmw(dev, MT_DMA_RCFR0(chain), mask, set);
+}
+
+static void mt7615_mac_init(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt7615_init_mac_chain(dev, 0);
+
+ mt76_rmw_field(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
+ mt76_rmw_field(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_DENSITY, 0x3);
+ mt76_rmw(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ MT_TMAC_CTCR0_INS_DDLMT_EN,
+ MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ MT_TMAC_CTCR0_INS_DDLMT_EN);
+
+ mt7615_mcu_set_rts_thresh(&dev->phy, 0x92b);
+ mt7615_mac_set_scs(dev, true);
+
+ mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
+ MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
+
+ mt76_wr(dev, MT_AGG_ARCR,
+ FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
+ MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
+ FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
+ FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4));
for (i = 0; i < MT7615_WTBL_SIZE; i++)
mt7615_mac_wtbl_update(dev, i,
@@ -104,6 +107,39 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_EN);
mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_EN);
+
+ /* disable hdr translation and hw AMSDU */
+ mt76_wr(dev, MT_DMA_DCR0,
+ FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072) |
+ MT_DMA_DCR0_RX_VEC_DROP);
+ if (is_mt7663(&dev->mt76)) {
+ mt76_wr(dev, MT_CSR(0x010), 0x8208);
+ mt76_wr(dev, 0x44064, 0x2000000);
+ mt76_wr(dev, MT_WF_AGG(0x160), 0x5c341c02);
+ mt76_wr(dev, MT_WF_AGG(0x164), 0x70708040);
+ } else {
+ mt7615_init_mac_chain(dev, 1);
+ }
+}
+
+bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev)
+{
+ flush_work(&dev->mcu_work);
+
+ return test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+}
+
+static void mt7615_init_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev = container_of(work, struct mt7615_dev, mcu_work);
+
+ if (mt7615_mcu_init(dev))
+ return;
+
+ mt7615_mcu_set_eeprom(dev);
+ mt7615_mac_init(dev);
+ mt7615_phy_init(dev);
+ mt7615_mcu_del_wtbl_all(dev);
}
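Firmware bring-up now runs from a workqueue, so any path that needs a live MCU must first flush that work and check MT76_STATE_MCU_RUNNING, which is exactly what mt7615_wait_for_mcu_init() does for the debugfs hooks earlier in this series. The guard in sketch form (error code illustrative; the debugfs hooks simply return 0 instead):

static int mcu_dependent_op_example(struct mt7615_dev *dev)
{
        if (!mt7615_wait_for_mcu_init(dev))
                return -EIO;

        /* safe to talk to the MCU from here on */
        return 0;
}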
static int mt7615_init_hardware(struct mt7615_dev *dev)
@@ -112,6 +148,7 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+ INIT_WORK(&dev->mcu_work, mt7615_init_work);
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
@@ -123,17 +160,7 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
if (ret)
return ret;
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
-
- ret = mt7615_mcu_init(dev);
- if (ret)
- return ret;
-
- mt7615_mcu_set_eeprom(dev);
- mt7615_mac_init(dev);
- mt7615_phy_init(dev);
- mt7615_mcu_ctrl_pm_state(dev, 0);
- mt7615_mcu_del_wtbl_all(dev);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
@@ -200,12 +227,65 @@ static const struct ieee80211_iface_combination if_comb[] = {
};
static void
+mt7615_led_set_config(struct led_classdev *led_cdev,
+ u8 delay_on, u8 delay_off)
+{
+ struct mt7615_dev *dev;
+ struct mt76_dev *mt76;
+ u32 val, addr;
+
+ mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
+ dev = container_of(mt76, struct mt7615_dev, mt76);
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
+
+ addr = mt7615_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
+ mt76_wr(dev, addr, val);
+ addr = mt7615_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
+ mt76_wr(dev, addr, val);
+
+ val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
+ MT_LED_CTRL_KICK(mt76->led_pin);
+ if (mt76->led_al)
+ val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+ addr = mt7615_reg_map(dev, MT_LED_CTRL);
+ mt76_wr(dev, addr, val);
+}
+
+static int
+mt7615_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u8 delta_on, delta_off;
+
+ delta_off = max_t(u8, *delay_off / 10, 1);
+ delta_on = max_t(u8, *delay_on / 10, 1);
+
+ mt7615_led_set_config(led_cdev, delta_on, delta_off);
+
+ return 0;
+}
+
+static void
+mt7615_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ if (!brightness)
+ mt7615_led_set_config(led_cdev, 0, 0xff);
+ else
+ mt7615_led_set_config(led_cdev, 0xff, 0);
+}
+
+static void
mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband)
{
- int i, n_chains = hweight8(dev->mt76.antenna_mask), target_chains;
+ int i, n_chains = hweight8(dev->mphy.antenna_mask), target_chains;
u8 *eep = (u8 *)dev->mt76.eeprom.data;
enum nl80211_band band = sband->band;
+ int delta = mt76_tx_power_nss_delta(n_chains);
target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
for (i = 0; i < sband->n_channels; i++) {
@@ -220,21 +300,7 @@ mt7615_init_txpower(struct mt7615_dev *dev,
target_power = max(target_power, eep[index]);
}
- target_power = DIV_ROUND_UP(target_power, 2);
- switch (n_chains) {
- case 4:
- target_power += 6;
- break;
- case 3:
- target_power += 4;
- break;
- case 2:
- target_power += 3;
- break;
- default:
- break;
- }
-
+ target_power = DIV_ROUND_UP(target_power + delta, 2);
chan->max_power = min_t(int, chan->max_reg_power,
target_power);
chan->orig_mpwr = target_power;
@@ -246,74 +312,190 @@ mt7615_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct mt7615_dev *dev = hw->priv;
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
-
- if (request->dfs_region == dev->mt76.region)
- return;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ struct mt7615_phy *phy = mphy->priv;
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
dev->mt76.region = request->dfs_region;
if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
return;
- mt7615_dfs_stop_radar_detector(dev);
- if (request->dfs_region == NL80211_DFS_UNSET)
- mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, MT_HW_RDD0,
- MT_RX_SEL0, 0);
- else
- mt7615_dfs_start_radar_detector(dev);
+ mt7615_dfs_init_radar_detector(phy);
}
-int mt7615_register_device(struct mt7615_dev *dev)
+static void
+mt7615_init_wiphy(struct ieee80211_hw *hw)
{
- struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct wiphy *wiphy = hw->wiphy;
- int ret;
-
- INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
- INIT_LIST_HEAD(&dev->sta_poll_list);
- spin_lock_init(&dev->sta_poll_lock);
-
- ret = mt7615_init_hardware(dev);
- if (ret)
- return ret;
hw->queues = 4;
hw->max_rates = 3;
hw->max_report_rates = 7;
hw->max_rate_tries = 11;
+ phy->slottime = 9;
+
hw->sta_data_size = sizeof(struct mt7615_sta);
hw->vif_data_size = sizeof(struct mt7615_vif);
wiphy->iface_combinations = if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->reg_notifier = mt7615_regd_notifier;
- wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
- dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.sband_5g.sband.vht_cap.cap |=
+ if (is_mt7615(&phy->dev->mt76))
+ hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
+ else
+ hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM;
+}
+
+static void
+mt7615_cap_dbdc_enable(struct mt7615_dev *dev)
+{
+ dev->mphy.sband_5g.sband.vht_cap.cap &=
+ ~(IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ);
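+ /*
+ * With DBDC enabled the RF chains are split between the two bands, so
+ * 160 and 80+80 MHz support is dropped (presumably it needs the full
+ * chain set) and only part of the chainmask stays with the primary PHY.
+ */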
+ if (dev->chainmask == 0xf)
+ dev->mphy.antenna_mask = dev->chainmask >> 2;
+ else
+ dev->mphy.antenna_mask = dev->chainmask >> 1;
+ dev->phy.chainmask = dev->mphy.antenna_mask;
+ dev->mphy.hw->wiphy->available_antennas_rx = dev->phy.chainmask;
+ dev->mphy.hw->wiphy->available_antennas_tx = dev->phy.chainmask;
+ mt76_set_stream_caps(&dev->mt76, true);
+}
+
+static void
+mt7615_cap_dbdc_disable(struct mt7615_dev *dev)
+{
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
- dev->dfs_state = -1;
+ dev->mphy.antenna_mask = dev->chainmask;
+ dev->phy.chainmask = dev->chainmask;
+ dev->mphy.hw->wiphy->available_antennas_rx = dev->chainmask;
+ dev->mphy.hw->wiphy->available_antennas_tx = dev->chainmask;
+ mt76_set_stream_caps(&dev->mt76, true);
+}
+
+int mt7615_register_ext_phy(struct mt7615_dev *dev)
+{
+ struct mt7615_phy *phy = mt7615_ext_phy(dev);
+ struct mt76_phy *mphy;
+ int ret;
+
+ if (!is_mt7615(&dev->mt76))
+ return -EOPNOTSUPP;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return -EINVAL;
+
+ if (phy)
+ return 0;
+
+ mt7615_cap_dbdc_enable(dev);
+ mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7615_ops);
+ if (!mphy)
+ return -ENOMEM;
+
+ phy = mphy->priv;
+ phy->dev = dev;
+ phy->mt76 = mphy;
+ phy->chainmask = dev->chainmask & ~dev->phy.chainmask;
+ mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1;
+ mt7615_init_wiphy(mphy->hw);
+
+ /*
+ * Make the secondary PHY MAC address local (set the locally
+ * administered bit) and flip bit 7 so that it cannot collide with
+ * the usual MAC address allocation scheme on multiple virtual
+ * interfaces.
+ */
+ mphy->hw->wiphy->perm_addr[0] |= 2;
+ mphy->hw->wiphy->perm_addr[0] ^= BIT(7);
+
+ /* second phy can only handle 5 GHz */
+ mphy->sband_2g.sband.n_channels = 0;
+ mphy->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+
+ /* The second interface does not get any packets unless it has a vif */
+ ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF);
+
+ ret = mt76_register_phy(mphy);
+ if (ret)
+ ieee80211_free_hw(mphy->hw);
+
+ return ret;
+}
+
+void mt7615_unregister_ext_phy(struct mt7615_dev *dev)
+{
+ struct mt7615_phy *phy = mt7615_ext_phy(dev);
+ struct mt76_phy *mphy = dev->mt76.phy2;
+
+ if (!phy)
+ return;
+
+ mt7615_cap_dbdc_disable(dev);
+ mt76_unregister_phy(mphy);
+ ieee80211_free_hw(mphy->hw);
+}
+
+void mt7615_init_device(struct mt7615_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ dev->phy.dev = dev;
+ dev->phy.mt76 = &dev->mt76.phy;
+ dev->mt76.phy.priv = &dev->phy;
+ INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
+ INIT_LIST_HEAD(&dev->sta_poll_list);
+ spin_lock_init(&dev->sta_poll_lock);
+ init_waitqueue_head(&dev->reset_wait);
+ INIT_WORK(&dev->reset_work, mt7615_mac_reset_work);
+
+ mt7615_init_wiphy(hw);
+ dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mphy.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mphy.sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ mt7615_cap_dbdc_disable(dev);
+ dev->phy.dfs_state = -1;
+}
+
+int mt7615_register_device(struct mt7615_dev *dev)
+{
+ int ret;
+
+ mt7615_init_device(dev);
+
+ /* init LED callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ dev->mt76.led_cdev.brightness_set = mt7615_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt7615_led_set_blink;
+ }
+
+ ret = mt7622_wmac_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7615_init_hardware(dev);
+ if (ret)
+ return ret;
ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
if (ret)
return ret;
- mt7615_init_txpower(dev, &dev->mt76.sband_2g.sband);
- mt7615_init_txpower(dev, &dev->mt76.sband_5g.sband);
-
- hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
+ ieee80211_queue_work(mt76_hw(dev), &dev->mcu_work);
+ mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
return mt7615_init_debugfs(dev);
}
@@ -321,10 +503,15 @@ int mt7615_register_device(struct mt7615_dev *dev)
void mt7615_unregister_device(struct mt7615_dev *dev)
{
struct mt76_txwi_cache *txwi;
+ bool mcu_running;
int id;
+ mcu_running = mt7615_wait_for_mcu_init(dev);
+
+ mt7615_unregister_ext_phy(dev);
mt76_unregister_device(&dev->mt76);
- mt7615_mcu_exit(dev);
+ if (mcu_running)
+ mt7615_mcu_exit(dev);
mt7615_dma_cleanup(dev);
spin_lock_bh(&dev->token_lock);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index c77adc5d2552..a27a6d164009 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -10,13 +10,50 @@
#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7615.h"
+#include "../trace.h"
#include "../dma.h"
+#include "mt7615_trace.h"
#include "mac.h"
-static inline s8 to_rssi(u32 field, u32 rxv)
-{
- return (FIELD_GET(field, rxv) - 220) / 2;
-}
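+/* 802.11 defines RCPI as 2 * (dBm + 110), hence the (x - 220) / 2 conversion */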
+#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
+
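+/*
+ * Per-region radar pulse/pattern thresholds pushed to the MCU detector;
+ * the array index selects the hardware pattern slot and the raw values
+ * are presumably vendor-calibrated (field layout in mac.h).
+ */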
+static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
+ .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .radar_pattern = {
+ [5] = { 1, 0, 6, 32, 28, 0, 17, 990, 5010, 1, 1 },
+ [6] = { 1, 0, 9, 32, 28, 0, 27, 615, 5010, 1, 1 },
+ [7] = { 1, 0, 15, 32, 28, 0, 27, 240, 445, 1, 1 },
+ [8] = { 1, 0, 12, 32, 28, 0, 42, 240, 510, 1, 1 },
+ [9] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 },
+ [10] = { 1, 1, 0, 0, 0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 },
+ [11] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 18, 32, 28 },
+ [12] = { 1, 1, 0, 0, 0, 0, 14, 823, 2510, 0, 0, 27, 32, 24 },
+ },
+};
+
+static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
+ .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 9, 32, 28, 0, 13, 508, 3076, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 },
+ [4] = { 1, 0, 9, 255, 28, 0, 13, 323, 343, 1, 32 },
+ },
+};
+
+static const struct mt7615_dfs_radar_spec jp_radar_specs = {
+ .pulse_th = { 40, -10, -80, 800, 3360, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 8, 32, 28, 0, 13, 508, 3076, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 17, 140, 240, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 22, 190, 510, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 32, 190, 510, 1, 1 },
+ [4] = { 1, 0, 9, 32, 28, 0, 13, 323, 343, 1, 32 },
+ [13] = { 1, 0, 8, 32, 28, 0, 14, 3836, 3856, 1, 1 },
+ [14] = { 1, 0, 8, 32, 28, 0, 14, 3990, 4010, 1, 1 },
+ },
+};
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
u8 idx, bool unicast)
@@ -49,34 +86,116 @@ void mt7615_mac_reset_counters(struct mt7615_dev *dev)
mt76_rr(dev, MT_TX_AGG_CNT(i));
memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
-
- /* TODO: add DBDC support */
+ dev->mt76.phy.survey_time = ktime_get_boottime();
+ if (dev->mt76.phy2)
+ dev->mt76.phy2->survey_time = ktime_get_boottime();
/* reset airtime counters */
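+ /* the MIB SDR registers are clear-on-read; the dummy reads reset them */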
mt76_rr(dev, MT_MIB_SDR9(0));
+ mt76_rr(dev, MT_MIB_SDR9(1));
+
mt76_rr(dev, MT_MIB_SDR36(0));
+ mt76_rr(dev, MT_MIB_SDR36(1));
+
mt76_rr(dev, MT_MIB_SDR37(0));
+ mt76_rr(dev, MT_MIB_SDR37(1));
+
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}
+void mt7615_mac_set_timing(struct mt7615_phy *phy)
+{
+ s16 coverage_class = phy->coverage_class;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 val, reg_offset;
+ u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
+ u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
+ int sifs, offset;
+
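+ /* 802.11 SIFS: 16 us on the OFDM-only 5 GHz band, 10 us on 2.4 GHz */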
+ if (phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ)
+ sifs = 16;
+ else
+ sifs = 10;
+
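+ /*
+ * The CCK/OFDM timeout registers are shared between both bands, so the
+ * larger coverage class of the two PHYs wins; TX/RX arbitration for
+ * this band is paused while the timing registers are rewritten and
+ * re-enabled at the end of the function.
+ */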
+ if (ext_phy) {
+ coverage_class = max_t(s16, dev->phy.coverage_class,
+ coverage_class);
+ mt76_set(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
+ } else {
+ struct mt7615_phy *phy_ext = mt7615_ext_phy(dev);
+
+ if (phy_ext)
+ coverage_class = max_t(s16, phy_ext->coverage_class,
+ coverage_class);
+ mt76_set(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
+ }
+ udelay(1);
+
+ offset = 3 * coverage_class;
+ reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+ mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset);
+ mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset);
+
+ mt76_wr(dev, MT_TMAC_ICR(ext_phy),
+ FIELD_PREP(MT_IFS_EIFS, 360) |
+ FIELD_PREP(MT_IFS_RIFS, 2) |
+ FIELD_PREP(MT_IFS_SIFS, sifs) |
+ FIELD_PREP(MT_IFS_SLOT, phy->slottime));
+
+ if (phy->slottime < 20)
+ val = MT7615_CFEND_RATE_DEFAULT;
+ else
+ val = MT7615_CFEND_RATE_11B;
+
+ mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
+ if (ext_phy)
+ mt76_clear(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
+ else
+ mt76_clear(dev, MT_ARB_SCR,
+ MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
+}
+
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7615_phy *phy = &dev->phy;
+ struct mt7615_phy *phy2 = dev->mt76.phy2 ? dev->mt76.phy2->priv : NULL;
struct ieee80211_supported_band *sband;
struct ieee80211_hdr *hdr;
__le32 *rxd = (__le32 *)skb->data;
u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
+ __le32 rxd12 = rxd[12];
bool unicast, remove_pad, insert_ccmp_hdr = false;
+ int phy_idx;
int i, idx;
-
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
- return -EINVAL;
+ u8 chfreq;
memset(status, 0, sizeof(*status));
+ chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
+ if (!phy2)
+ phy_idx = 0;
+ else if (phy2->chfreq == phy->chfreq)
+ phy_idx = -1;
+ else if (phy->chfreq == chfreq)
+ phy_idx = 0;
+ else if (phy2->chfreq == chfreq)
+ phy_idx = 1;
+ else
+ phy_idx = -1;
+
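+ /*
+ * phy_idx < 0 means the RX channel alone cannot tell the two PHYs
+ * apart (both on the same frequency); this is resolved later from the
+ * per-chain noise readings in the RXV.
+ */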
unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
@@ -91,14 +210,6 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
spin_unlock_bh(&dev->sta_poll_lock);
}
- /* TODO: properly support DBDC */
- status->freq = dev->mt76.chandef.chan->center_freq;
- status->band = dev->mt76.chandef.chan->band;
- if (status->band == NL80211_BAND_5GHZ)
- sband = &dev->mt76.sband_5g.sband;
- else
- sband = &dev->mt76.sband_2g.sband;
-
if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -112,28 +223,11 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
}
- if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
- MT_RXD2_NORMAL_NON_AMPDU))) {
- status->flag |= RX_FLAG_AMPDU_DETAILS;
-
- /* all subframes of an A-MPDU have the same timestamp */
- if (dev->rx_ampdu_ts != rxd[12]) {
- if (!++dev->mt76.ampdu_ref)
- dev->mt76.ampdu_ref++;
- }
- dev->rx_ampdu_ts = rxd[12];
-
- status->ampdu_ref = dev->mt76.ampdu_ref;
- }
-
remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
return -EINVAL;
- if (!sband->channels)
- return -EINVAL;
-
rxd += 4;
if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
rxd += 4;
@@ -166,6 +260,59 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
}
if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
+ u32 rxdg5 = le32_to_cpu(rxd[5]);
+
+ /*
+ * If both PHYs are on the same channel and we don't have a WCID,
+ * we need to figure out which PHY this packet was received on.
+ * On the primary PHY, the noise value for the chains belonging to the
+ * second PHY will be set to the noise value of the last packet from
+ * that PHY.
+ */
+ if (phy_idx < 0) {
+ int first_chain = ffs(phy2->chainmask) - 1;
+
+ phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0;
+ }
+ }
+
+ if (phy_idx == 1 && phy2) {
+ mphy = dev->mt76.phy2;
+ phy = phy2;
+ status->ext_phy = true;
+ }
+
+ if (chfreq != phy->chfreq)
+ return -EINVAL;
+
+ status->freq = mphy->chandef.chan->center_freq;
+ status->band = mphy->chandef.chan->band;
+ if (status->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
+ return -EINVAL;
+
+ if (!sband->channels)
+ return -EINVAL;
+
+ if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
+ MT_RXD2_NORMAL_NON_AMPDU))) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (phy->rx_ampdu_ts != rxd12) {
+ if (!++phy->ampdu_ref)
+ phy->ampdu_ref++;
+ }
+ phy->rx_ampdu_ts = rxd12;
+
+ status->ampdu_ref = phy->ampdu_ref;
+ }
+
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
u32 rxdg0 = le32_to_cpu(rxd[0]);
u32 rxdg1 = le32_to_cpu(rxd[1]);
u32 rxdg3 = le32_to_cpu(rxd[3]);
@@ -218,14 +365,14 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
- status->chains = dev->mt76.antenna_mask;
+ status->chains = mphy->antenna_mask;
status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
status->signal = status->chain_signal[0];
- for (i = 1; i < hweight8(dev->mt76.antenna_mask); i++) {
+ for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
if (!(status->chains & BIT(i)))
continue;
@@ -274,13 +421,20 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
if (e->skb == DMA_DUMMY_DATA) {
struct mt76_txwi_cache *t;
struct mt7615_dev *dev;
- struct mt7615_txp *txp;
+ struct mt7615_txp_common *txp;
+ u16 token;
dev = container_of(mdev, struct mt7615_dev, mt76);
txp = mt7615_txwi_to_txp(mdev, e->txwi);
+ if (is_mt7615(&dev->mt76))
+ token = le16_to_cpu(txp->fw.token);
+ else
+ token = le16_to_cpu(txp->hw.msdu_id[0]) &
+ ~MT_MSDU_ID_VALID;
+
spin_lock_bh(&dev->token_lock);
- t = idr_remove(&dev->token, le16_to_cpu(txp->token));
+ t = idr_remove(&dev->token, token);
spin_unlock_bh(&dev->token_lock);
e->skb = t ? t->skb : NULL;
}
@@ -291,6 +445,7 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
static u16
mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
+ struct mt76_phy *mphy,
const struct ieee80211_tx_rate *rate,
bool stbc, u8 *bw)
{
@@ -319,11 +474,11 @@ mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
*bw = 1;
} else {
const struct ieee80211_rate *r;
- int band = dev->mt76.chandef.chan->band;
+ int band = mphy->chandef.chan->band;
u16 val;
nss = 1;
- r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+ r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx];
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
val = r->hw_value_short;
else
@@ -348,13 +503,15 @@ mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int pid,
- struct ieee80211_key_conf *key)
+ struct ieee80211_key_conf *key, bool beacon)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool multicast = is_multicast_ether_addr(hdr->addr1);
struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_phy *mphy = &dev->mphy;
+ bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
int tx_count = 8;
u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
__le16 fc = hdr->frame_control;
@@ -374,6 +531,9 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
tx_count = msta->rate_count;
}
+ if (ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
@@ -381,11 +541,17 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
skb_get_queue_mapping(skb);
p_fmt = MT_TX_TYPE_CT;
- } else if (ieee80211_is_beacon(fc)) {
- q_idx = MT_LMAC_BCN0;
+ } else if (beacon) {
+ if (ext_phy)
+ q_idx = MT_LMAC_BCN1;
+ else
+ q_idx = MT_LMAC_BCN0;
p_fmt = MT_TX_TYPE_FW;
} else {
- q_idx = MT_LMAC_ALTX0;
+ if (ext_phy)
+ q_idx = MT_LMAC_ALTX1;
+ else
+ q_idx = MT_LMAC_ALTX0;
p_fmt = MT_TX_TYPE_CT;
}
@@ -431,7 +597,8 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
u8 bw;
- u16 rateval = mt7615_mac_tx_rate_val(dev, rate, stbc, &bw);
+ u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc,
+ &bw);
txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
@@ -486,21 +653,59 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
return 0;
}
-void mt7615_txp_skb_unmap(struct mt76_dev *dev,
- struct mt76_txwi_cache *t)
+static void
+mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
{
- struct mt7615_txp *txp;
int i;
- txp = mt7615_txwi_to_txp(dev, t);
for (i = 1; i < txp->nbuf; i++)
dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
}
-static u32 mt7615_mac_wtbl_addr(int wcid)
+static void
+mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp)
{
- return MT_WTBL_BASE + wcid * MT_WTBL_ENTRY_SIZE;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
+ struct mt7615_txp_ptr *ptr = &txp->ptr[i];
+ bool last;
+ u16 len;
+
+ len = le16_to_cpu(ptr->len0);
+ last = len & MT_TXD_LEN_MSDU_LAST;
+ len &= ~MT_TXD_LEN_MSDU_LAST;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+
+ len = le16_to_cpu(ptr->len1);
+ last = len & MT_TXD_LEN_MSDU_LAST;
+ len &= ~MT_TXD_LEN_MSDU_LAST;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+ }
+}
+
+void mt7615_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *t)
+{
+ struct mt7615_txp_common *txp;
+
+ txp = mt7615_txwi_to_txp(dev, t);
+ if (is_mt7615(dev))
+ mt7615_txp_skb_unmap_fw(dev, &txp->fw);
+ else
+ mt7615_txp_skb_unmap_hw(dev, &txp->hw);
+}
+
+static u32 mt7615_mac_wtbl_addr(struct mt7615_dev *dev, int wcid)
+{
+ return MT_WTBL_BASE(dev) + wcid * MT_WTBL_ENTRY_SIZE;
}
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
@@ -546,7 +751,7 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
list_del_init(&msta->poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
- addr = mt7615_mac_wtbl_addr(msta->wcid.idx) + 19 * 4;
+ addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;
for (i = 0; i < 4; i++, addr += 8) {
u32 tx_last = msta->airtime_ac[i];
@@ -588,13 +793,15 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
rcu_read_unlock();
}
-void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
+void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates)
{
+ struct mt7615_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
struct ieee80211_tx_rate *ref;
int wcid = sta->wcid.idx;
- u32 addr = mt7615_mac_wtbl_addr(wcid);
+ u32 addr = mt7615_mac_wtbl_addr(dev, wcid);
bool stbc = false;
int n_rates = sta->n_rates;
u8 bw, bw_prev, bw_idx = 0;
@@ -649,11 +856,12 @@ void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
}
}
- val[0] = mt7615_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
+ val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw);
bw_prev = bw;
if (probe_rate) {
- probe_val = mt7615_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
+ probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate,
+ stbc, &bw);
if (bw)
bw_idx = 1;
else
@@ -662,19 +870,19 @@ void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
probe_val = val[0];
}
- val[1] = mt7615_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
+ val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw);
if (bw_prev) {
bw_idx = 3;
bw_prev = bw;
}
- val[2] = mt7615_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
+ val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw);
if (bw_prev) {
bw_idx = 5;
bw_prev = bw;
}
- val[3] = mt7615_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
+ val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw);
if (bw_prev)
bw_idx = 7;
@@ -758,7 +966,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher,
enum set_key_cmd cmd)
{
- u32 addr = mt7615_mac_wtbl_addr(wcid->idx) + 30 * 4;
+ u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
u8 data[32] = {};
if (key->keylen > sizeof(data))
@@ -796,7 +1004,7 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher, int keyidx,
enum set_key_cmd cmd)
{
- u32 addr = mt7615_mac_wtbl_addr(wcid->idx), w0, w1;
+ u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
return -ETIMEDOUT;
@@ -832,7 +1040,7 @@ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt7615_cipher_type cipher,
enum set_key_cmd cmd)
{
- u32 addr = mt7615_mac_wtbl_addr(wcid->idx);
+ u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
if (cmd == SET_KEY) {
if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
@@ -884,39 +1092,51 @@ out:
return err;
}
-int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- enum mt76_txq_id qid, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta,
- struct mt76_tx_info *tx_info)
+static void
+mt7615_write_hw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
- struct ieee80211_key_conf *key = info->control.hw_key;
- struct ieee80211_vif *vif = info->control.vif;
- int i, pid, id, nbuf = tx_info->nbuf - 1;
- u8 *txwi = (u8 *)txwi_ptr;
- struct mt76_txwi_cache *t;
- struct mt7615_txp *txp;
+ struct mt7615_hw_txp *txp = txp_ptr;
+ struct mt7615_txp_ptr *ptr = &txp->ptr[0];
+ int nbuf = tx_info->nbuf - 1;
+ int i;
- if (!wcid)
- wcid = &dev->mt76.global_wcid;
+ tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
+ tx_info->nbuf = 1;
- pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+ txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
- spin_lock_bh(&dev->mt76.lock);
- mt7615_mac_set_rates(dev, msta, &info->control.rates[0],
- msta->rates);
- msta->rate_probe = true;
- spin_unlock_bh(&dev->mt76.lock);
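+ /* DMA buffers are packed two per txp_ptr entry: buf0/len0, then buf1/len1 */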
+ for (i = 0; i < nbuf; i++) {
+ u32 addr = tx_info->buf[i + 1].addr;
+ u16 len = tx_info->buf[i + 1].len;
+
+ if (i == nbuf - 1)
+ len |= MT_TXD_LEN_MSDU_LAST |
+ MT_TXD_LEN_AMSDU_LAST;
+
+ if (i & 1) {
+ ptr->buf1 = cpu_to_le32(addr);
+ ptr->len1 = cpu_to_le16(len);
+ ptr++;
+ } else {
+ ptr->buf0 = cpu_to_le32(addr);
+ ptr->len0 = cpu_to_le16(len);
+ }
}
+}
- mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
- pid, key);
+static void
+mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt7615_fw_txp *txp = txp_ptr;
+ int nbuf = tx_info->nbuf - 1;
+ int i;
- txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
for (i = 0; i < nbuf; i++) {
txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
@@ -924,6 +1144,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
txp->nbuf = nbuf;
/* pass partial skb header to fw */
+ tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
tx_info->buf[1].len = MT_CT_PARSE_LEN;
tx_info->nbuf = MT_CT_DMA_BUF_NUM;
@@ -941,6 +1162,42 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
txp->bss_idx = mvif->idx;
}
+ txp->token = cpu_to_le16(id);
+ txp->rept_wds_wcid = 0xff;
+}
+
+int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ int pid, id;
+ u8 *txwi = (u8 *)txwi_ptr;
+ struct mt76_txwi_cache *t;
+ void *txp;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ struct mt7615_phy *phy = &dev->phy;
+
+ if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && mdev->phy2)
+ phy = mdev->phy2->priv;
+
+ spin_lock_bh(&dev->mt76.lock);
+ mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
+ msta->rates);
+ msta->rate_probe = true;
+ spin_unlock_bh(&dev->mt76.lock);
+ }
+
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t->skb = tx_info->skb;
@@ -950,8 +1207,16 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (id < 0)
return id;
- txp->token = cpu_to_le16(id);
- txp->rept_wds_wcid = 0xff;
+ mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
+ pid, key, false);
+
+ txp = txwi + MT_TXD_SIZE;
+ memset(txp, 0, sizeof(struct mt7615_txp_common));
+ if (is_mt7615(&dev->mt76))
+ mt7615_write_fw_txp(dev, tx_info, txp, id);
+ else
+ mt7615_write_hw_txp(dev, tx_info, txp, id);
+
tx_info->skb = DMA_DUMMY_DATA;
return 0;
@@ -962,6 +1227,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
{
struct ieee80211_supported_band *sband;
struct mt7615_rate_set *rs;
+ struct mt76_phy *mphy;
int first_idx = 0, last_idx;
int i, idx, count;
bool fixed_rate, ack_timeout;
@@ -1019,7 +1285,12 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
spin_lock_bh(&dev->mt76.lock);
if (sta->rate_probe) {
- mt7615_mac_set_rates(dev, sta, NULL, sta->rates);
+ struct mt7615_phy *phy = &dev->phy;
+
+ if (sta->wcid.ext_phy && dev->mt76.phy2)
+ phy = dev->mt76.phy2->priv;
+
+ mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
sta->rate_probe = false;
}
spin_unlock_bh(&dev->mt76.lock);
@@ -1059,10 +1330,14 @@ out:
cck = true;
/* fall through */
case MT_PHY_TYPE_OFDM:
- if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
- sband = &dev->mt76.sband_5g.sband;
+ mphy = &dev->mphy;
+ if (sta->wcid.ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
else
- sband = &dev->mt76.sband_2g.sband;
+ sband = &mphy->sband_2g.sband;
final_rate &= MT_TX_RATE_IDX;
final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
cck);
@@ -1105,6 +1380,8 @@ static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
if (pid < MT_PACKET_ID_FIRST)
return false;
+ trace_mac_txdone(mdev, sta->wcid.idx, pid);
+
mt76_tx_status_lock(mdev, &list);
skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
if (skb) {
@@ -1128,6 +1405,7 @@ void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
struct ieee80211_sta *sta = NULL;
struct mt7615_sta *msta = NULL;
struct mt76_wcid *wcid;
+ struct mt76_phy *mphy = &dev->mt76.phy;
__le32 *txs_data = data;
u32 txs;
u8 wcidx;
@@ -1164,111 +1442,160 @@ void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
if (wcidx >= MT7615_WTBL_STA || !sta)
goto out;
+ if (wcid->ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
if (mt7615_fill_txs(dev, msta, &info, txs_data))
- ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+ ieee80211_tx_status_noskb(mphy->hw, sta, &info);
out:
rcu_read_unlock();
}
-void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+static void
+mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
{
- struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
+
+ trace_mac_tx_free(dev, token);
+
+ spin_lock_bh(&dev->token_lock);
+ txwi = idr_remove(&dev->token, token);
+ spin_unlock_bh(&dev->token_lock);
+
+ if (!txwi)
+ return;
+
+ mt7615_txp_skb_unmap(mdev, txwi);
+ if (txwi->skb) {
+ mt76_tx_complete_skb(mdev, txwi->skb);
+ txwi->skb = NULL;
+ }
+
+ mt76_put_txwi(mdev, txwi);
+}
+
+void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
u8 i, count;
count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
- for (i = 0; i < count; i++) {
- spin_lock_bh(&dev->token_lock);
- txwi = idr_remove(&dev->token, le16_to_cpu(free->token[i]));
- spin_unlock_bh(&dev->token_lock);
-
- if (!txwi)
- continue;
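+ /* MT7615 firmware reports 16-bit tokens; newer chips pack 32-bit entries */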
+ if (is_mt7615(&dev->mt76)) {
+ __le16 *token = &free->token[0];
- mt7615_txp_skb_unmap(mdev, txwi);
- if (txwi->skb) {
- mt76_tx_complete_skb(mdev, txwi->skb);
- txwi->skb = NULL;
- }
+ for (i = 0; i < count; i++)
+ mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
+ } else {
+ __le32 *token = (__le32 *)&free->token[0];
- mt76_put_txwi(mdev, txwi);
+ for (i = 0; i < count; i++)
+ mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
}
+
dev_kfree_skb(skb);
}
static void
-mt7615_mac_set_default_sensitivity(struct mt7615_dev *dev)
+mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy)
{
- mt76_rmw(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
- MT_WF_PHY_B0_PD_OFDM_MASK,
- MT_WF_PHY_B0_PD_OFDM(0x13c));
- mt76_rmw(dev, MT_WF_PHY_B1_MIN_PRI_PWR,
- MT_WF_PHY_B1_PD_OFDM_MASK,
- MT_WF_PHY_B1_PD_OFDM(0x13c));
-
- mt76_rmw(dev, MT_WF_PHY_B0_RXTD_CCK_PD,
- MT_WF_PHY_B0_PD_CCK_MASK,
- MT_WF_PHY_B0_PD_CCK(0x92));
- mt76_rmw(dev, MT_WF_PHY_B1_RXTD_CCK_PD,
- MT_WF_PHY_B1_PD_CCK_MASK,
- MT_WF_PHY_B1_PD_CCK(0x92));
-
- dev->ofdm_sensitivity = -98;
- dev->cck_sensitivity = -110;
- dev->last_cca_adj = jiffies;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+
+ mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
+ MT_WF_PHY_PD_OFDM_MASK(ext_phy),
+ MT_WF_PHY_PD_OFDM(ext_phy, 0x13c));
+ mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
+ MT_WF_PHY_PD_CCK_MASK(ext_phy),
+ MT_WF_PHY_PD_CCK(ext_phy, 0x92));
+
+ phy->ofdm_sensitivity = -98;
+ phy->cck_sensitivity = -110;
+ phy->last_cca_adj = jiffies;
}
void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable)
{
+ struct mt7615_phy *ext_phy;
+
mutex_lock(&dev->mt76.mutex);
if (dev->scs_en == enable)
goto out;
+ if (is_mt7663(&dev->mt76))
+ goto out;
+
if (enable) {
- /* DBDC not supported */
- mt76_set(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
- MT_WF_PHY_B0_PD_BLK);
+ mt76_set(dev, MT_WF_PHY_MIN_PRI_PWR(0),
+ MT_WF_PHY_PD_BLK(0));
+ mt76_set(dev, MT_WF_PHY_MIN_PRI_PWR(1),
+ MT_WF_PHY_PD_BLK(1));
if (is_mt7622(&dev->mt76)) {
mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7 << 8);
mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7);
}
} else {
- mt76_clear(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
- MT_WF_PHY_B0_PD_BLK);
- mt76_clear(dev, MT_WF_PHY_B1_MIN_PRI_PWR,
- MT_WF_PHY_B1_PD_BLK);
+ mt76_clear(dev, MT_WF_PHY_MIN_PRI_PWR(0),
+ MT_WF_PHY_PD_BLK(0));
+ mt76_clear(dev, MT_WF_PHY_MIN_PRI_PWR(1),
+ MT_WF_PHY_PD_BLK(1));
}
- mt7615_mac_set_default_sensitivity(dev);
+ mt7615_mac_set_default_sensitivity(&dev->phy);
+ ext_phy = mt7615_ext_phy(dev);
+ if (ext_phy)
+ mt7615_mac_set_default_sensitivity(ext_phy);
+
dev->scs_en = enable;
out:
mutex_unlock(&dev->mt76.mutex);
}
-void mt7615_mac_cca_stats_reset(struct mt7615_dev *dev)
+void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy)
{
- mt76_clear(dev, MT_WF_PHY_R0_B0_PHYMUX_5, GENMASK(22, 20));
- mt76_set(dev, MT_WF_PHY_R0_B0_PHYMUX_5, BIT(22) | BIT(20));
+ u32 rxtd;
+
+ if (is_mt7663(&dev->mt76))
+ return;
+
+ if (ext_phy)
+ rxtd = MT_WF_PHY_RXTD2(10);
+ else
+ rxtd = MT_WF_PHY_RXTD(12);
+
+ mt76_set(dev, rxtd, BIT(18) | BIT(29));
+ mt76_set(dev, MT_WF_PHY_R0_PHYMUX_5(ext_phy), 0x5 << 12);
+}
+
+void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ u32 reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);
+
+ mt76_clear(dev, reg, GENMASK(22, 20));
+ mt76_set(dev, reg, BIT(22) | BIT(20));
}
static void
-mt7615_mac_adjust_sensitivity(struct mt7615_dev *dev,
+mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
u32 rts_err_rate, bool ofdm)
{
- int false_cca = ofdm ? dev->false_cca_ofdm : dev->false_cca_cck;
+ struct mt7615_dev *dev = phy->dev;
+ int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
+ bool ext_phy = phy != &dev->phy;
u16 def_th = ofdm ? -98 : -110;
bool update = false;
s8 *sensitivity;
int signal;
- sensitivity = ofdm ? &dev->ofdm_sensitivity : &dev->cck_sensitivity;
- signal = mt76_get_min_avg_rssi(&dev->mt76);
+ sensitivity = ofdm ? &phy->ofdm_sensitivity : &phy->cck_sensitivity;
+ signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy);
if (!signal) {
- mt7615_mac_set_default_sensitivity(dev);
+ mt7615_mac_set_default_sensitivity(phy);
return;
}
@@ -1303,99 +1630,155 @@ mt7615_mac_adjust_sensitivity(struct mt7615_dev *dev,
u16 val;
if (ofdm) {
- /* DBDC not supported */
val = *sensitivity * 2 + 512;
- mt76_rmw(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
- MT_WF_PHY_B0_PD_OFDM_MASK,
- MT_WF_PHY_B0_PD_OFDM(val));
+ mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
+ MT_WF_PHY_PD_OFDM_MASK(ext_phy),
+ MT_WF_PHY_PD_OFDM(ext_phy, val));
} else {
val = *sensitivity + 256;
- mt76_rmw(dev, MT_WF_PHY_B0_RXTD_CCK_PD,
- MT_WF_PHY_B0_PD_CCK_MASK,
- MT_WF_PHY_B0_PD_CCK(val));
- mt76_rmw(dev, MT_WF_PHY_B1_RXTD_CCK_PD,
- MT_WF_PHY_B1_PD_CCK_MASK,
- MT_WF_PHY_B1_PD_CCK(val));
+ mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
+ MT_WF_PHY_PD_CCK_MASK(ext_phy),
+ MT_WF_PHY_PD_CCK(ext_phy, val));
}
- dev->last_cca_adj = jiffies;
+ phy->last_cca_adj = jiffies;
}
}
static void
-mt7615_mac_scs_check(struct mt7615_dev *dev)
+mt7615_mac_scs_check(struct mt7615_phy *phy)
{
- u32 val, rts_cnt = 0, rts_retries_cnt = 0, rts_err_rate = 0;
+ struct mt7615_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ u32 val, rts_err_rate = 0;
u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm;
- int i;
+ bool ext_phy = phy != &dev->phy;
if (!dev->scs_en)
return;
- for (i = 0; i < 4; i++) {
- u32 data;
-
- val = mt76_rr(dev, MT_MIB_MB_SDR0(i));
- data = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
- if (data > rts_retries_cnt) {
- rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
- rts_retries_cnt = data;
- }
- }
-
- val = mt76_rr(dev, MT_WF_PHY_R0_B0_PHYCTRL_STS0);
+ val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val);
pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val);
- val = mt76_rr(dev, MT_WF_PHY_R0_B0_PHYCTRL_STS5);
+ val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val);
mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val);
- dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
- dev->false_cca_cck = pd_cck - mdrdy_cck;
- mt7615_mac_cca_stats_reset(dev);
+ phy->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
+ phy->false_cca_cck = pd_cck - mdrdy_cck;
+ mt7615_mac_cca_stats_reset(phy);
- if (rts_cnt + rts_retries_cnt)
- rts_err_rate = MT_FRAC(rts_retries_cnt,
- rts_cnt + rts_retries_cnt);
+ if (mib->rts_cnt + mib->rts_retries_cnt)
+ rts_err_rate = MT_FRAC(mib->rts_retries_cnt,
+ mib->rts_cnt + mib->rts_retries_cnt);
/* cck */
- mt7615_mac_adjust_sensitivity(dev, rts_err_rate, false);
+ mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false);
/* ofdm */
- mt7615_mac_adjust_sensitivity(dev, rts_err_rate, true);
+ mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true);
- if (time_after(jiffies, dev->last_cca_adj + 10 * HZ))
- mt7615_mac_set_default_sensitivity(dev);
+ if (time_after(jiffies, phy->last_cca_adj + 10 * HZ))
+ mt7615_mac_set_default_sensitivity(phy);
}
-void mt7615_update_channel(struct mt76_dev *mdev)
+static u8
+mt7615_phy_get_nf(struct mt7615_dev *dev, int idx)
{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
+ u32 reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20);
+ u32 val, sum = 0, n = 0;
+ int i;
+
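+ /*
+ * Consecutive RXTD registers appear to hold per-level hit counters of
+ * a noise-floor histogram; return the count-weighted average level
+ * (magnitude in dBm, negated by the caller).
+ */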
+ for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
+ val = mt76_rr(dev, reg);
+ sum += val * nf_power[i];
+ n += val;
+ }
+
+ if (!n)
+ return 0;
+
+ return sum / n;
+}
+
+static void
+mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
+{
+ struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
+ struct mt7615_phy *phy = mphy->priv;
struct mt76_channel_state *state;
u64 busy_time, tx_time, rx_time, obss_time;
+ u32 obss_reg = idx ? MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5;
+ int nf;
- /* TODO: add DBDC support */
- busy_time = mt76_get_field(dev, MT_MIB_SDR9(0),
+ busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
MT_MIB_SDR9_BUSY_MASK);
- tx_time = mt76_get_field(dev, MT_MIB_SDR36(0),
+ tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
MT_MIB_SDR36_TXTIME_MASK);
- rx_time = mt76_get_field(dev, MT_MIB_SDR37(0),
+ rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
MT_MIB_SDR37_RXTIME_MASK);
- obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_TIME5,
- MT_MIB_OBSSTIME_MASK);
+ obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK);
+
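+ /* phy->noise is a <<4 fixed-point EWMA of the noise floor in -dBm */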
+ nf = mt7615_phy_get_nf(dev, idx);
+ if (!phy->noise)
+ phy->noise = nf << 4;
+ else if (nf)
+ phy->noise += nf - (phy->noise >> 4);
- state = mdev->chan_state;
+ state = mphy->chan_state;
state->cc_busy += busy_time;
state->cc_tx += tx_time;
state->cc_rx += rx_time + obss_time;
state->cc_bss_rx += rx_time;
+ state->noise = -(phy->noise >> 4);
+}
+
+void mt7615_update_channel(struct mt76_dev *mdev)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_phy_update_channel(&mdev->phy, 0);
+ if (mdev->phy2)
+ mt7615_phy_update_channel(mdev->phy2, 1);
/* reset obss airtime */
mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}
+static void
+mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ bool ext_phy = phy != &dev->phy;
+ int i;
+
+ memset(mib, 0, sizeof(*mib));
+
+ mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
+ MT_MIB_SDR3_FCS_ERR_MASK);
+
+ for (i = 0; i < 4; i++) {
+ u32 data, val, val2;
+
+ val = mt76_get_field(dev, MT_MIB_MB_SDR1(ext_phy, i),
+ MT_MIB_ACK_FAIL_COUNT_MASK);
+ if (val > mib->ack_fail_cnt)
+ mib->ack_fail_cnt = val;
+
+ val2 = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
+ data = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val2);
+ if (data > mib->rts_retries_cnt) {
+ mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val2);
+ mib->rts_retries_cnt = data;
+ }
+ }
+}
+
void mt7615_mac_work(struct work_struct *work)
{
struct mt7615_dev *dev;
+ struct mt7615_phy *ext_phy;
int i, idx;
dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
@@ -1404,7 +1787,15 @@ void mt7615_mac_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
mt76_update_survey(&dev->mt76);
if (++dev->mac_work_count == 5) {
- mt7615_mac_scs_check(dev);
+ ext_phy = mt7615_ext_phy(dev);
+
+ mt7615_mac_update_mib_stats(&dev->phy);
+ mt7615_mac_scs_check(&dev->phy);
+ if (ext_phy) {
+ mt7615_mac_update_mib_stats(ext_phy);
+ mt7615_mac_scs_check(ext_phy);
+ }
+
dev->mac_work_count = 0;
}
@@ -1421,21 +1812,140 @@ void mt7615_mac_work(struct work_struct *work)
MT7615_WATCHDOG_TIME);
}
-int mt7615_dfs_stop_radar_detector(struct mt7615_dev *dev)
+static bool
+mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
- int err;
+ bool ret;
- err = mt7615_mcu_rdd_cmd(dev, RDD_STOP, MT_HW_RDD0,
- MT_RX_SEL0, 0);
- if (err < 0)
- return err;
+ ret = wait_event_timeout(dev->reset_wait,
+ (READ_ONCE(dev->reset_state) & state),
+ MT7615_RESET_TIMEOUT);
+ WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
+ return ret;
+}
- if (chandef->width == NL80211_CHAN_WIDTH_160 ||
- chandef->width == NL80211_CHAN_WIDTH_80P80)
- err = mt7615_mcu_rdd_cmd(dev, RDD_STOP, MT_HW_RDD1,
- MT_RX_SEL0, 0);
- return err;
+static void
+mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ieee80211_hw *hw = priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+
+ mt7615_mcu_add_beacon(dev, hw, vif, vif->bss_conf.enable_beacon);
+}
+
+static void
+mt7615_update_beacons(struct mt7615_dev *dev)
+{
+ ieee80211_iterate_active_interfaces(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_update_vif_beacon, dev->mt76.hw);
+
+ if (!dev->mt76.phy2)
+ return;
+
+ ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7615_update_vif_beacon, dev->mt76.phy2->hw);
+}
+
+static void
+mt7615_dma_reset(struct mt7615_dev *dev)
+{
+ int i;
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+ usleep_range(1000, 2000);
+
+ for (i = 0; i < __MT_TXQ_MAX; i++)
+ mt76_queue_tx_cleanup(dev, i, true);
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
+ mt76_queue_rx_reset(dev, i);
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+}
+
+void mt7615_mac_reset_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev;
+
+ dev = container_of(work, struct mt7615_dev, reset_work);
+
+ if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
+ return;
+
+ ieee80211_stop_queues(mt76_hw(dev));
+ if (dev->mt76.phy2)
+ ieee80211_stop_queues(dev->mt76.phy2->hw);
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+
+ /* lock/unlock all queues to ensure that no tx is pending */
+ mt76_txq_schedule_all(&dev->mphy);
+ if (dev->mt76.phy2)
+ mt76_txq_schedule_all(dev->mt76.phy2);
+
+ tasklet_disable(&dev->mt76.tx_tasklet);
+ napi_disable(&dev->mt76.napi[0]);
+ napi_disable(&dev->mt76.napi[1]);
+ napi_disable(&dev->mt76.tx_napi);
+
+ mutex_lock(&dev->mt76.mutex);
+
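+ /*
+ * PDMA reset handshake with the MCU: acknowledge that PDMA is stopped,
+ * wait for the firmware-side reset to complete, then re-init PDMA and
+ * wait for recovery before resuming normal operation below.
+ */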
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED);
+
+ if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+ mt7615_dma_reset(dev);
+
+ mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_INIT);
+ mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+ }
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ tasklet_enable(&dev->mt76.tx_tasklet);
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+
+ napi_enable(&dev->mt76.napi[0]);
+ napi_schedule(&dev->mt76.napi[0]);
+
+ napi_enable(&dev->mt76.napi[1]);
+ napi_schedule(&dev->mt76.napi[1]);
+
+ ieee80211_wake_queues(mt76_hw(dev));
+ if (dev->mt76.phy2)
+ ieee80211_wake_queues(dev->mt76.phy2->hw);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
+ mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt7615_update_beacons(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ MT7615_WATCHDOG_TIME);
+}
+
+static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
+{
+ struct mt7615_dev *dev = phy->dev;
+
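+ /* rdd_state tracks which RDD chains were started for this PHY */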
+ if (phy->rdd_state & BIT(0))
+ mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ if (phy->rdd_state & BIT(1))
+ mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
}
static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
@@ -1450,61 +1960,112 @@ static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
MT_RX_SEL0, 1);
}
-int mt7615_dfs_start_radar_detector(struct mt7615_dev *dev)
+static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
int err;
/* start CAC */
- err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, MT_HW_RDD0,
- MT_RX_SEL0, 0);
+ err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
if (err < 0)
return err;
- /* TODO: DBDC support */
-
- err = mt7615_dfs_start_rdd(dev, MT_HW_RDD0);
+ err = mt7615_dfs_start_rdd(dev, ext_phy);
if (err < 0)
return err;
+ phy->rdd_state |= BIT(ext_phy);
+
if (chandef->width == NL80211_CHAN_WIDTH_160 ||
chandef->width == NL80211_CHAN_WIDTH_80P80) {
- err = mt7615_dfs_start_rdd(dev, MT_HW_RDD1);
+ err = mt7615_dfs_start_rdd(dev, 1);
if (err < 0)
return err;
+
+ phy->rdd_state |= BIT(1);
}
return 0;
}
-int mt7615_dfs_init_radar_detector(struct mt7615_dev *dev)
+static int
+mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
+{
+ const struct mt7615_dfs_radar_spec *radar_specs;
+ struct mt7615_dev *dev = phy->dev;
+ int err, i;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_FCC:
+ radar_specs = &fcc_radar_specs;
+ err = mt7615_mcu_set_fcc5_lpn(dev, 8);
+ if (err < 0)
+ return err;
+ break;
+ case NL80211_DFS_ETSI:
+ radar_specs = &etsi_radar_specs;
+ break;
+ case NL80211_DFS_JP:
+ radar_specs = &jp_radar_specs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
+ err = mt7615_mcu_set_radar_th(dev, i,
+ &radar_specs->radar_pattern[i]);
+ if (err < 0)
+ return err;
+ }
+
+ return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
+}
+
+int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
int err;
- if (dev->mt76.region == NL80211_DFS_UNSET)
+ if (dev->mt76.region == NL80211_DFS_UNSET) {
+ phy->dfs_state = -1;
+ if (phy->rdd_state)
+ goto stop;
+
return 0;
+ }
- if (test_bit(MT76_SCANNING, &dev->mt76.state))
+ if (test_bit(MT76_SCANNING, &phy->mt76->state))
return 0;
- if (dev->dfs_state == chandef->chan->dfs_state)
+ if (phy->dfs_state == chandef->chan->dfs_state)
return 0;
- dev->dfs_state = chandef->chan->dfs_state;
+ err = mt7615_dfs_init_radar_specs(phy);
+ if (err < 0) {
+ phy->dfs_state = -1;
+ goto stop;
+ }
+
+ phy->dfs_state = chandef->chan->dfs_state;
if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
- return mt7615_dfs_start_radar_detector(dev);
- else
- return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, MT_HW_RDD0,
- MT_RX_SEL0, 0);
- } else {
- err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START,
- MT_HW_RDD0, MT_RX_SEL0, 0);
- if (err < 0)
- return err;
+ return mt7615_dfs_start_radar_detector(phy);
- return mt7615_dfs_stop_radar_detector(dev);
+ return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
+ MT_RX_SEL0, 0);
}
+
+stop:
+ err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ mt7615_dfs_stop_radar_detector(phy);
+ return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
index 38695d4f92e2..e0b89257db90 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -8,6 +8,7 @@
#define MT_CT_DMA_BUF_NUM 2
#define MT_RXD0_LENGTH GENMASK(15, 0)
+#define MT_RXD0_PKT_FLAG GENMASK(19, 16)
#define MT_RXD0_PKT_TYPE GENMASK(31, 29)
#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
@@ -26,7 +27,8 @@ enum rx_pkt_type {
PKT_TYPE_RX_TMR,
PKT_TYPE_RETRIEVE,
PKT_TYPE_TXRX_NOTIFY,
- PKT_TYPE_RX_EVENT
+ PKT_TYPE_RX_EVENT,
+ PKT_TYPE_NORMAL_MCU,
};
#define MT_RXD1_NORMAL_BSSID GENMASK(31, 26)
@@ -103,6 +105,11 @@ enum rx_pkt_type {
#define MT_RXV4_RCPI1 GENMASK(15, 8)
#define MT_RXV4_RCPI0 GENMASK(7, 0)
+#define MT_RXV6_NF3 GENMASK(31, 24)
+#define MT_RXV6_NF2 GENMASK(23, 16)
+#define MT_RXV6_NF1 GENMASK(15, 8)
+#define MT_RXV6_NF0 GENMASK(7, 0)
+
enum tx_header_format {
MT_HDR_FORMAT_802_3,
MT_HDR_FORMAT_CMD,
@@ -126,6 +133,10 @@ enum tx_pkt_queue_idx {
MT_LMAC_BMC0,
MT_LMAC_BCN0,
MT_LMAC_PSMP0,
+ MT_LMAC_ALTX1,
+ MT_LMAC_BMC1,
+ MT_LMAC_BCN1,
+ MT_LMAC_PSMP1,
};
enum tx_port_idx {
@@ -220,8 +231,15 @@ enum tx_phy_bandwidth {
#define MT_TXD6_FIXED_BW BIT(2)
#define MT_TXD6_BW GENMASK(1, 0)
+/* MT7663 DW7 HW-AMSDU */
+#define MT_TXD7_HW_AMSDU_CAP BIT(30)
#define MT_TXD7_TYPE GENMASK(21, 20)
#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
+#define MT_TXD7_SPE_IDX GENMASK(15, 11)
+#define MT_TXD7_SPE_IDX_SLE BIT(10)
+
+#define MT_TXD8_L_TYPE GENMASK(5, 4)
+#define MT_TXD8_L_SUB_TYPE GENMASK(3, 0)
#define MT_TX_RATE_STBC BIT(11)
#define MT_TX_RATE_NSS GENMASK(10, 9)
@@ -229,8 +247,27 @@ enum tx_phy_bandwidth {
#define MT_TX_RATE_IDX GENMASK(5, 0)
#define MT_TXP_MAX_BUF_NUM 6
+#define MT_HW_TXP_MAX_MSDU_NUM 4
+#define MT_HW_TXP_MAX_BUF_NUM 4
+
+#define MT_MSDU_ID_VALID BIT(15)
+
+#define MT_TXD_LEN_MSDU_LAST BIT(14)
+#define MT_TXD_LEN_AMSDU_LAST BIT(15)
-struct mt7615_txp {
+struct mt7615_txp_ptr {
+ __le32 buf0;
+ __le16 len0;
+ __le16 len1;
+ __le32 buf1;
+} __packed __aligned(4);
+
+struct mt7615_hw_txp {
+ __le16 msdu_id[MT_HW_TXP_MAX_MSDU_NUM];
+ struct mt7615_txp_ptr ptr[MT_HW_TXP_MAX_BUF_NUM / 2];
+} __packed __aligned(4);
+
+struct mt7615_fw_txp {
__le16 flags;
__le16 token;
u8 bss_idx;
@@ -239,7 +276,14 @@ struct mt7615_txp {
u8 nbuf;
__le32 buf[MT_TXP_MAX_BUF_NUM];
__le16 len[MT_TXP_MAX_BUF_NUM];
-} __packed;
+} __packed __aligned(4);
+
+struct mt7615_txp_common {
+ union {
+ struct mt7615_fw_txp fw;
+ struct mt7615_hw_txp hw;
+ };
+};
struct mt7615_tx_free {
__le16 rx_byte_cnt;
@@ -247,7 +291,7 @@ struct mt7615_tx_free {
u8 txd_cnt;
u8 rsv[3];
__le16 token[];
-} __packed;
+} __packed __aligned(4);
#define MT_TX_FREE_MSDU_ID_CNT GENMASK(6, 0)
@@ -302,6 +346,38 @@ struct mt7615_tx_free {
#define MT_TXS6_F1_RCPI_1 GENMASK(15, 8)
#define MT_TXS6_F1_RCPI_0 GENMASK(7, 0)
+struct mt7615_dfs_pulse {
+ u32 max_width; /* us */
+ int max_pwr; /* dbm */
+ int min_pwr; /* dbm */
+ u32 min_stgr_pri; /* us */
+ u32 max_stgr_pri; /* us */
+ u32 min_cr_pri; /* us */
+ u32 max_cr_pri; /* us */
+};
+
+struct mt7615_dfs_pattern {
+ u8 enb;
+ u8 stgr;
+ u8 min_crpn;
+ u8 max_crpn;
+ u8 min_crpr;
+ u8 min_pw;
+ u8 max_pw;
+ u32 min_pri;
+ u32 max_pri;
+ u8 min_crbn;
+ u8 max_crbn;
+ u8 min_stgpn;
+ u8 max_stgpn;
+ u8 min_stgpr;
+};
+
+struct mt7615_dfs_radar_spec {
+ struct mt7615_dfs_pulse pulse_th;
+ struct mt7615_dfs_pattern radar_pattern[16];
+};
+
enum mt7615_cipher_type {
MT_CIPHER_NONE,
MT_CIPHER_WEP40,
@@ -317,7 +393,7 @@ enum mt7615_cipher_type {
MT_CIPHER_GCMP_256,
};
-static inline struct mt7615_txp *
+static inline struct mt7615_txp_common *
mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
u8 *txwi;
@@ -327,7 +403,7 @@ mt7615_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
txwi = mt76_get_txwi_ptr(dev, t);
- return (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
+ return (struct mt7615_txp_common *)(txwi + MT_TXD_SIZE);
}
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 070b03403894..6586176c29af 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -11,27 +11,85 @@
#include <linux/pci.h>
#include <linux/module.h>
#include "mt7615.h"
+#include "mcu.h"
+
+static bool mt7615_dev_running(struct mt7615_dev *dev)
+{
+ struct mt7615_phy *phy;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return true;
+
+ phy = mt7615_ext_phy(dev);
+
+ return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+}
static int mt7615_start(struct ieee80211_hw *hw)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool running;
+
+ if (!mt7615_wait_for_mcu_init(dev))
+ return -EIO;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ running = mt7615_dev_running(dev);
+
+ if (!running) {
+ mt7615_mcu_set_pm(dev, 0, 0);
+ mt7615_mcu_set_mac_enable(dev, 0, true);
+ mt7615_mac_enable_nf(dev, 0);
+ }
+
+ if (phy != &dev->phy) {
+ mt7615_mcu_set_pm(dev, 1, 0);
+ mt7615_mcu_set_mac_enable(dev, 1, true);
+ mt7615_mac_enable_nf(dev, 1);
+ }
+
+ mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
+
+ set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ if (running)
+ goto out;
mt7615_mac_reset_counters(dev);
- dev->mt76.survey_time = ktime_get_boottime();
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT7615_WATCHDOG_TIME);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
return 0;
}
static void mt7615_stop(struct ieee80211_hw *hw)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
- cancel_delayed_work_sync(&dev->mt76.mac_work);
+ mutex_lock(&dev->mt76.mutex);
+
+ clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ if (phy != &dev->phy) {
+ mt7615_mcu_set_pm(dev, 1, 1);
+ mt7615_mcu_set_mac_enable(dev, 1, false);
+ }
+
+ if (!mt7615_dev_running(dev)) {
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+
+ mt7615_mcu_set_pm(dev, 0, 1);
+ mt7615_mcu_set_mac_enable(dev, 0, false);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
}
static int get_omac_idx(enum nl80211_iftype type, u32 mask)
@@ -39,6 +97,7 @@ static int get_omac_idx(enum nl80211_iftype type, u32 mask)
int i;
switch (type) {
+ case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
@@ -70,8 +129,10 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt76_txq *mtxq;
+ bool ext_phy = phy != &dev->phy;
int idx, ret = 0;
mutex_lock(&dev->mt76.mutex);
@@ -89,28 +150,38 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
}
mvif->omac_idx = idx;
- /* TODO: DBDC support. Use band 0 for now */
- mvif->band_idx = 0;
- mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
+ mvif->band_idx = ext_phy;
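+ /* with a second PHY active, the hardware WMM sets are split between bands */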
+ if (mt7615_ext_phy(dev))
+ mvif->wmm_idx = ext_phy * (MT7615_MAX_WMM_SETS / 2) +
+ mvif->idx % (MT7615_MAX_WMM_SETS / 2);
+ else
+ mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
- ret = mt7615_mcu_set_dev_info(dev, vif, 1);
+ ret = mt7615_mcu_add_dev_info(dev, vif, true);
if (ret)
goto out;
dev->vif_mask |= BIT(mvif->idx);
dev->omac_mask |= BIT(mvif->omac_idx);
+ phy->omac_mask |= BIT(mvif->omac_idx);
+
+ mt7615_mcu_set_dbdc(dev);
+
idx = MT7615_WTBL_RESERVED - mvif->idx;
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.ext_phy = mvif->band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mt7615_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
- mtxq = (struct mt76_txq *)vif->txq->drv_priv;
- mtxq->wcid = &mvif->sta.wcid;
- mt76_txq_init(&dev->mt76, vif->txq);
+ if (vif->txq) {
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ mtxq->wcid = &mvif->sta.wcid;
+ mt76_txq_init(&dev->mt76, vif->txq);
+ }
out:
mutex_unlock(&dev->mt76.mutex);
@@ -123,19 +194,22 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_sta *msta = &mvif->sta;
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
int idx = msta->wcid.idx;
/* TODO: disable beacon for the bss */
- mt7615_mcu_set_dev_info(dev, vif, 0);
+ mt7615_mcu_add_dev_info(dev, vif, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- mt76_txq_remove(&dev->mt76, vif->txq);
+ if (vif->txq)
+ mt76_txq_remove(&dev->mt76, vif->txq);
mutex_lock(&dev->mt76.mutex);
dev->vif_mask &= ~BIT(mvif->idx);
dev->omac_mask &= ~BIT(mvif->omac_idx);
+ phy->omac_mask &= ~BIT(mvif->omac_idx);
mutex_unlock(&dev->mt76.mutex);
spin_lock_bh(&dev->sta_poll_lock);
@@ -144,34 +218,38 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
spin_unlock_bh(&dev->sta_poll_lock);
}
-static int mt7615_set_channel(struct mt7615_dev *dev)
+static int mt7615_set_channel(struct mt7615_phy *phy)
{
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
int ret;
cancel_delayed_work_sync(&dev->mt76.mac_work);
mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &phy->mt76->state);
- mt7615_dfs_check_channel(dev);
+ phy->dfs_state = -1;
+ mt76_set_channel(phy->mt76);
- mt76_set_channel(&dev->mt76);
-
- ret = mt7615_mcu_set_channel(dev);
+ ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_CHANNEL_SWITCH);
if (ret)
goto out;
- ret = mt7615_dfs_init_radar_detector(dev);
- mt7615_mac_cca_stats_reset(dev);
- dev->mt76.survey_time = ktime_get_boottime();
+ mt7615_mac_set_timing(phy);
+ ret = mt7615_dfs_init_radar_detector(phy);
+ mt7615_mac_cca_stats_reset(phy);
+ mt7615_mcu_set_sku_en(phy, true);
mt7615_mac_reset_counters(dev);
+ phy->noise = 0;
+ phy->chfreq = mt76_rr(dev, MT_CHFREQ(ext_phy));
out:
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &phy->mt76->state);
mutex_unlock(&dev->mt76.mutex);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(phy->mt76);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT7615_WATCHDOG_TIME);
return ret;
@@ -181,7 +259,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
&mvif->sta;
@@ -230,27 +308,27 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool band = phy != &dev->phy;
int ret = 0;
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
+ IEEE80211_CONF_CHANGE_POWER)) {
ieee80211_stop_queues(hw);
- ret = mt7615_set_channel(dev);
+ ret = mt7615_set_channel(phy);
ieee80211_wake_queues(hw);
}
mutex_lock(&dev->mt76.mutex);
- if (changed & IEEE80211_CONF_CHANGE_POWER)
- ret = mt7615_mcu_set_tx_power(dev);
-
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
- dev->mt76.rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
else
- dev->mt76.rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+ phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
- mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
}
mutex_unlock(&dev->mt76.mutex);
@@ -263,7 +341,7 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS;
@@ -275,7 +353,10 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
unsigned int *total_flags,
u64 multicast)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ bool band = phy != &dev->phy;
+
u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
MT_WF_RFCR1_DROP_BF_POLL |
MT_WF_RFCR1_DROP_BA |
@@ -285,21 +366,21 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
#define MT76_FILTER(_flag, _hw) do { \
flags |= *total_flags & FIF_##_flag; \
- dev->mt76.rxfilter &= ~(_hw); \
- dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ phy->rxfilter &= ~(_hw); \
+ phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
- dev->mt76.rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
- MT_WF_RFCR_DROP_OTHER_BEACON |
- MT_WF_RFCR_DROP_FRAME_REPORT |
- MT_WF_RFCR_DROP_PROBEREQ |
- MT_WF_RFCR_DROP_MCAST_FILTERED |
- MT_WF_RFCR_DROP_MCAST |
- MT_WF_RFCR_DROP_BCAST |
- MT_WF_RFCR_DROP_DUPLICATE |
- MT_WF_RFCR_DROP_A2_BSSID |
- MT_WF_RFCR_DROP_UNWANTED_CTL |
- MT_WF_RFCR_DROP_STBC_MULTI);
+ phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
MT_WF_RFCR_DROP_A3_MAC |
@@ -313,12 +394,12 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
MT_WF_RFCR_DROP_NDPA);
*total_flags = flags;
- mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
if (*total_flags & FIF_CONTROL)
- mt76_clear(dev, MT_WF_RFCR1, ctl_flags);
+ mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags);
else
- mt76_set(dev, MT_WF_RFCR1, ctl_flags);
+ mt76_set(dev, MT_WF_RFCR1(band), ctl_flags);
}
static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
@@ -326,24 +407,32 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *info,
u32 changed)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
if (changed & BSS_CHANGED_ASSOC)
- mt7615_mcu_set_bss_info(dev, vif, info->assoc);
+ mt7615_mcu_add_bss_info(dev, vif, info->assoc);
- /* TODO: update beacon content
- * BSS_CHANGED_BEACON
- */
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+
+ if (slottime != phy->slottime) {
+ phy->slottime = slottime;
+ mt7615_mac_set_timing(phy);
+ }
+ }
if (changed & BSS_CHANGED_BEACON_ENABLED) {
- mt7615_mcu_set_bss_info(dev, vif, info->enable_beacon);
- mt7615_mcu_wtbl_bmc(dev, vif, info->enable_beacon);
- mt7615_mcu_set_sta_rec_bmc(dev, vif, info->enable_beacon);
- mt7615_mcu_set_bcn(dev, vif, info->enable_beacon);
+ mt7615_mcu_add_bss_info(dev, vif, info->enable_beacon);
+ mt7615_mcu_sta_add(dev, vif, NULL, info->enable_beacon);
}
+ if (changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED))
+ mt7615_mcu_add_beacon(dev, hw, vif, info->enable_beacon);
+
mutex_unlock(&dev->mt76.mutex);
}
@@ -352,15 +441,15 @@ mt7615_channel_switch_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7615_mcu_set_bcn(dev, vif, true);
+ mt7615_mcu_add_beacon(dev, hw, vif, true);
mutex_unlock(&dev->mt76.mutex);
}
-int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
@@ -375,33 +464,23 @@ int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
+ msta->wcid.ext_phy = mvif->band_idx;
+
mt7615_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- mt7615_mcu_add_wtbl(dev, vif, sta);
- mt7615_mcu_set_sta_rec(dev, vif, sta, 1);
+ mt7615_mcu_sta_add(dev, vif, sta, true);
return 0;
}
-void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
-
- if (sta->ht_cap.ht_supported)
- mt7615_mcu_set_ht_cap(dev, vif, sta);
-}
-
-void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- mt7615_mcu_set_sta_rec(dev, vif, sta, 0);
- mt7615_mcu_del_wtbl(dev, sta);
-
+ mt7615_mcu_sta_add(dev, vif, sta, false);
mt7615_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -415,7 +494,8 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
@@ -430,7 +510,7 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
break;
}
msta->n_rates = i;
- mt7615_mac_set_rates(dev, msta, NULL, msta->rates);
+ mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
msta->rate_probe = false;
spin_unlock_bh(&dev->mt76.lock);
}
@@ -439,7 +519,8 @@ static void mt7615_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
@@ -458,15 +539,16 @@ static void mt7615_tx(struct ieee80211_hw *hw,
wcid = &mvif->sta.wcid;
}
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
+ mt76_tx(mphy, control->sta, wcid, skb);
}
static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
mutex_lock(&dev->mt76.mutex);
- mt7615_mcu_set_rts_thresh(dev, val);
+ mt7615_mcu_set_rts_thresh(phy, val);
mutex_unlock(&dev->mt76.mutex);
return 0;
@@ -477,7 +559,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
enum ieee80211_ampdu_mlme_action action = params->action;
- struct mt7615_dev *dev = hw->priv;
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct ieee80211_sta *sta = params->sta;
struct ieee80211_txq *txq = sta->txq[params->tid];
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
@@ -496,21 +578,21 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
params->buf_size);
- mt7615_mcu_set_rx_ba(dev, params, 1);
+ mt7615_mcu_add_rx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_RX_STOP:
mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
- mt7615_mcu_set_rx_ba(dev, params, 0);
+ mt7615_mcu_add_rx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
- mt7615_mcu_set_tx_ba(dev, params, 1);
+ mt7615_mcu_add_tx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
- mt7615_mcu_set_tx_ba(dev, params, 0);
+ mt7615_mcu_add_tx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
@@ -518,7 +600,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
- mt7615_mcu_set_tx_ba(dev, params, 0);
+ mt7615_mcu_add_tx_ba(dev, params, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
@@ -527,6 +609,98 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return ret;
}
+static int
+mt7615_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
+ IEEE80211_STA_NONE);
+}
+
+static int
+mt7615_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
+ IEEE80211_STA_NOTEXIST);
+}
+
+static int
+mt7615_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ struct mib_stats *mib = &phy->mib;
+
+ stats->dot11RTSSuccessCount = mib->rts_cnt;
+ stats->dot11RTSFailureCount = mib->rts_retries_cnt;
+ stats->dot11FCSErrorCount = mib->fcs_err_cnt;
+ stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+
+ return 0;
+}
+
+static u64
+mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+ tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
+ tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return tsf.t64;
+}
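
Writing MT_LPON_T0CR_MODE latches the free-running TSF so the two 32-bit reads that follow observe the same instant; the union then assumes a little-endian CPU that lays t32[0] out as the low word. An endian-independent sketch of the same composition:

        u64 lo = mt76_rr(dev, MT_LPON_UTTR0);   /* latched low word */
        u64 hi = mt76_rr(dev, MT_LPON_UTTR1);   /* latched high word */
        u64 tsf = (hi << 32) | lo;
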
+
+static void
+mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+{
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+
+ phy->coverage_class = max_t(s16, coverage_class, 0);
+ mt7615_mac_set_timing(phy);
+}
+
+static int
+mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ int max_nss = hweight8(hw->wiphy->available_antennas_tx);
+ bool ext_phy = phy != &dev->phy;
+
+ if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss)
+ return -EINVAL;
+
+ if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
+ tx_ant = BIT(ffs(tx_ant) - 1) - 1;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ phy->mt76->antenna_mask = tx_ant;
+ if (ext_phy) {
+ if (dev->chainmask == 0xf)
+ tx_ant <<= 2;
+ else
+ tx_ant <<= 1;
+ }
+ phy->chainmask = tx_ant;
+
+ mt76_set_stream_caps(&dev->mt76, true);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
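
antenna_mask stays in per-phy (logical) numbering while chainmask uses physical chain numbers, so the extension phy shifts its selection past the chains owned by band 0. A worked example, assuming a 4-chain device split 2+2 between the bands:

        /* dev->chainmask == 0xf, ext_phy == true, 2 streams requested */
        u32 tx_ant = 0x3;                       /* logical chains 0-1 */
        phy->mt76->antenna_mask = tx_ant;       /* 0x3 */
        tx_ant <<= 2;                           /* physical chains 2-3 */
        phy->chainmask = tx_ant;                /* 0xc */
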
+
const struct ieee80211_ops mt7615_ops = {
.tx = mt7615_tx,
.start = mt7615_start,
@@ -537,7 +711,9 @@ const struct ieee80211_ops mt7615_ops = {
.conf_tx = mt7615_conf_tx,
.configure_filter = mt7615_configure_filter,
.bss_info_changed = mt7615_bss_info_changed,
- .sta_state = mt76_sta_state,
+ .sta_add = mt7615_sta_add,
+ .sta_remove = mt7615_sta_remove,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt7615_set_key,
.ampdu_action = mt7615_ampdu_action,
.set_rts_threshold = mt7615_set_rts_threshold,
@@ -548,6 +724,38 @@ const struct ieee80211_ops mt7615_ops = {
.release_buffered_frames = mt76_release_buffered_frames,
.get_txpower = mt76_get_txpower,
.channel_switch_beacon = mt7615_channel_switch_beacon,
+ .get_stats = mt7615_get_stats,
+ .get_tsf = mt7615_get_tsf,
.get_survey = mt76_get_survey,
.get_antenna = mt76_get_antenna,
+ .set_antenna = mt7615_set_antenna,
+ .set_coverage_class = mt7615_set_coverage_class,
};
+
+static int __init mt7615_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&mt7615_pci_driver);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_MT7622_WMAC)) {
+ ret = platform_driver_register(&mt7622_wmac_driver);
+ if (ret)
+ pci_unregister_driver(&mt7615_pci_driver);
+ }
+
+ return ret;
+}
+
+static void __exit mt7615_exit(void)
+{
+ if (IS_ENABLED(CONFIG_MT7622_WMAC))
+ platform_driver_unregister(&mt7622_wmac_driver);
+ pci_unregister_driver(&mt7615_pci_driver);
+}
+
+module_init(mt7615_init);
+module_exit(mt7615_exit);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index f229c9ce9f65..610cfa918c7b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -29,7 +29,37 @@ struct mt7615_fw_trailer {
__le32 len;
} __packed;
-#define MCU_PATCH_ADDRESS 0x80000
+#define FW_V3_COMMON_TAILER_SIZE 36
+#define FW_V3_REGION_TAILER_SIZE 40
+#define FW_START_OVERRIDE BIT(0)
+#define FW_START_DLYCAL BIT(1)
+#define FW_START_WORKING_PDA_CR4 BIT(2)
+
+struct mt7663_fw_trailer {
+ u8 chip_id;
+ u8 eco_code;
+ u8 n_region;
+ u8 format_ver;
+ u8 format_flag;
+ u8 reserv[2];
+ char fw_ver[10];
+ char build_date[15];
+ __le32 crc;
+} __packed;
+
+struct mt7663_fw_buf {
+ __le32 crc;
+ __le32 d_img_size;
+ __le32 block_size;
+ u8 rsv[4];
+ __le32 img_dest_addr;
+ __le32 img_size;
+ u8 feature_set;
+};
+
+#define MT7615_PATCH_ADDRESS 0x80000
+#define MT7622_PATCH_ADDRESS 0x9c000
+#define MT7663_PATCH_ADDRESS 0xdc000
#define N9_REGION_NUM 2
#define CR4_REGION_NUM 1
@@ -43,29 +73,32 @@ struct mt7615_fw_trailer {
#define DL_MODE_KEY_IDX GENMASK(2, 1)
#define DL_MODE_RESET_SEC_IV BIT(3)
#define DL_MODE_WORKING_PDA_CR4 BIT(4)
+#define DL_MODE_VALID_RAM_ENTRY BIT(5)
#define DL_MODE_NEED_RSP BIT(31)
#define FW_START_OVERRIDE BIT(0)
#define FW_START_WORKING_PDA_CR4 BIT(2)
-static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
- int cmd, int *wait_seq)
+void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
{
+ int txd_len, mcu_cmd = cmd & MCU_CMD_MASK;
+ struct mt7615_uni_txd *uni_txd;
struct mt7615_mcu_txd *mcu_txd;
u8 seq, q_idx, pkt_fmt;
- enum mt76_txq_id qid;
- u32 val;
__le32 *txd;
+ u32 val;
- seq = ++dev->mt76.mmio.mcu.msg_seq & 0xf;
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
if (!seq)
- seq = ++dev->mt76.mmio.mcu.msg_seq & 0xf;
+ seq = ++dev->mt76.mcu.msg_seq & 0xf;
+ if (wait_seq)
+ *wait_seq = seq;
- mcu_txd = (struct mt7615_mcu_txd *)skb_push(skb,
- sizeof(struct mt7615_mcu_txd));
- memset(mcu_txd, 0, sizeof(struct mt7615_mcu_txd));
+ txd_len = cmd & MCU_UNI_PREFIX ? sizeof(*uni_txd) : sizeof(*mcu_txd);
+ txd = (__le32 *)skb_push(skb, txd_len);
- if (cmd != -MCU_CMD_FW_SCATTER) {
+ if (cmd != MCU_CMD_FW_SCATTER) {
q_idx = MT_TX_MCU_PORT_RX_Q0;
pkt_fmt = MT_TX_TYPE_CMD;
} else {
@@ -73,8 +106,6 @@ static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
pkt_fmt = MT_TX_TYPE_FW;
}
- txd = mcu_txd->txd;
-
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_MCU) |
FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
@@ -85,26 +116,43 @@ static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
FIELD_PREP(MT_TXD1_PKT_FMT, pkt_fmt);
txd[1] = cpu_to_le32(val);
+ if (cmd & MCU_UNI_PREFIX) {
+ uni_txd = (struct mt7615_uni_txd *)txd;
+ uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
+ uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+ uni_txd->cid = cpu_to_le16(mcu_cmd);
+ uni_txd->s2d_index = MCU_S2D_H2N;
+ uni_txd->pkt_type = MCU_PKT_ID;
+ uni_txd->seq = seq;
+
+ return;
+ }
+
+ mcu_txd = (struct mt7615_mcu_txd *)txd;
mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, q_idx));
+ mcu_txd->s2d_index = MCU_S2D_H2N;
mcu_txd->pkt_type = MCU_PKT_ID;
mcu_txd->seq = seq;
- if (cmd < 0) {
+ if (cmd & MCU_FW_PREFIX) {
mcu_txd->set_query = MCU_Q_NA;
- mcu_txd->cid = -cmd;
+ mcu_txd->cid = mcu_cmd;
} else {
mcu_txd->cid = MCU_CMD_EXT_CID;
mcu_txd->set_query = MCU_Q_SET;
mcu_txd->ext_cid = cmd;
mcu_txd->ext_cid_ack = 1;
}
- mcu_txd->s2d_index = MCU_S2D_H2N;
+}
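
Firmware-download commands used to be flagged by negating the command id; the cmd word now carries explicit prefix bits that this function decodes. A sketch of the assumed encoding (the authoritative values live in mcu.h, so the bit positions here are illustrative):

#define MCU_FW_PREFIX   BIT(31) /* FW download path, set_query = MCU_Q_NA */
#define MCU_UNI_PREFIX  BIT(30) /* unified (mt7615_uni_txd) descriptor */
#define MCU_CMD_MASK    ~(MCU_FW_PREFIX | MCU_UNI_PREFIX)
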
- if (wait_seq)
- *wait_seq = seq;
+static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
+{
+ enum mt76_txq_id qid;
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state))
+ mt7615_mcu_fill_msg(dev, skb, cmd, wait_seq);
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
qid = MT_TXQ_MCU;
else
qid = MT_TXQ_FWDL;
@@ -123,7 +171,7 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
return -EAGAIN;
switch (cmd) {
- case -MCU_CMD_PATCH_SEM_CONTROL:
+ case MCU_CMD_PATCH_SEM_CONTROL:
skb_pull(skb, sizeof(*rxd) - 4);
ret = *skb->data;
break;
@@ -139,32 +187,18 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
return ret;
}
-static int
-mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
- int len, bool wait_resp)
+int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq)
{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- unsigned long expires = jiffies + 10 * HZ;
+ unsigned long expires = jiffies + 20 * HZ;
struct sk_buff *skb;
- int ret, seq;
-
- skb = mt7615_mcu_msg_alloc(data, len);
- if (!skb)
- return -ENOMEM;
-
- mutex_lock(&mdev->mmio.mcu.mutex);
-
- ret = __mt7615_mcu_msg_send(dev, skb, cmd, &seq);
- if (ret)
- goto out;
+ int ret = 0;
- while (wait_resp) {
- skb = mt76_mcu_get_response(mdev, expires);
+ while (true) {
+ skb = mt76_mcu_get_response(&dev->mt76, expires);
if (!skb) {
- dev_err(mdev->dev, "Message %d (seq %d) timeout\n",
- cmd, seq);
- ret = -ETIMEDOUT;
- break;
+ dev_err(dev->mt76.dev, "Message %ld (seq %d) timeout\n",
+ cmd & MCU_CMD_MASK, seq);
+ return -ETIMEDOUT;
}
ret = mt7615_mcu_parse_response(dev, cmd, skb, seq);
@@ -172,12 +206,44 @@ mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
break;
}
+ return ret;
+}
+
+static int
+mt7615_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, bool wait_resp)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ int ret, seq;
+
+ mutex_lock(&mdev->mcu.mutex);
+
+ ret = __mt7615_mcu_msg_send(dev, skb, cmd, &seq);
+ if (ret)
+ goto out;
+
+ if (wait_resp)
+ ret = mt7615_mcu_wait_response(dev, cmd, seq);
+
out:
- mutex_unlock(&mdev->mmio.mcu.mutex);
+ mutex_unlock(&mdev->mcu.mutex);
return ret;
}
+static int
+mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp)
+{
+ struct sk_buff *skb;
+
+ skb = mt7615_mcu_msg_alloc(data, len);
+ if (!skb)
+ return -ENOMEM;
+
+ return __mt76_mcu_skb_send_msg(mdev, skb, cmd, wait_resp);
+}
+
static void
mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
@@ -186,20 +252,59 @@ mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
}
static void
+mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7615_mcu_rdd_report *r;
+
+ r = (struct mt7615_mcu_rdd_report *)skb->data;
+
+ if (r->idx && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ ieee80211_radar_detected(mphy->hw);
+ dev->hw_pattern++;
+}
+
+static void
+mt7615_mcu_rx_log_message(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
+ const char *data = (char *)&rxd[1];
+ const char *type;
+
+ switch (rxd->s2d_index) {
+ case 0:
+ type = "N9";
+ break;
+ case 2:
+ type = "CR4";
+ break;
+ default:
+ type = "unknown";
+ break;
+ }
+
+ wiphy_info(mt76_hw(dev)->wiphy, "%s: %s", type, data);
+}
+
+static void
mt7615_mcu_rx_ext_event(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
switch (rxd->ext_eid) {
case MCU_EXT_EVENT_RDD_REPORT:
- ieee80211_radar_detected(dev->mt76.hw);
- dev->hw_pattern++;
+ mt7615_mcu_rx_radar_detected(dev, skb);
break;
case MCU_EXT_EVENT_CSA_NOTIFY:
ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7615_mcu_csa_finish, dev);
break;
+ case MCU_EXT_EVENT_FW_LOG_2_HOST:
+ mt7615_mcu_rx_log_message(dev, skb);
+ break;
default:
break;
}
@@ -247,10 +352,1095 @@ static int mt7615_mcu_init_download(struct mt7615_dev *dev, u32 addr,
.mode = cpu_to_le32(mode),
};
- return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_add_dev(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ struct req_hdr {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 rsv[3];
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 band_idx;
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } data = {
+ .hdr = {
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ .tlv_num = cpu_to_le16(1),
+ .is_tlv_append = 1,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = enable,
+ .band_idx = mvif->band_idx,
+ },
+ };
+
+ memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE,
+ &data, sizeof(data), true);
+}
+
+static int
+mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct ieee80211_mutable_offsets offs;
+ struct ieee80211_tx_info *info;
+ struct req {
+ u8 omac_idx;
+ u8 enable;
+ u8 wlan_idx;
+ u8 band_idx;
+ u8 pkt_type;
+ u8 need_pre_tbtt_int;
+ __le16 csa_ie_pos;
+ __le16 pkt_len;
+ __le16 tim_ie_pos;
+ u8 pkt[512];
+ u8 csa_cnt;
+ /* bss color change */
+ u8 bcc_cnt;
+ __le16 bcc_ie_pos;
+ } __packed req = {
+ .omac_idx = mvif->omac_idx,
+ .enable = enable,
+ .wlan_idx = wcid->idx,
+ .band_idx = mvif->band_idx,
+ };
+ struct sk_buff *skb;
+
+ skb = ieee80211_beacon_get_template(hw, vif, &offs);
+ if (!skb)
+ return -EINVAL;
+
+ if (skb->len > 512 - MT_TXD_SIZE) {
+ dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if (mvif->band_idx) {
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+ }
+
+ mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
+ 0, NULL, true);
+ memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
+ req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
+ if (offs.csa_counter_offs[0]) {
+ u16 csa_offs;
+
+ csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4;
+ req.csa_ie_pos = cpu_to_le16(csa_offs);
+ req.csa_cnt = skb->data[offs.csa_counter_offs[0]];
+ }
+ dev_kfree_skb(skb);
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
+{
+#define ENTER_PM_STATE 1
+#define EXIT_PM_STATE 2
+ struct {
+ u8 pm_number;
+ u8 pm_state;
+ u8 bssid[ETH_ALEN];
+ u8 dtim_period;
+ u8 wlan_idx;
+ __le16 bcn_interval;
+ __le32 aid;
+ __le32 rx_filter;
+ u8 band_idx;
+ u8 rsv[3];
+ __le32 feature;
+ u8 omac_idx;
+ u8 wmm_idx;
+ u8 bcn_loss_cnt;
+ u8 bcn_sp_duration;
+ } __packed req = {
+ .pm_number = 5,
+ .pm_state = state ? ENTER_PM_STATE : EXIT_PM_STATE,
+ .band_idx = band,
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL,
+ &req, sizeof(req), true);
+}
+
+static struct sk_buff *
+mt7615_mcu_alloc_sta_req(struct mt7615_vif *mvif, struct mt7615_sta *msta)
+{
+ struct sta_req_hdr hdr = {
+ .bss_idx = mvif->idx,
+ .wlan_idx = msta ? msta->wcid.idx : 0,
+ .muar_idx = msta ? mvif->omac_idx : 0,
+ .is_tlv_append = 1,
+ };
+ struct sk_buff *skb;
+
+ skb = mt7615_mcu_msg_alloc(NULL, MT7615_STA_UPDATE_MAX_SIZE);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ skb_put_data(skb, &hdr, sizeof(hdr));
+
+ return skb;
+}
+
+static struct wtbl_req_hdr *
+mt7615_mcu_alloc_wtbl_req(struct mt7615_sta *msta, int cmd,
+ void *sta_wtbl, struct sk_buff **skb)
+{
+ struct tlv *sta_hdr = sta_wtbl;
+ struct wtbl_req_hdr hdr = {
+ .wlan_idx = msta->wcid.idx,
+ .operation = cmd,
+ };
+ struct sk_buff *nskb = *skb;
+
+ if (!nskb) {
+ nskb = mt7615_mcu_msg_alloc(NULL, MT7615_WTBL_UPDATE_BA_SIZE);
+ if (!nskb)
+ return ERR_PTR(-ENOMEM);
+
+ *skb = nskb;
+ }
+
+ if (sta_hdr)
+ sta_hdr->len = cpu_to_le16(sizeof(hdr));
+
+ return skb_put_data(nskb, &hdr, sizeof(hdr));
+}
+
+static struct tlv *
+mt7615_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
+ void *sta_ntlv, void *sta_wtbl)
+{
+ struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv;
+ struct tlv *sta_hdr = sta_wtbl;
+ struct tlv *ptlv, tlv = {
+ .tag = cpu_to_le16(tag),
+ .len = cpu_to_le16(len),
+ };
+ u16 ntlv;
+
+ ptlv = skb_put(skb, len);
+ memcpy(ptlv, &tlv, sizeof(tlv));
+
+ ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
+ ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
+
+ if (sta_hdr) {
+ u16 size = le16_to_cpu(sta_hdr->len);
+
+ sta_hdr->len = cpu_to_le16(size + len);
+ }
+
+ return ptlv;
+}
+
+static struct tlv *
+mt7615_mcu_add_tlv(struct sk_buff *skb, int tag, int len)
+{
+ return mt7615_mcu_add_nested_tlv(skb, tag, len, skb->data, NULL);
+}
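
Each helper appends one TLV while keeping the enclosing counters consistent: the request header's tlv_num is bumped, and when the new TLV sits inside a STA_REC_WTBL container the container's len grows by the same amount. A typical call sequence (error handling elided), mirroring the block-ack path later in this patch:

        skb = mt7615_mcu_alloc_sta_req(mvif, msta);
        sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
        wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, sta_wtbl, &skb);
        /* nested TLV: bumps wtbl_hdr's tlv_num and widens sta_wtbl->len */
        mt7615_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
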
+
+static int
+mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct bss_info_basic *bss;
+ u8 wlan_idx = mvif->sta.wcid.idx;
+ u32 type = NETWORK_INFRA;
+ struct tlv *tlv;
+
+ tlv = mt7615_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
+ if (enable) {
+ struct ieee80211_sta *sta;
+ struct mt7615_sta *msta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!sta) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ msta = (struct mt7615_sta *)sta->drv_priv;
+ wlan_idx = msta->wcid.idx;
+ rcu_read_unlock();
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = NETWORK_IBSS;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ bss = (struct bss_info_basic *)tlv;
+ memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ bss->network_type = cpu_to_le32(type);
+ bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->bmc_tx_wlan_idx = wlan_idx;
+ bss->wmm_idx = mvif->wmm_idx;
+ bss->active = enable;
+
+ return 0;
+}
+
+static void
+mt7615_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct bss_info_omac *omac;
+ struct tlv *tlv;
+ u32 type = 0;
+ u8 idx;
+
+ tlv = mt7615_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ type = CONNECTION_INFRA_AP;
+ break;
+ case NL80211_IFTYPE_STATION:
+ type = CONNECTION_INFRA_STA;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = CONNECTION_IBSS_ADHOC;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ omac = (struct bss_info_omac *)tlv;
+ idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ omac->conn_type = cpu_to_le32(type);
+ omac->omac_idx = mvif->omac_idx;
+ omac->band_idx = mvif->band_idx;
+ omac->hw_bss_idx = idx;
+}
+
+/* SIFS 20us + 512 byte beacon transmitted at 1Mbps (512 * 8 = 4096us) */
+#define BCN_TX_ESTIMATE_TIME (4096 + 20)
+static void
+mt7615_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7615_vif *mvif)
+{
+ struct bss_info_ext_bss *ext;
+ int ext_bss_idx, tsf_offset;
+ struct tlv *tlv;
+
+ ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
+ if (ext_bss_idx < 0)
+ return;
+
+ tlv = mt7615_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
+
+ ext = (struct bss_info_ext_bss *)tlv;
+ tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
+ ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+}
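
Each extended BSS shifts its TSF by one worst-case beacon airtime per slot, so co-hosted beacons are serialized instead of colliding. Worked through for one case:

        /* omac_idx == EXT_BSSID_START + 2:  ext_bss_idx = 2,
         * mbss_tsf_offset = 2 * (4096 + 20) = 8232us
         */
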
+
+static void
+mt7615_mcu_sta_ba_tlv(struct sk_buff *skb,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx)
+{
+ struct sta_rec_ba *ba;
+ struct tlv *tlv;
+
+ tlv = mt7615_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));
+
+ ba = (struct sta_rec_ba *)tlv;
+ ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT;
+ ba->winsize = cpu_to_le16(params->buf_size);
+ ba->ssn = cpu_to_le16(params->ssn);
+ ba->ba_en = enable << params->tid;
+ ba->amsdu = params->amsdu;
+ ba->tid = params->tid;
+}
+
+static void
+mt7615_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct sta_rec_basic *basic;
+ struct tlv *tlv;
+
+ tlv = mt7615_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic));
+
+ basic = (struct sta_rec_basic *)tlv;
+ basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
+
+ if (enable) {
+ basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
+ basic->conn_state = CONN_STATE_PORT_SECURE;
+ } else {
+ basic->conn_state = CONN_STATE_DISCONNECT;
+ }
+
+ if (!sta) {
+ basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
+ eth_broadcast_addr(basic->peer_addr);
+ return;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
+ break;
+ case NL80211_IFTYPE_STATION:
+ basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
+ basic->aid = cpu_to_le16(sta->aid);
+ basic->qos = sta->wme;
+}
+
+static void
+mt7615_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct tlv *tlv;
+
+ if (sta->ht_cap.ht_supported) {
+ struct sta_rec_ht *ht;
+
+ tlv = mt7615_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
+ ht = (struct sta_rec_ht *)tlv;
+ ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+ }
+ if (sta->vht_cap.vht_supported) {
+ struct sta_rec_vht *vht;
+
+ tlv = mt7615_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
+ vht = (struct sta_rec_vht *)tlv;
+ vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
+ vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
+ }
+}
+
+static void
+mt7615_mcu_wtbl_ba_tlv(struct sk_buff *skb,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx, void *sta_wtbl,
+ void *wtbl_tlv)
+{
+ struct wtbl_ba *ba;
+ struct tlv *tlv;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_BA, sizeof(*ba),
+ wtbl_tlv, sta_wtbl);
+
+ ba = (struct wtbl_ba *)tlv;
+ ba->tid = params->tid;
+
+ if (tx) {
+ ba->ba_type = MT_BA_TYPE_ORIGINATOR;
+ ba->sn = enable ? cpu_to_le16(params->ssn) : 0;
+ ba->ba_winsize = cpu_to_le16(params->buf_size);
+ ba->ba_en = enable;
+ } else {
+ memcpy(ba->peer_addr, params->sta->addr, ETH_ALEN);
+ ba->ba_type = MT_BA_TYPE_RECIPIENT;
+ ba->rst_ba_tid = params->tid;
+ ba->rst_ba_sel = RST_BA_MAC_TID_MATCH;
+ ba->rst_ba_sb = 1;
+ }
+
+ if (enable && tx) {
+ u8 ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
+ int i;
+
+ for (i = 7; i > 0; i--) {
+ if (params->buf_size >= ba_range[i])
+ break;
+ }
+ ba->ba_winsize_idx = i;
+ }
+}
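
The descending loop leaves i at the largest table entry that still fits the negotiated window (or 0 when nothing from index 1 up fits), so ba_winsize_idx maps buf_size onto the hardware's quantized window sizes:

        /* buf_size == 40: 64, 54, 48 too big; 36 fits -> ba_winsize_idx = 4 */
        /* buf_size == 6:  nothing >= 8 fits          -> ba_winsize_idx = 0 */
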
+
+static void
+mt7615_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, void *sta_wtbl,
+ void *wtbl_tlv)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct wtbl_generic *generic;
+ struct wtbl_rx *rx;
+ struct tlv *tlv;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_GENERIC, sizeof(*generic),
+ wtbl_tlv, sta_wtbl);
+
+ generic = (struct wtbl_generic *)tlv;
+
+ if (sta) {
+ memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
+ generic->partial_aid = cpu_to_le16(sta->aid);
+ generic->muar_idx = mvif->omac_idx;
+ generic->qos = sta->wme;
+ } else {
+ eth_broadcast_addr(generic->peer_addr);
+ generic->muar_idx = 0xe;
+ }
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_RX, sizeof(*rx),
+ wtbl_tlv, sta_wtbl);
+
+ rx = (struct wtbl_rx *)tlv;
+ rx->rca1 = sta ? vif->type != NL80211_IFTYPE_AP : 1;
+ rx->rca2 = 1;
+ rx->rv = 1;
+}
+
+static void
+mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv)
+{
+ struct tlv *tlv;
+ u32 flags = 0;
+
+ if (sta->ht_cap.ht_supported) {
+ struct wtbl_ht *ht;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
+ wtbl_tlv, sta_wtbl);
+ ht = (struct wtbl_ht *)tlv;
+ ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
+ ht->af = sta->ht_cap.ampdu_factor;
+ ht->mm = sta->ht_cap.ampdu_density;
+ ht->ht = 1;
+
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ flags |= MT_WTBL_W5_SHORT_GI_20;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ flags |= MT_WTBL_W5_SHORT_GI_40;
+ }
+
+ if (sta->vht_cap.vht_supported) {
+ struct wtbl_vht *vht;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht),
+ wtbl_tlv, sta_wtbl);
+ vht = (struct wtbl_vht *)tlv;
+ vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC;
+ vht->vht = 1;
+
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ flags |= MT_WTBL_W5_SHORT_GI_80;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ flags |= MT_WTBL_W5_SHORT_GI_160;
+ }
+
+ /* wtbl smps */
+ if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) {
+ struct wtbl_smps *smps;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
+ wtbl_tlv, sta_wtbl);
+ smps = (struct wtbl_smps *)tlv;
+ smps->smps = 1;
+ }
+
+ if (sta->ht_cap.ht_supported) {
+ /* sgi */
+ u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
+ MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
+ struct wtbl_raw *raw;
+
+ tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_RAW_DATA,
+ sizeof(*raw), wtbl_tlv,
+ sta_wtbl);
+ raw = (struct wtbl_raw *)tlv;
+ raw->val = cpu_to_le32(flags);
+ raw->msk = cpu_to_le32(~msk);
+ raw->wtbl_idx = 1;
+ raw->dw = 5;
+ }
+}
+
+static int
+mt7615_mcu_add_bss(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct sk_buff *skb;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, NULL);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ if (enable)
+ mt7615_mcu_bss_omac_tlv(skb, vif);
+
+ mt7615_mcu_bss_basic_tlv(skb, vif, enable);
+
+ if (enable && mvif->omac_idx > EXT_BSSID_START)
+ mt7615_mcu_bss_ext_tlv(skb, mvif);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_BSS_INFO_UPDATE, true);
+}
+
+static int
+mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct sk_buff *skb = NULL;
+ int err;
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, NULL, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, true, NULL, wtbl_hdr);
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_WTBL_UPDATE, true);
+ if (err < 0)
+ return err;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, true);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+static int
+mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct sk_buff *skb;
+ int err;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, false);
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+ if (err < 0 || !enable)
+ return err;
+
+ skb = NULL;
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, NULL, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, false, NULL, wtbl_hdr);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_WTBL_UPDATE, true);
+}
+
+static int
+mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct sk_buff *skb, *sskb, *wskb = NULL;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct mt7615_sta *msta;
+ int cmd, err;
+
+ msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
+
+ sskb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(sskb))
+ return PTR_ERR(sskb);
+
+ mt7615_mcu_sta_basic_tlv(sskb, vif, sta, enable);
+ if (enable && sta)
+ mt7615_mcu_sta_ht_tlv(sskb, sta);
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_RESET_AND_SET, NULL,
+ &wskb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ if (enable) {
+ mt7615_mcu_wtbl_generic_tlv(wskb, vif, sta, NULL, wtbl_hdr);
+ if (sta)
+ mt7615_mcu_wtbl_ht_tlv(wskb, sta, NULL, wtbl_hdr);
+ }
+
+ cmd = enable ? MCU_EXT_CMD_WTBL_UPDATE : MCU_EXT_CMD_STA_REC_UPDATE;
+ skb = enable ? wskb : sskb;
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
+ if (err < 0)
+ return err;
+
+ cmd = enable ? MCU_EXT_CMD_STA_REC_UPDATE : MCU_EXT_CMD_WTBL_UPDATE;
+ skb = enable ? sskb : wskb;
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
+}
+
+static const struct mt7615_mcu_ops wtbl_update_ops = {
+ .add_beacon_offload = mt7615_mcu_add_beacon_offload,
+ .set_pm_state = mt7615_mcu_ctrl_pm_state,
+ .add_dev_info = mt7615_mcu_add_dev,
+ .add_bss_info = mt7615_mcu_add_bss,
+ .add_tx_ba = mt7615_mcu_wtbl_tx_ba,
+ .add_rx_ba = mt7615_mcu_wtbl_rx_ba,
+ .sta_add = mt7615_mcu_wtbl_sta_add,
+};
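
v1 firmware keeps WTBL and STA_REC behind separate commands, which is why the helpers above sequence them explicitly (WTBL first when enabling, last when tearing down). Call sites reach whichever table was selected through dev->mcu_ops, presumably via per-op wrappers in mt7615.h along these lines (illustrative, not copied from the header):

#define mt7615_mcu_add_tx_ba(dev, ...) ((dev)->mcu_ops->add_tx_ba((dev), __VA_ARGS__))
#define mt7615_mcu_sta_add(dev, ...)   ((dev)->mcu_ops->sta_add((dev), __VA_ARGS__))
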
+
+static int
+mt7615_mcu_sta_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable, bool tx)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, tx);
+
+ sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ /* skb is non-NULL here, so the wtbl request cannot fail to allocate */
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, sta_wtbl, &skb);
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
+static int
+mt7615_mcu_sta_tx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ return mt7615_mcu_sta_ba(dev, params, enable, true);
+}
+
+static int
+mt7615_mcu_sta_rx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ return mt7615_mcu_sta_ba(dev, params, enable, false);
+}
+
+static int
+mt7615_mcu_add_sta_cmd(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable, int cmd)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct mt7615_sta *msta;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+
+ msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_basic_tlv(skb, vif, sta, enable);
+ if (enable && sta)
+ mt7615_mcu_sta_ht_tlv(skb, sta);
+
+ sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_RESET_AND_SET,
+ sta_wtbl, &skb);
+ if (enable) {
+ mt7615_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
+ if (sta)
+ mt7615_mcu_wtbl_ht_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+ }
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
+}
+
+static int
+mt7615_mcu_add_sta(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ return mt7615_mcu_add_sta_cmd(dev, vif, sta, enable,
+ MCU_EXT_CMD_STA_REC_UPDATE);
+}
+
+static const struct mt7615_mcu_ops sta_update_ops = {
+ .add_beacon_offload = mt7615_mcu_add_beacon_offload,
+ .set_pm_state = mt7615_mcu_ctrl_pm_state,
+ .add_dev_info = mt7615_mcu_add_dev,
+ .add_bss_info = mt7615_mcu_add_bss,
+ .add_tx_ba = mt7615_mcu_sta_tx_ba,
+ .add_rx_ba = mt7615_mcu_sta_rx_ba,
+ .sta_add = mt7615_mcu_add_sta,
+};
+
+static int
+mt7615_mcu_uni_add_dev(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ struct req_hdr {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 pad;
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 pad;
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } data = {
+ .hdr = {
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = enable,
+ },
+ };
+
+ memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_DEV_INFO_UPDATE,
+ &data, sizeof(data), true);
+}
+
+static int
+mt7615_mcu_uni_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
+{
+ return 0;
+}
+
+static int
+mt7615_mcu_uni_add_bss(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct {
+ struct req_hdr {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct basic_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 omac_idx;
+ u8 hw_bss_idx;
+ u8 band_idx;
+ __le32 conn_type;
+ u8 conn_state;
+ u8 wmm_idx;
+ u8 bssid[ETH_ALEN];
+ __le16 bmc_tx_wlan_idx;
+ __le16 bcn_interval;
+ u8 dtim_period;
+ u8 phymode;
+ __le16 sta_idx;
+ u8 nonht_basic_phy;
+ u8 pad[3];
+ } __packed basic;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .basic = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
+ .len = cpu_to_le16(sizeof(struct basic_tlv)),
+ .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
+ .dtim_period = vif->bss_conf.dtim_period,
+ .omac_idx = mvif->omac_idx,
+ .band_idx = mvif->band_idx,
+ .wmm_idx = mvif->wmm_idx,
+ .active = enable,
+ },
+ };
+ u8 idx, tx_wlan_idx = 0;
+
+ idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
+ req.basic.hw_bss_idx = idx;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
+ tx_wlan_idx = mvif->sta.wcid.idx;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (enable) {
+ struct ieee80211_sta *sta;
+ struct mt7615_sta *msta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!sta) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ msta = (struct mt7615_sta *)sta->drv_priv;
+ tx_wlan_idx = msta->wcid.idx;
+ rcu_read_unlock();
+ }
+ req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ memcpy(req.basic.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ req.basic.bmc_tx_wlan_idx = cpu_to_le16(tx_wlan_idx);
+ req.basic.sta_idx = cpu_to_le16(tx_wlan_idx);
+ req.basic.conn_state = !enable;
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct ieee80211_mutable_offsets offs;
+ struct {
+ struct req_hdr {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct bcn_content_tlv {
+ __le16 tag;
+ __le16 len;
+ __le16 tim_ie_pos;
+ __le16 csa_ie_pos;
+ __le16 bcc_ie_pos;
+ /* 0: enable beacon offload
+ * 1: disable beacon offload
+ * 2: update probe response offload
+ */
+ u8 enable;
+ /* 0: legacy format (TXD + payload)
+ * 1: only cap field IE
+ */
+ u8 type;
+ __le16 pkt_len;
+ u8 pkt[512];
+ } __packed beacon_tlv;
+ } req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .beacon_tlv = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
+ .len = cpu_to_le16(sizeof(struct bcn_content_tlv)),
+ .enable = enable,
+ },
+ };
+ struct sk_buff *skb;
+
+ skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
+ if (!skb)
+ return -EINVAL;
+
+ if (skb->len > 512 - MT_TXD_SIZE) {
+ dev_err(dev->mt76.dev, "beacon size limit exceed\n");
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ mt7615_mac_write_txwi(dev, (__le32 *)(req.beacon_tlv.pkt), skb,
+ wcid, NULL, 0, NULL, true);
+ memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
+ req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
+
+ if (offs.csa_counter_offs[0]) {
+ u16 csa_offs;
+
+ csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4;
+ req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
+ }
+ dev_kfree_skb(skb);
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
&req, sizeof(req), true);
}
+static int
+mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+ int err;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, sta_wtbl, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, true, sta_wtbl,
+ wtbl_hdr);
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
+ if (err < 0)
+ return err;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, true);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
+}
+
+static int
+mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+ int err;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7615_mcu_sta_ba_tlv(skb, params, enable, false);
+
+ err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
+ if (err < 0 || !enable)
+ return err;
+
+ skb = mt7615_mcu_alloc_sta_req(mvif, msta);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+
+ wtbl_hdr = mt7615_mcu_alloc_wtbl_req(msta, WTBL_SET, sta_wtbl, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt7615_mcu_wtbl_ba_tlv(skb, params, enable, false, sta_wtbl,
+ wtbl_hdr);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
+}
+
+static int
+mt7615_mcu_uni_add_sta(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ return mt7615_mcu_add_sta_cmd(dev, vif, sta, enable,
+ MCU_UNI_CMD_STA_REC_UPDATE);
+}
+
+static const struct mt7615_mcu_ops uni_update_ops = {
+ .add_beacon_offload = mt7615_mcu_uni_add_beacon_offload,
+ .set_pm_state = mt7615_mcu_uni_ctrl_pm_state,
+ .add_dev_info = mt7615_mcu_uni_add_dev,
+ .add_bss_info = mt7615_mcu_uni_add_bss,
+ .add_tx_ba = mt7615_mcu_uni_tx_ba,
+ .add_rx_ba = mt7615_mcu_uni_rx_ba,
+ .sta_add = mt7615_mcu_uni_add_sta,
+};
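
The three ops tables line up with the firmware generations detected at load time, as set later in this patch:

        /* mt7615_load_n9() / mt7663_load_firmware():
         *   MT7615_FIRMWARE_V1 (pre-2.0 N9)  -> wtbl_update_ops
         *   MT7615_FIRMWARE_V2 ("2.0" N9)    -> sta_update_ops
         *   MT7615_FIRMWARE_V3 (mt7663)      -> uni_update_ops
         */
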
+
static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
int len)
{
@@ -260,14 +1450,16 @@ static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
cur_len = min_t(int, 4096 - sizeof(struct mt7615_mcu_txd),
len);
- ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER,
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FW_SCATTER,
data, cur_len, false);
if (ret)
break;
data += cur_len;
len -= cur_len;
- mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+
+ if (mt76_is_mmio(&dev->mt76))
+ mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
}
return ret;
@@ -284,13 +1476,13 @@ static int mt7615_mcu_start_firmware(struct mt7615_dev *dev, u32 addr,
.addr = cpu_to_le32(addr),
};
- return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ,
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FW_START_REQ,
&req, sizeof(req), true);
}
static int mt7615_mcu_restart(struct mt76_dev *dev)
{
- return __mt76_mcu_send_msg(dev, -MCU_CMD_RESTART_DL_REQ, NULL,
+ return __mt76_mcu_send_msg(dev, MCU_CMD_RESTART_DL_REQ, NULL,
0, true);
}
@@ -302,7 +1494,7 @@ static int mt7615_mcu_patch_sem_ctrl(struct mt7615_dev *dev, bool get)
.op = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE),
};
- return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_SEM_CONTROL,
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_PATCH_SEM_CONTROL,
&req, sizeof(req), true);
}
@@ -315,23 +1507,59 @@ static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
.check_crc = 0,
};
- return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_FINISH_REQ,
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_PATCH_FINISH_REQ,
&req, sizeof(req), true);
}
+static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
+{
+ if (!is_mt7622(&dev->mt76))
+ return;
+
+ regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
+ MT_INFRACFG_MISC_AP2CONN_WAKE,
+ !en * MT_INFRACFG_MISC_AP2CONN_WAKE);
+}
+
static int mt7615_driver_own(struct mt7615_dev *dev)
{
- mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_DRV_OWN);
- if (!mt76_poll_msec(dev, MT_CFG_LPCR_HOST,
- MT_CFG_LPCR_HOST_FW_OWN, 0, 500)) {
+ u32 addr;
+
+ addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+ mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
+
+ mt7622_trigger_hif_int(dev, true);
+ if (!mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000)) {
dev_err(dev->mt76.dev, "Timeout for driver own\n");
return -EIO;
}
+ mt7622_trigger_hif_int(dev, false);
return 0;
}
-static int mt7615_load_patch(struct mt7615_dev *dev)
+static int mt7615_firmware_own(struct mt7615_dev *dev)
+{
+ u32 addr;
+
+ addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+ mt7622_trigger_hif_int(dev, true);
+
+ mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
+
+ if (is_mt7622(&dev->mt76) &&
+ !mt76_poll_msec(dev, MT_CFG_LPCR_HOST,
+ MT_CFG_LPCR_HOST_FW_OWN,
+ MT_CFG_LPCR_HOST_FW_OWN, 3000)) {
+ dev_err(dev->mt76.dev, "Timeout for firmware own\n");
+ return -EIO;
+ }
+ mt7622_trigger_hif_int(dev, false);
+
+ return 0;
+}
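
Driver own and firmware own are the two sides of one LPCTL handshake: the host asserts DRV_OWN and polls FW_OWN clear to take the bus, then hands it back by setting FW_OWN; on mt7622 the poll only completes after the AP2CONN wake interrupt, hence the mt7622_trigger_hif_int() bracketing. A sketch of the assumed pairing (these wrappers are illustrative, not part of this patch):

static int mt7615_suspend_sketch(struct mt7615_dev *dev)
{
        return mt7615_firmware_own(dev);        /* let the MCU power down */
}

static int mt7615_resume_sketch(struct mt7615_dev *dev)
{
        return mt7615_driver_own(dev);          /* reclaim register access */
}
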
+
+static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
{
const struct mt7615_patch_hdr *hdr;
const struct firmware *fw = NULL;
@@ -348,7 +1576,7 @@ static int mt7615_load_patch(struct mt7615_dev *dev)
return -EAGAIN;
}
- ret = request_firmware(&fw, MT7615_ROM_PATCH, dev->mt76.dev);
+ ret = request_firmware(&fw, name, dev->mt76.dev);
if (ret)
goto out;
@@ -365,8 +1593,7 @@ static int mt7615_load_patch(struct mt7615_dev *dev)
len = fw->size - sizeof(*hdr);
- ret = mt7615_mcu_init_download(dev, MCU_PATCH_ADDRESS, len,
- DL_MODE_NEED_RSP);
+ ret = mt7615_mcu_init_download(dev, addr, len, DL_MODE_NEED_RSP);
if (ret) {
dev_err(dev->mt76.dev, "Download request failed\n");
goto out;
@@ -444,13 +1671,13 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
return 0;
}
-static int mt7615_load_ram(struct mt7615_dev *dev)
+static int mt7615_load_n9(struct mt7615_dev *dev, const char *name)
{
const struct mt7615_fw_trailer *hdr;
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, MT7615_FIRMWARE_N9, dev->mt76.dev);
+ ret = request_firmware(&fw, name, dev->mt76.dev);
if (ret)
return ret;
@@ -477,9 +1704,30 @@ static int mt7615_load_ram(struct mt7615_dev *dev)
goto out;
}
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+
+ if (!strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver))) {
+ dev->fw_ver = MT7615_FIRMWARE_V2;
+ dev->mcu_ops = &sta_update_ops;
+ } else {
+ dev->fw_ver = MT7615_FIRMWARE_V1;
+ dev->mcu_ops = &wtbl_update_ops;
+ }
+
+out:
release_firmware(fw);
+ return ret;
+}
- ret = request_firmware(&fw, MT7615_FIRMWARE_CR4, dev->mt76.dev);
+static int mt7615_load_cr4(struct mt7615_dev *dev, const char *name)
+{
+ const struct mt7615_fw_trailer *hdr;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, name, dev->mt76.dev);
if (ret)
return ret;
@@ -500,8 +1748,10 @@ static int mt7615_load_ram(struct mt7615_dev *dev)
goto out;
ret = mt7615_mcu_start_firmware(dev, 0, FW_START_WORKING_PDA_CR4);
- if (ret)
+ if (ret) {
dev_err(dev->mt76.dev, "Failed to start CR4 firmware\n");
+ goto out;
+ }
out:
release_firmware(fw);
@@ -509,6 +1759,17 @@ out:
return ret;
}
+static int mt7615_load_ram(struct mt7615_dev *dev)
+{
+ int ret;
+
+ ret = mt7615_load_n9(dev, MT7615_FIRMWARE_N9);
+ if (ret)
+ return ret;
+
+ return mt7615_load_cr4(dev, MT7615_FIRMWARE_CR4);
+}
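
Patch and RAM images all follow the same three-step MCU protocol: declare the download window, stream the payload in FW_SCATTER chunks, then kick execution. Condensed from the helpers above (error handling elided):

        err = mt7615_mcu_init_download(dev, addr, len, mode);
        err = mt7615_mcu_send_firmware(dev, data, len);
        err = mt7615_mcu_start_firmware(dev, addr, flags);
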
+
static int mt7615_load_firmware(struct mt7615_dev *dev)
{
int ret;
@@ -521,7 +1782,7 @@ static int mt7615_load_firmware(struct mt7615_dev *dev)
return -EIO;
}
- ret = mt7615_load_patch(dev);
+ ret = mt7615_load_patch(dev, MT7615_PATCH_ADDRESS, MT7615_ROM_PATCH);
if (ret)
return ret;
@@ -536,7 +1797,164 @@ static int mt7615_load_firmware(struct mt7615_dev *dev)
return -EIO;
}
- mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+ return 0;
+}
+
+static int mt7622_load_firmware(struct mt7615_dev *dev)
+{
+ int ret;
+ u32 val;
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+
+ val = mt76_get_field(dev, MT_TOP_OFF_RSV, MT_TOP_OFF_RSV_FW_STATE);
+ if (val != FW_STATE_FW_DOWNLOAD) {
+ dev_err(dev->mt76.dev, "Firmware is not ready for download\n");
+ return -EIO;
+ }
+
+ ret = mt7615_load_patch(dev, MT7622_PATCH_ADDRESS, MT7622_ROM_PATCH);
+ if (ret)
+ return ret;
+
+ ret = mt7615_load_n9(dev, MT7622_FIRMWARE_N9);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_TOP_OFF_RSV, MT_TOP_OFF_RSV_FW_STATE,
+ FIELD_PREP(MT_TOP_OFF_RSV_FW_STATE,
+ FW_STATE_NORMAL_TRX), 1500)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+
+ return 0;
+}
+
+int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl)
+{
+ struct {
+ u8 ctrl_val;
+ u8 pad[3];
+ } data = {
+ .ctrl_val = ctrl
+ };
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_LOG_2_HOST,
+ &data, sizeof(data), true);
+}
+
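mt7615_mcu_init() below turns firmware-to-host logging off by default (ctrl = 0). Re-enabling it at runtime is a one-call affair; a hedged sketch wiring it to the fw_debug flag added to struct mt7615_dev (the debugfs plumbing is assumed, not shown):

    /* assumes #include "mt7615.h" */
    static int example_set_fw_debug(struct mt7615_dev *dev, bool enable)
    {
            dev->fw_debug = enable;
            /* ctrl != 0 asks the MCU to forward its log events */
            return mt7615_mcu_fw_log_2_host(dev, enable ? 1 : 0);
    }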
+static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
+{
+ u32 offset = 0, override_addr = 0, flag = 0;
+ const struct mt7663_fw_trailer *hdr;
+ const struct mt7663_fw_buf *buf;
+ const struct firmware *fw;
+ const u8 *base_addr;
+ int i, ret;
+
+ ret = request_firmware(&fw, name, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < FW_V3_COMMON_TAILER_SIZE) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7663_fw_trailer *)(fw->data + fw->size -
+ FW_V3_COMMON_TAILER_SIZE);
+
+ dev_info(dev->mt76.dev, "N9 Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+ dev_info(dev->mt76.dev, "Region number: 0x%x\n", hdr->n_region);
+
+ base_addr = fw->data + fw->size - FW_V3_COMMON_TAILER_SIZE;

+ for (i = 0; i < hdr->n_region; i++) {
+ u32 shift = (hdr->n_region - i) * FW_V3_REGION_TAILER_SIZE;
+ u32 len, addr, mode;
+
+ dev_info(dev->mt76.dev, "Parsing tailer Region: %d\n", i);
+
+ buf = (const struct mt7663_fw_buf *)(base_addr - shift);
+ mode = mt7615_mcu_gen_dl_mode(buf->feature_set, false);
+ addr = le32_to_cpu(buf->img_dest_addr);
+ len = le32_to_cpu(buf->img_size);
+
+ ret = mt7615_mcu_init_download(dev, addr, len, mode);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt7615_mcu_send_firmware(dev, fw->data + offset, len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send firmware\n");
+ goto out;
+ }
+
+ offset += le32_to_cpu(buf->img_size);
+ if (buf->feature_set & DL_MODE_VALID_RAM_ENTRY) {
+ override_addr = le32_to_cpu(buf->img_dest_addr);
+ dev_info(dev->mt76.dev, "Region %d, override_addr = 0x%08x\n",
+ i, override_addr);
+ }
+ }
+
+ if (is_mt7663(&dev->mt76)) {
+ flag |= FW_START_DLYCAL;
+ if (override_addr)
+ flag |= FW_START_OVERRIDE;
+
+ dev_info(dev->mt76.dev, "override_addr = 0x%08x, option = %d\n",
+ override_addr, flag);
+ }
+
+ ret = mt7615_mcu_start_firmware(dev, override_addr, flag);
+ if (ret)
+ dev_err(dev->mt76.dev, "Failed to start N9 firmware\n");
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
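The v3 image parsed above places the per-region descriptors immediately before the common trailer at the end of the file, so descriptor i sits (n_region - i) * FW_V3_REGION_TAILER_SIZE bytes back from the trailer while the payloads run front-to-back. A minimal sketch of that addressing, assuming the driver's size constants:

    /* assumes the FW_V3_* constants used by mt7663_load_n9() */
    static const u8 *example_region_desc(const u8 *data, size_t size,
                                         int i, int n_region)
    {
            const u8 *tailer = data + size - FW_V3_COMMON_TAILER_SIZE;

            /* Descriptors are indexed back-to-front from the trailer. */
            return tailer - (n_region - i) * FW_V3_REGION_TAILER_SIZE;
    }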
+static int mt7663_load_firmware(struct mt7615_dev *dev)
+{
+ int ret;
+
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
+
+ ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
+ if (ret) {
+ dev_dbg(dev->mt76.dev, "Firmware is already download\n");
+ return -EIO;
+ }
+
+ ret = mt7615_load_patch(dev, MT7663_PATCH_ADDRESS, MT7663_ROM_PATCH);
+ if (ret)
+ return ret;
+
+ dev->fw_ver = MT7615_FIRMWARE_V3;
+ dev->mcu_ops = &uni_update_ops;
+
+ ret = mt7663_load_n9(dev, MT7663_FIRMWARE_N9);
+ if (ret)
+ return ret;
+
+ if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY,
+ MT_TOP_MISC2_FW_N9_RDY, 1500)) {
+ ret = mt76_get_field(dev, MT_CONN_ON_MISC,
+ MT7663_TOP_MISC2_FW_STATE);
+ dev_err(dev->mt76.dev,
+ "Timeout for initializing firmware (state %d)\n", ret);
+ return -EIO;
+ }
+
+ mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_BYPASS_TX_SCH);
dev_dbg(dev->mt76.dev, "Firmware init done\n");
@@ -546,6 +1964,7 @@ static int mt7615_load_firmware(struct mt7615_dev *dev)
int mt7615_mcu_init(struct mt7615_dev *dev)
{
static const struct mt76_mcu_ops mt7615_mcu_ops = {
+ .mcu_skb_send_msg = mt7615_mcu_send_message,
.mcu_send_msg = mt7615_mcu_msg_send,
.mcu_restart = mt7615_mcu_restart,
};
@@ -557,11 +1976,24 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
if (ret)
return ret;
- ret = mt7615_load_firmware(dev);
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7622:
+ ret = mt7622_load_firmware(dev);
+ break;
+ case 0x7663:
+ ret = mt7663_load_firmware(dev);
+ break;
+ default:
+ ret = mt7615_load_firmware(dev);
+ break;
+ }
if (ret)
return ret;
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+ dev_dbg(dev->mt76.dev, "Firmware init done\n");
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ mt7615_mcu_fw_log_2_host(dev, 0);
return 0;
}
@@ -569,55 +2001,70 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
void mt7615_mcu_exit(struct mt7615_dev *dev)
{
__mt76_mcu_restart(&dev->mt76);
- mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_FW_OWN);
- skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
+ mt7615_firmware_own(dev);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
}
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
{
struct {
u8 buffer_mode;
- u8 pad;
- u16 len;
+ u8 content_format;
+ __le16 len;
} __packed req_hdr = {
.buffer_mode = 1,
- .len = __MT_EE_MAX - MT_EE_NIC_CONF_0,
};
- int ret, len = sizeof(req_hdr) + __MT_EE_MAX - MT_EE_NIC_CONF_0;
- u8 *req, *eep = (u8 *)dev->mt76.eeprom.data;
+ u8 *eep = (u8 *)dev->mt76.eeprom.data;
+ struct sk_buff *skb;
+ int eep_len, offset;
- req = kzalloc(len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7622:
+ eep_len = MT7622_EE_MAX - MT_EE_NIC_CONF_0;
+ offset = MT_EE_NIC_CONF_0;
+ break;
+ case 0x7663:
+ eep_len = MT7663_EE_MAX - MT_EE_CHIP_ID;
+ req_hdr.content_format = 1;
+ offset = MT_EE_CHIP_ID;
+ break;
+ default:
+ eep_len = MT7615_EE_MAX - MT_EE_NIC_CONF_0;
+ offset = MT_EE_NIC_CONF_0;
+ break;
+ }
- memcpy(req, &req_hdr, sizeof(req_hdr));
- memcpy(req + sizeof(req_hdr), eep + MT_EE_NIC_CONF_0,
- __MT_EE_MAX - MT_EE_NIC_CONF_0);
+ req_hdr.len = cpu_to_le16(eep_len);
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
- req, len, true);
- kfree(req);
+ skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) + eep_len);
+ if (!skb)
+ return -ENOMEM;
- return ret;
+ skb_put_data(skb, &req_hdr, sizeof(req_hdr));
+ skb_put_data(skb, eep + offset, eep_len);
+
+ return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_EFUSE_BUFFER_MODE, true);
}
-int mt7615_mcu_init_mac(struct mt7615_dev *dev)
+int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable)
{
struct {
u8 enable;
u8 band;
u8 rsv[2];
} __packed req = {
- .enable = 1,
- .band = 0,
+ .enable = enable,
+ .band = band,
};
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL,
&req, sizeof(req), true);
}
-int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val)
+int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val)
{
+ struct mt7615_dev *dev = phy->dev;
struct {
u8 prot_idx;
u8 band;
@@ -626,7 +2073,7 @@ int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val)
__le32 pkt_thresh;
} __packed req = {
.prot_idx = 1,
- .band = 0,
+ .band = phy != &dev->phy,
.len_thresh = cpu_to_le32(val),
.pkt_thresh = cpu_to_le32(0x2),
};
@@ -672,329 +2119,62 @@ int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
&req, sizeof(req), true);
}
-int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter)
+int mt7615_mcu_set_dbdc(struct mt7615_dev *dev)
{
-#define ENTER_PM_STATE 1
-#define EXIT_PM_STATE 2
- struct {
- u8 pm_number;
- u8 pm_state;
- u8 bssid[ETH_ALEN];
- u8 dtim_period;
- u8 wlan_idx;
- __le16 bcn_interval;
- __le32 aid;
- __le32 rx_filter;
- u8 band_idx;
- u8 rsv[3];
- __le32 feature;
- u8 omac_idx;
- u8 wmm_idx;
- u8 bcn_loss_cnt;
- u8 bcn_sp_duration;
- } __packed req = {
- .pm_number = 5,
- .pm_state = (enter) ? ENTER_PM_STATE : EXIT_PM_STATE,
- .band_idx = 0,
+ struct mt7615_phy *ext_phy = mt7615_ext_phy(dev);
+ struct dbdc_entry {
+ u8 type;
+ u8 index;
+ u8 band;
+ u8 _rsv;
};
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL,
- &req, sizeof(req), true);
-}
-
-int mt7615_mcu_set_dev_info(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool enable)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct {
- struct req_hdr {
- u8 omac_idx;
- u8 band_idx;
- __le16 tlv_num;
- u8 is_tlv_append;
- u8 rsv[3];
- } __packed hdr;
- struct req_tlv {
- __le16 tag;
- __le16 len;
- u8 active;
- u8 band_idx;
- u8 omac_addr[ETH_ALEN];
- } __packed tlv;
- } data = {
- .hdr = {
- .omac_idx = mvif->omac_idx,
- .band_idx = mvif->band_idx,
- .tlv_num = cpu_to_le16(1),
- .is_tlv_append = 1,
- },
- .tlv = {
- .tag = cpu_to_le16(DEV_INFO_ACTIVE),
- .len = cpu_to_le16(sizeof(struct req_tlv)),
- .active = enable,
- .band_idx = mvif->band_idx,
- },
+ u8 enable;
+ u8 num;
+ u8 _rsv[2];
+ struct dbdc_entry entry[64];
+ } req = {
+ .enable = !!ext_phy,
};
+ int i;
- memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE,
- &data, sizeof(data), true);
-}
-
-static void
-mt7615_mcu_bss_info_omac_header(struct mt7615_vif *mvif, u8 *data,
- u32 conn_type)
-{
- struct bss_info_omac *hdr = (struct bss_info_omac *)data;
- u8 idx;
-
- idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
- hdr->tag = cpu_to_le16(BSS_INFO_OMAC);
- hdr->len = cpu_to_le16(sizeof(struct bss_info_omac));
- hdr->hw_bss_idx = idx;
- hdr->omac_idx = mvif->omac_idx;
- hdr->band_idx = mvif->band_idx;
- hdr->conn_type = cpu_to_le32(conn_type);
-}
-
-static void
-mt7615_mcu_bss_info_basic_header(struct ieee80211_vif *vif, u8 *data,
- u32 net_type, u8 tx_wlan_idx,
- bool enable)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct bss_info_basic *hdr = (struct bss_info_basic *)data;
-
- hdr->tag = cpu_to_le16(BSS_INFO_BASIC);
- hdr->len = cpu_to_le16(sizeof(struct bss_info_basic));
- hdr->network_type = cpu_to_le32(net_type);
- hdr->active = enable;
- hdr->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
- memcpy(hdr->bssid, vif->bss_conf.bssid, ETH_ALEN);
- hdr->wmm_idx = mvif->wmm_idx;
- hdr->dtim_period = vif->bss_conf.dtim_period;
- hdr->bmc_tx_wlan_idx = tx_wlan_idx;
-}
-
-static void
-mt7615_mcu_bss_info_ext_header(struct mt7615_vif *mvif, u8 *data)
-{
-/* SIFS 20us + 512 byte beacon tranmitted by 1Mbps (3906us) */
-#define BCN_TX_ESTIMATE_TIME (4096 + 20)
- struct bss_info_ext_bss *hdr = (struct bss_info_ext_bss *)data;
- int ext_bss_idx, tsf_offset;
-
- ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
- if (ext_bss_idx < 0)
- return;
-
- hdr->tag = cpu_to_le16(BSS_INFO_EXT_BSS);
- hdr->len = cpu_to_le16(sizeof(struct bss_info_ext_bss));
- tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
- hdr->mbss_tsf_offset = cpu_to_le32(tsf_offset);
-}
-
-int mt7615_mcu_set_bss_info(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, int en)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct req_hdr {
- u8 bss_idx;
- u8 rsv0;
- __le16 tlv_num;
- u8 is_tlv_append;
- u8 rsv1[3];
- } __packed;
- int len = sizeof(struct req_hdr) + sizeof(struct bss_info_basic);
- int ret, i, features = BIT(BSS_INFO_BASIC), ntlv = 1;
- u32 conn_type = 0, net_type = NETWORK_INFRA;
- u8 *buf, *data, tx_wlan_idx = 0;
- struct req_hdr *hdr;
-
- if (en) {
- len += sizeof(struct bss_info_omac);
- features |= BIT(BSS_INFO_OMAC);
- if (mvif->omac_idx > EXT_BSSID_START) {
- len += sizeof(struct bss_info_ext_bss);
- features |= BIT(BSS_INFO_EXT_BSS);
- ntlv++;
- }
- ntlv++;
- }
-
- switch (vif->type) {
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MESH_POINT:
- tx_wlan_idx = mvif->sta.wcid.idx;
- conn_type = CONNECTION_INFRA_AP;
- break;
- case NL80211_IFTYPE_STATION: {
- /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
- if (en) {
- struct ieee80211_sta *sta;
- struct mt7615_sta *msta;
-
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
- if (!sta) {
- rcu_read_unlock();
- return -EINVAL;
- }
-
- msta = (struct mt7615_sta *)sta->drv_priv;
- tx_wlan_idx = msta->wcid.idx;
- rcu_read_unlock();
- }
- conn_type = CONNECTION_INFRA_STA;
- break;
- }
- case NL80211_IFTYPE_ADHOC:
- conn_type = CONNECTION_IBSS_ADHOC;
- tx_wlan_idx = mvif->sta.wcid.idx;
- net_type = NETWORK_IBSS;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- buf = kzalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (!ext_phy)
+ goto out;
- hdr = (struct req_hdr *)buf;
- hdr->bss_idx = mvif->idx;
- hdr->tlv_num = cpu_to_le16(ntlv);
- hdr->is_tlv_append = 1;
+#define ADD_DBDC_ENTRY(_type, _idx, _band) \
+ do { \
+ req.entry[req.num].type = _type; \
+ req.entry[req.num].index = _idx; \
+ req.entry[req.num++].band = _band; \
+ } while (0)
- data = buf + sizeof(*hdr);
- for (i = 0; i < BSS_INFO_MAX_NUM; i++) {
- int tag = ffs(features & BIT(i)) - 1;
+ for (i = 0; i < 4; i++) {
+ bool band = !!(ext_phy->omac_mask & BIT(i));
- switch (tag) {
- case BSS_INFO_OMAC:
- mt7615_mcu_bss_info_omac_header(mvif, data,
- conn_type);
- data += sizeof(struct bss_info_omac);
- break;
- case BSS_INFO_BASIC:
- mt7615_mcu_bss_info_basic_header(vif, data, net_type,
- tx_wlan_idx, en);
- data += sizeof(struct bss_info_basic);
- break;
- case BSS_INFO_EXT_BSS:
- mt7615_mcu_bss_info_ext_header(mvif, data);
- data += sizeof(struct bss_info_ext_bss);
- break;
- default:
- break;
- }
+ ADD_DBDC_ENTRY(DBDC_TYPE_BSS, i, band);
}
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BSS_INFO_UPDATE,
- buf, len, true);
- kfree(buf);
-
- return ret;
-}
-
-static int
-mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev,
- struct mt7615_vif *mvif)
-{
- struct {
- struct wtbl_req_hdr hdr;
- struct wtbl_generic g_wtbl;
- struct wtbl_rx rx_wtbl;
- } req = {
- .hdr = {
- .wlan_idx = mvif->sta.wcid.idx,
- .operation = WTBL_RESET_AND_SET,
- .tlv_num = cpu_to_le16(2),
- },
- .g_wtbl = {
- .tag = cpu_to_le16(WTBL_GENERIC),
- .len = cpu_to_le16(sizeof(struct wtbl_generic)),
- .muar_idx = 0xe,
- },
- .rx_wtbl = {
- .tag = cpu_to_le16(WTBL_RX),
- .len = cpu_to_le16(sizeof(struct wtbl_rx)),
- .rca1 = 1,
- .rca2 = 1,
- .rv = 1,
- },
- };
- eth_broadcast_addr(req.g_wtbl.peer_addr);
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &req, sizeof(req), true);
-}
-
-int mt7615_mcu_wtbl_bmc(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool enable)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ for (i = 0; i < 14; i++) {
+ bool band = !!(ext_phy->omac_mask & BIT(0x11 + i));
- if (!enable) {
- struct wtbl_req_hdr req = {
- .wlan_idx = mvif->sta.wcid.idx,
- .operation = WTBL_RESET_AND_SET,
- };
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &req, sizeof(req), true);
+ ADD_DBDC_ENTRY(DBDC_TYPE_MBSS, i, band);
}
- return mt7615_mcu_add_wtbl_bmc(dev, mvif);
-}
+ ADD_DBDC_ENTRY(DBDC_TYPE_MU, 0, 1);
-int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct {
- struct wtbl_req_hdr hdr;
- struct wtbl_generic g_wtbl;
- struct wtbl_rx rx_wtbl;
- } req = {
- .hdr = {
- .wlan_idx = msta->wcid.idx,
- .operation = WTBL_RESET_AND_SET,
- .tlv_num = cpu_to_le16(2),
- },
- .g_wtbl = {
- .tag = cpu_to_le16(WTBL_GENERIC),
- .len = cpu_to_le16(sizeof(struct wtbl_generic)),
- .muar_idx = mvif->omac_idx,
- .qos = sta->wme,
- .partial_aid = cpu_to_le16(sta->aid),
- },
- .rx_wtbl = {
- .tag = cpu_to_le16(WTBL_RX),
- .len = cpu_to_le16(sizeof(struct wtbl_rx)),
- .rca1 = vif->type != NL80211_IFTYPE_AP,
- .rca2 = 1,
- .rv = 1,
- },
- };
- memcpy(req.g_wtbl.peer_addr, sta->addr, ETH_ALEN);
+ for (i = 0; i < 3; i++)
+ ADD_DBDC_ENTRY(DBDC_TYPE_BF, i, 1);
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &req, sizeof(req), true);
-}
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 0, 0);
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 1, 0);
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 2, 1);
+ ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 3, 1);
-int mt7615_mcu_del_wtbl(struct mt7615_dev *dev,
- struct ieee80211_sta *sta)
-{
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct wtbl_req_hdr req = {
- .wlan_idx = msta->wcid.idx,
- .operation = WTBL_RESET_AND_SET,
- };
+ ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 0, 0);
+ ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 1, 1);
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
+out:
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DBDC_CTRL,
&req, sizeof(req), true);
}
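For readability, one ADD_DBDC_ENTRY() invocation from the table above, expanded by hand (the post-increment in the macro is split out here; the effect is identical):

    /* ADD_DBDC_ENTRY(DBDC_TYPE_WMM, 2, 1) appends one table row
     * inside mt7615_mcu_set_dbdc(): */
    req.entry[req.num].type  = DBDC_TYPE_WMM;   /* resource class */
    req.entry[req.num].index = 2;               /* WMM set 2      */
    req.entry[req.num].band  = 1;               /* ext-phy band   */
    req.num++;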
@@ -1008,238 +2188,72 @@ int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
&req, sizeof(req), true);
}
-int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool en)
+int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
+ enum mt7615_rdd_cmd cmd, u8 index,
+ u8 rx_sel, u8 val)
{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct {
- struct sta_req_hdr hdr;
- struct sta_rec_basic basic;
+ u8 ctrl;
+ u8 rdd_idx;
+ u8 rdd_rx_sel;
+ u8 val;
+ u8 rsv[4];
} req = {
- .hdr = {
- .bss_idx = mvif->idx,
- .wlan_idx = mvif->sta.wcid.idx,
- .tlv_num = cpu_to_le16(1),
- .is_tlv_append = 1,
- .muar_idx = mvif->omac_idx,
- },
- .basic = {
- .tag = cpu_to_le16(STA_REC_BASIC),
- .len = cpu_to_le16(sizeof(struct sta_rec_basic)),
- .conn_type = cpu_to_le32(CONNECTION_INFRA_BC),
- },
+ .ctrl = cmd,
+ .rdd_idx = index,
+ .rdd_rx_sel = rx_sel,
+ .val = val,
};
- eth_broadcast_addr(req.basic.peer_addr);
-
- if (en) {
- req.basic.conn_state = CONN_STATE_PORT_SECURE;
- req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER |
- EXTRA_INFO_NEW);
- } else {
- req.basic.conn_state = CONN_STATE_DISCONNECT;
- req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
- }
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL,
&req, sizeof(req), true);
}
-int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool en)
+int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
-
struct {
- struct sta_req_hdr hdr;
- struct sta_rec_basic basic;
+ u16 tag;
+ u16 min_lpn;
} req = {
- .hdr = {
- .bss_idx = mvif->idx,
- .wlan_idx = msta->wcid.idx,
- .tlv_num = cpu_to_le16(1),
- .is_tlv_append = 1,
- .muar_idx = mvif->omac_idx,
- },
- .basic = {
- .tag = cpu_to_le16(STA_REC_BASIC),
- .len = cpu_to_le16(sizeof(struct sta_rec_basic)),
- .qos = sta->wme,
- .aid = cpu_to_le16(sta->aid),
- },
+ .tag = 0x1,
+ .min_lpn = val,
};
- memcpy(req.basic.peer_addr, sta->addr, ETH_ALEN);
- switch (vif->type) {
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MESH_POINT:
- req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
- break;
- case NL80211_IFTYPE_STATION:
- req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
- break;
- case NL80211_IFTYPE_ADHOC:
- req.basic.conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- if (en) {
- req.basic.conn_state = CONN_STATE_PORT_SECURE;
- req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER |
- EXTRA_INFO_NEW);
- } else {
- req.basic.conn_state = CONN_STATE_DISCONNECT;
- req.basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
- }
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
&req, sizeof(req), true);
}
-int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- int en)
+int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
+ const struct mt7615_dfs_pulse *pulse)
{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct mt76_wcid *wcid = &dev->mt76.global_wcid;
- struct ieee80211_mutable_offsets offs;
- struct req {
- u8 omac_idx;
- u8 enable;
- u8 wlan_idx;
- u8 band_idx;
- u8 pkt_type;
- u8 need_pre_tbtt_int;
- __le16 csa_ie_pos;
- __le16 pkt_len;
- __le16 tim_ie_pos;
- u8 pkt[512];
- u8 csa_cnt;
- /* bss color change */
- u8 bcc_cnt;
- __le16 bcc_ie_pos;
- } __packed req = {
- .omac_idx = mvif->omac_idx,
- .enable = en,
- .wlan_idx = wcid->idx,
- .band_idx = mvif->band_idx,
- };
- struct sk_buff *skb;
-
- skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs);
- if (!skb)
- return -EINVAL;
-
- if (skb->len > 512 - MT_TXD_SIZE) {
- dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
- dev_kfree_skb(skb);
- return -EINVAL;
- }
-
- mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
- 0, NULL);
- memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
- req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
- req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
- if (offs.csa_counter_offs[0]) {
- u16 csa_offs;
-
- csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4;
- req.csa_ie_pos = cpu_to_le16(csa_offs);
- req.csa_cnt = skb->data[offs.csa_counter_offs[0]];
- }
- dev_kfree_skb(skb);
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD,
- &req, sizeof(req), true);
-}
-
-int mt7615_mcu_set_tx_power(struct mt7615_dev *dev)
-{
- int i, ret, n_chains = hweight8(dev->mt76.antenna_mask);
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
- int freq = chandef->center_freq1, len, target_chains;
- u8 *req, *data, *eep = (u8 *)dev->mt76.eeprom.data;
- enum nl80211_band band = chandef->chan->band;
- struct ieee80211_hw *hw = mt76_hw(dev);
struct {
- u8 center_chan;
- u8 dbdc_idx;
- u8 band;
- u8 rsv;
- } __packed req_hdr = {
- .center_chan = ieee80211_frequency_to_channel(freq),
- .band = band,
+ u16 tag;
+ struct mt7615_dfs_pulse pulse;
+ } req = {
+ .tag = 0x3,
};
- s8 tx_power;
-
- len = sizeof(req_hdr) + __MT_EE_MAX - MT_EE_NIC_CONF_0;
- req = kzalloc(len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
- memcpy(req, &req_hdr, sizeof(req_hdr));
- data = req + sizeof(req_hdr);
- memcpy(data, eep + MT_EE_NIC_CONF_0,
- __MT_EE_MAX - MT_EE_NIC_CONF_0);
+ memcpy(&req.pulse, pulse, sizeof(*pulse));
- tx_power = hw->conf.power_level * 2;
- switch (n_chains) {
- case 4:
- tx_power -= 12;
- break;
- case 3:
- tx_power -= 8;
- break;
- case 2:
- tx_power -= 6;
- break;
- default:
- break;
- }
- tx_power = max_t(s8, tx_power, 0);
- dev->mt76.txpower_cur = tx_power;
-
- target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
- for (i = 0; i < target_chains; i++) {
- int index = -MT_EE_NIC_CONF_0;
-
- ret = mt7615_eeprom_get_power_index(dev, chandef->chan, i);
- if (ret < 0)
- goto out;
-
- index += ret;
- data[index] = min_t(u8, data[index], tx_power);
- }
-
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL,
- req, len, true);
-out:
- kfree(req);
-
- return ret;
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
+ &req, sizeof(req), true);
}
-int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
- enum mt7615_rdd_cmd cmd, u8 index,
- u8 rx_sel, u8 val)
+int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
+ const struct mt7615_dfs_pattern *pattern)
{
struct {
- u8 ctrl;
- u8 rdd_idx;
- u8 rdd_rx_sel;
- u8 val;
- u8 rsv[4];
+ u16 tag;
+ u16 radar_type;
+ struct mt7615_dfs_pattern pattern;
} req = {
- .ctrl = cmd,
- .rdd_idx = index,
- .rdd_rx_sel = rx_sel,
- .val = val,
+ .tag = 0x2,
+ .radar_type = index,
};
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL,
+ memcpy(&req.pattern, pattern, sizeof(*pattern));
+
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
&req, sizeof(req), true);
}
@@ -1274,9 +2288,35 @@ int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev)
&req, sizeof(req), false);
}
-int mt7615_mcu_set_channel(struct mt7615_dev *dev)
+static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
+ int n_chains = hweight8(mphy->antenna_mask);
+ int tx_power;
+ int i;
+
+ tx_power = hw->conf.power_level * 2 -
+ mt76_tx_power_nss_delta(n_chains);
+ mphy->txpower_cur = tx_power;
+
+ for (i = 0; i < MT_SKU_1SS_DELTA; i++)
+ sku[i] = tx_power;
+
+ for (i = 0; i < 4; i++) {
+ int delta = 0;
+
+ if (i < n_chains - 1)
+ delta = mt76_tx_power_nss_delta(n_chains) -
+ mt76_tx_power_nss_delta(i + 1);
+ sku[MT_SKU_1SS_DELTA + i] = delta;
+ }
+}
+
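To make the per-stream deltas concrete, assuming mt76_tx_power_nss_delta() returns {0, 6, 9, 12} half-dB steps for one to four chains:

    /* Worked example: power_level = 20 dBm, n_chains = 4
     *   tx_power              = 20 * 2 - 12 = 28 half-dB
     *   sku[MT_SKU_1SS_DELTA] = 12 - 0 = 12   (1SS may run hotter)
     *   sku[MT_SKU_2SS_DELTA] = 12 - 6 = 6
     *   sku[MT_SKU_3SS_DELTA] = 12 - 9 = 3
     *   sku[MT_SKU_4SS_DELTA] = 0             (i == n_chains - 1)
     */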
+int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
+{
+ struct mt7615_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
struct {
u8 control_chan;
@@ -1299,11 +2339,10 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev)
} req = {
.control_chan = chandef->chan->hw_value,
.center_chan = ieee80211_frequency_to_channel(freq1),
- .tx_streams = (dev->mt76.chainmask >> 8) & 0xf,
- .rx_streams_mask = dev->mt76.antenna_mask,
+ .tx_streams = hweight8(phy->mt76->antenna_mask),
+ .rx_streams_mask = phy->chainmask,
.center_chan2 = ieee80211_frequency_to_channel(freq2),
};
- int ret;
if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
@@ -1313,7 +2352,9 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev)
else
req.switch_reason = CH_SWITCH_NORMAL;
- switch (dev->mt76.chandef.width) {
+ req.band_idx = phy != &dev->phy;
+
+ switch (chandef->width) {
case NL80211_CHAN_WIDTH_40:
req.bw = CMD_CBW_40MHZ;
break;
@@ -1338,278 +2379,39 @@ int mt7615_mcu_set_channel(struct mt7615_dev *dev)
req.bw = CMD_CBW_20MHZ;
break;
}
- memset(req.txpower_sku, 0x3f, 49);
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH,
- &req, sizeof(req), true);
- if (ret)
- return ret;
+ mt7615_mcu_set_txpower_sku(phy, req.txpower_sku);
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RX_PATH,
- &req, sizeof(req), true);
+ return __mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}
-int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
- struct sta_req_hdr *sta_hdr;
- struct wtbl_raw *wtbl_raw;
- struct sta_rec_ht *sta_ht;
- struct wtbl_ht *wtbl_ht;
- int buf_len, ret, ntlv = 2;
- u32 msk, val = 0;
- u8 *buf;
-
- buf = kzalloc(MT7615_WTBL_UPDATE_MAX_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- wtbl_hdr = (struct wtbl_req_hdr *)buf;
- wtbl_hdr->wlan_idx = msta->wcid.idx;
- wtbl_hdr->operation = WTBL_SET;
- buf_len = sizeof(*wtbl_hdr);
-
- /* ht basic */
- wtbl_ht = (struct wtbl_ht *)(buf + buf_len);
- wtbl_ht->tag = cpu_to_le16(WTBL_HT);
- wtbl_ht->len = cpu_to_le16(sizeof(*wtbl_ht));
- wtbl_ht->ht = 1;
- wtbl_ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
- wtbl_ht->af = sta->ht_cap.ampdu_factor;
- wtbl_ht->mm = sta->ht_cap.ampdu_density;
- buf_len += sizeof(*wtbl_ht);
-
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
- val |= MT_WTBL_W5_SHORT_GI_20;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
- val |= MT_WTBL_W5_SHORT_GI_40;
-
- /* vht basic */
- if (sta->vht_cap.vht_supported) {
- struct wtbl_vht *wtbl_vht;
-
- wtbl_vht = (struct wtbl_vht *)(buf + buf_len);
- buf_len += sizeof(*wtbl_vht);
- wtbl_vht->tag = cpu_to_le16(WTBL_VHT);
- wtbl_vht->len = cpu_to_le16(sizeof(*wtbl_vht));
- wtbl_vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC;
- wtbl_vht->vht = 1;
- ntlv++;
-
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
- val |= MT_WTBL_W5_SHORT_GI_80;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
- val |= MT_WTBL_W5_SHORT_GI_160;
- }
-
- /* smps */
- if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) {
- struct wtbl_smps *wtbl_smps;
-
- wtbl_smps = (struct wtbl_smps *)(buf + buf_len);
- buf_len += sizeof(*wtbl_smps);
- wtbl_smps->tag = cpu_to_le16(WTBL_SMPS);
- wtbl_smps->len = cpu_to_le16(sizeof(*wtbl_smps));
- wtbl_smps->smps = 1;
- ntlv++;
- }
-
- /* sgi */
- msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
- MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
-
- wtbl_raw = (struct wtbl_raw *)(buf + buf_len);
- buf_len += sizeof(*wtbl_raw);
- wtbl_raw->tag = cpu_to_le16(WTBL_RAW_DATA);
- wtbl_raw->len = cpu_to_le16(sizeof(*wtbl_raw));
- wtbl_raw->wtbl_idx = 1;
- wtbl_raw->dw = 5;
- wtbl_raw->msk = cpu_to_le32(~msk);
- wtbl_raw->val = cpu_to_le32(val);
-
- wtbl_hdr->tlv_num = cpu_to_le16(ntlv);
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- buf, buf_len, true);
- if (ret)
- goto out;
-
- memset(buf, 0, MT7615_WTBL_UPDATE_MAX_SIZE);
-
- sta_hdr = (struct sta_req_hdr *)buf;
- sta_hdr->bss_idx = mvif->idx;
- sta_hdr->wlan_idx = msta->wcid.idx;
- sta_hdr->is_tlv_append = 1;
- ntlv = sta->vht_cap.vht_supported ? 2 : 1;
- sta_hdr->tlv_num = cpu_to_le16(ntlv);
- sta_hdr->muar_idx = mvif->omac_idx;
- buf_len = sizeof(*sta_hdr);
-
- sta_ht = (struct sta_rec_ht *)(buf + buf_len);
- sta_ht->tag = cpu_to_le16(STA_REC_HT);
- sta_ht->len = cpu_to_le16(sizeof(*sta_ht));
- sta_ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
- buf_len += sizeof(*sta_ht);
-
- if (sta->vht_cap.vht_supported) {
- struct sta_rec_vht *sta_vht;
-
- sta_vht = (struct sta_rec_vht *)(buf + buf_len);
- buf_len += sizeof(*sta_vht);
- sta_vht->tag = cpu_to_le16(STA_REC_VHT);
- sta_vht->len = cpu_to_le16(sizeof(*sta_vht));
- sta_vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
- sta_vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map;
- sta_vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map;
- }
-
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
- buf, buf_len, true);
-out:
- kfree(buf);
-
- return ret;
-}
-
-int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool add)
-{
- struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
- struct mt7615_vif *mvif = msta->vif;
- struct {
- struct wtbl_req_hdr hdr;
- struct wtbl_ba ba;
- } wtbl_req = {
- .hdr = {
- .wlan_idx = msta->wcid.idx,
- .operation = WTBL_SET,
- .tlv_num = cpu_to_le16(1),
- },
- .ba = {
- .tag = cpu_to_le16(WTBL_BA),
- .len = cpu_to_le16(sizeof(struct wtbl_ba)),
- .tid = params->tid,
- .ba_type = MT_BA_TYPE_ORIGINATOR,
- .sn = add ? cpu_to_le16(params->ssn) : 0,
- .ba_en = add,
- },
- };
- struct {
- struct sta_req_hdr hdr;
- struct sta_rec_ba ba;
- } sta_req = {
- .hdr = {
- .bss_idx = mvif->idx,
- .wlan_idx = msta->wcid.idx,
- .tlv_num = cpu_to_le16(1),
- .is_tlv_append = 1,
- .muar_idx = mvif->omac_idx,
- },
- .ba = {
- .tag = cpu_to_le16(STA_REC_BA),
- .len = cpu_to_le16(sizeof(struct sta_rec_ba)),
- .tid = params->tid,
- .ba_type = MT_BA_TYPE_ORIGINATOR,
- .amsdu = params->amsdu,
- .ba_en = add << params->tid,
- .ssn = cpu_to_le16(params->ssn),
- .winsize = cpu_to_le16(params->buf_size),
- },
- };
- int ret;
-
- if (add) {
- u8 idx, ba_range[] = { 4, 8, 12, 24, 36, 48, 54, 64 };
-
- for (idx = 7; idx > 0; idx--) {
- if (params->buf_size >= ba_range[idx])
- break;
- }
-
- wtbl_req.ba.ba_winsize_idx = idx;
- }
-
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &wtbl_req, sizeof(wtbl_req), true);
- if (ret)
- return ret;
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
- &sta_req, sizeof(sta_req), true);
-}
-
-int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool add)
+int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
{
- struct mt7615_sta *msta = (struct mt7615_sta *)params->sta->drv_priv;
- struct mt7615_vif *mvif = msta->vif;
struct {
- struct wtbl_req_hdr hdr;
- struct wtbl_ba ba;
- } wtbl_req = {
- .hdr = {
- .wlan_idx = msta->wcid.idx,
- .operation = WTBL_SET,
- .tlv_num = cpu_to_le16(1),
- },
- .ba = {
- .tag = cpu_to_le16(WTBL_BA),
- .len = cpu_to_le16(sizeof(struct wtbl_ba)),
- .tid = params->tid,
- .ba_type = MT_BA_TYPE_RECIPIENT,
- .rst_ba_tid = params->tid,
- .rst_ba_sel = RST_BA_MAC_TID_MATCH,
- .rst_ba_sb = 1,
- },
- };
- struct {
- struct sta_req_hdr hdr;
- struct sta_rec_ba ba;
- } sta_req = {
- .hdr = {
- .bss_idx = mvif->idx,
- .wlan_idx = msta->wcid.idx,
- .tlv_num = cpu_to_le16(1),
- .is_tlv_append = 1,
- .muar_idx = mvif->omac_idx,
- },
- .ba = {
- .tag = cpu_to_le16(STA_REC_BA),
- .len = cpu_to_le16(sizeof(struct sta_rec_ba)),
- .tid = params->tid,
- .ba_type = MT_BA_TYPE_RECIPIENT,
- .amsdu = params->amsdu,
- .ba_en = add << params->tid,
- .ssn = cpu_to_le16(params->ssn),
- .winsize = cpu_to_le16(params->buf_size),
- },
+ u8 action;
+ u8 rsv[3];
+ } req = {
+ .action = index,
};
- int ret;
-
- memcpy(wtbl_req.ba.peer_addr, params->sta->addr, ETH_ALEN);
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_STA_REC_UPDATE,
- &sta_req, sizeof(sta_req), true);
- if (ret || !add)
- return ret;
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &wtbl_req, sizeof(wtbl_req), true);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
+ sizeof(req), true);
}
-int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
+int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable)
{
+ struct mt7615_dev *dev = phy->dev;
struct {
- u8 action;
- u8 rsv[3];
+ u8 format_id;
+ u8 sku_enable;
+ u8 band_idx;
+ u8 rsv;
} req = {
- .action = index,
+ .format_id = 0,
+ .band_idx = phy != &dev->phy,
+ .sku_enable = enable,
};
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, &req,
sizeof(req), true);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
index 1fd7dffa6eef..d1f7391472fc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -23,6 +23,57 @@ struct mt7615_mcu_txd {
u32 reserved[5];
} __packed __aligned(4);
+/**
+ * struct mt7615_uni_txd - mcu command descriptor for firmware v3
+ * @txd: hardware descriptor
+ * @len: total length not including txd
+ * @cid: command identifier
+ * @pkt_type: must be 0xa0 (command packet in long format)
+ * @frag_n: fragment number
+ * @seq: sequence number
+ * @checksum: 0 means there is no checksum
+ * @s2d_index: index for command source and destination
+ * Definition | value | note
+ * CMD_S2D_IDX_H2N | 0x00 | command from HOST to WM
+ * CMD_S2D_IDX_C2N | 0x01 | command from WA to WM
+ * CMD_S2D_IDX_H2C | 0x02 | command from HOST to WA
+ * CMD_S2D_IDX_H2N_AND_H2C | 0x03 | command from HOST to WA and WM
+ *
+ * @option: command option
+ * BIT[0]: UNI_CMD_OPT_BIT_ACK
+ * set to 1 to request a firmware reply;
+ * if both UNI_CMD_OPT_BIT_ACK and UNI_CMD_OPT_BIT_SET_QUERY are
+ * set, the MCU firmware sends response event EID = 0x01
+ * (UNI_EVENT_ID_CMD_RESULT) to the host.
+ * BIT[1]: UNI_CMD_OPT_BIT_UNI_CMD
+ * 0: original command
+ * 1: unified command
+ * BIT[2]: UNI_CMD_OPT_BIT_SET_QUERY
+ * 0: QUERY command
+ * 1: SET command
+ */
+struct mt7615_uni_txd {
+ __le32 txd[8];
+
+ /* DW1 */
+ __le16 len;
+ __le16 cid;
+
+ /* DW2 */
+ u8 reserved;
+ u8 pkt_type;
+ u8 frag_n;
+ u8 seq;
+
+ /* DW3 */
+ __le16 checksum;
+ u8 s2d_index;
+ u8 option;
+
+ /* DW4 */
+ u8 reserved2[4];
+} __packed __aligned(4);
+
/* event table */
enum {
MCU_EVENT_TARGET_ADDRESS_LEN = 0x01,
@@ -45,6 +96,62 @@ enum {
MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
};
+enum {
+ MT_SKU_CCK_1_2 = 0,
+ MT_SKU_CCK_55_11,
+ MT_SKU_OFDM_6_9,
+ MT_SKU_OFDM_12_18,
+ MT_SKU_OFDM_24_36,
+ MT_SKU_OFDM_48,
+ MT_SKU_OFDM_54,
+ MT_SKU_HT20_0_8,
+ MT_SKU_HT20_32,
+ MT_SKU_HT20_1_2_9_10,
+ MT_SKU_HT20_3_4_11_12,
+ MT_SKU_HT20_5_13,
+ MT_SKU_HT20_6_14,
+ MT_SKU_HT20_7_15,
+ MT_SKU_HT40_0_8,
+ MT_SKU_HT40_32,
+ MT_SKU_HT40_1_2_9_10,
+ MT_SKU_HT40_3_4_11_12,
+ MT_SKU_HT40_5_13,
+ MT_SKU_HT40_6_14,
+ MT_SKU_HT40_7_15,
+ MT_SKU_VHT20_0,
+ MT_SKU_VHT20_1_2,
+ MT_SKU_VHT20_3_4,
+ MT_SKU_VHT20_5_6,
+ MT_SKU_VHT20_7,
+ MT_SKU_VHT20_8,
+ MT_SKU_VHT20_9,
+ MT_SKU_VHT40_0,
+ MT_SKU_VHT40_1_2,
+ MT_SKU_VHT40_3_4,
+ MT_SKU_VHT40_5_6,
+ MT_SKU_VHT40_7,
+ MT_SKU_VHT40_8,
+ MT_SKU_VHT40_9,
+ MT_SKU_VHT80_0,
+ MT_SKU_VHT80_1_2,
+ MT_SKU_VHT80_3_4,
+ MT_SKU_VHT80_5_6,
+ MT_SKU_VHT80_7,
+ MT_SKU_VHT80_8,
+ MT_SKU_VHT80_9,
+ MT_SKU_VHT160_0,
+ MT_SKU_VHT160_1_2,
+ MT_SKU_VHT160_3_4,
+ MT_SKU_VHT160_5_6,
+ MT_SKU_VHT160_7,
+ MT_SKU_VHT160_8,
+ MT_SKU_VHT160_9,
+ MT_SKU_1SS_DELTA,
+ MT_SKU_2SS_DELTA,
+ MT_SKU_3SS_DELTA,
+ MT_SKU_4SS_DELTA,
+};
+
struct mt7615_mcu_rxd {
__le32 rxd[4];
@@ -60,6 +167,52 @@ struct mt7615_mcu_rxd {
u8 s2d_index;
};
+struct mt7615_mcu_rdd_report {
+ struct mt7615_mcu_rxd rxd;
+
+ u8 idx;
+ u8 long_detected;
+ u8 constant_prf_detected;
+ u8 staggered_prf_detected;
+ u8 radar_type_idx;
+ u8 periodic_pulse_num;
+ u8 long_pulse_num;
+ u8 hw_pulse_num;
+
+ u8 out_lpn;
+ u8 out_spn;
+ u8 out_crpn;
+ u8 out_crpw;
+ u8 out_crbn;
+ u8 out_stgpn;
+ u8 out_stgpw;
+
+ u8 _rsv[2];
+
+ __le32 out_pri_const;
+ __le32 out_pri_stg[3];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ } long_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ } periodic_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 sc_pass;
+ u8 sw_reset;
+ } hw_pulse[32];
+};
+
#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
#define MCU_PKT_ID 0xa0
@@ -77,22 +230,27 @@ enum {
MCU_S2D_H2CN
};
+#define MCU_FW_PREFIX BIT(31)
+#define MCU_UNI_PREFIX BIT(30)
+#define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX)
+
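The high bits added here never reach the firmware: MCU_FW_PREFIX and MCU_UNI_PREFIX only steer how the host builds and routes the descriptor, and MCU_CMD_MASK strips them before the command id is written out. A hedged sketch of the split (the real descriptor fill lives in mt7615_mcu_fill_msg()):

    static void example_split_cmd(void)
    {
            int cmd = MCU_CMD_PATCH_SEM_CONTROL;    /* MCU_FW_PREFIX | 0x10 */
            u8 cid = cmd & MCU_CMD_MASK;            /* 0x10 on the wire     */
            bool fw_dl = cmd & MCU_FW_PREFIX;       /* firmware-download path */
            bool uni = cmd & MCU_UNI_PREFIX;        /* v3 unified command     */
    }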
enum {
- MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
- MCU_CMD_FW_START_REQ = 0x02,
+ MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01,
+ MCU_CMD_FW_START_REQ = MCU_FW_PREFIX | 0x02,
MCU_CMD_INIT_ACCESS_REG = 0x3,
MCU_CMD_PATCH_START_REQ = 0x05,
- MCU_CMD_PATCH_FINISH_REQ = 0x07,
- MCU_CMD_PATCH_SEM_CONTROL = 0x10,
+ MCU_CMD_PATCH_FINISH_REQ = MCU_FW_PREFIX | 0x07,
+ MCU_CMD_PATCH_SEM_CONTROL = MCU_FW_PREFIX | 0x10,
MCU_CMD_EXT_CID = 0xED,
- MCU_CMD_FW_SCATTER = 0xEE,
- MCU_CMD_RESTART_DL_REQ = 0xEF,
+ MCU_CMD_FW_SCATTER = MCU_FW_PREFIX | 0xEE,
+ MCU_CMD_RESTART_DL_REQ = MCU_FW_PREFIX | 0xEF,
};
enum {
MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
+ MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
@@ -102,13 +260,33 @@ enum {
MCU_EXT_CMD_WTBL_UPDATE = 0x32,
MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
+ MCU_EXT_CMD_DBDC_CTRL = 0x45,
MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
MCU_EXT_CMD_SET_RX_PATH = 0x4e,
+ MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
+ MCU_EXT_CMD_SET_RDD_TH = 0x7c,
MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
};
enum {
+ MCU_UNI_CMD_DEV_INFO_UPDATE = MCU_UNI_PREFIX | 0x01,
+ MCU_UNI_CMD_BSS_INFO_UPDATE = MCU_UNI_PREFIX | 0x02,
+ MCU_UNI_CMD_STA_REC_UPDATE = MCU_UNI_PREFIX | 0x03,
+};
+
+#define MCU_CMD_ACK BIT(0)
+#define MCU_CMD_UNI BIT(1)
+#define MCU_CMD_QUERY BIT(2)
+
+#define MCU_CMD_UNI_EXT_ACK (MCU_CMD_ACK | MCU_CMD_UNI | MCU_CMD_QUERY)
+
+enum {
+ UNI_BSS_INFO_BASIC = 0,
+ UNI_BSS_INFO_BCN_CONTENT = 7,
+};
+
+enum {
PATCH_SEM_RELEASE = 0x0,
PATCH_SEM_GET = 0x1
};
@@ -156,6 +334,23 @@ enum {
DEV_INFO_MAX_NUM
};
+enum {
+ DBDC_TYPE_WMM,
+ DBDC_TYPE_MGMT,
+ DBDC_TYPE_BSS,
+ DBDC_TYPE_MBSS,
+ DBDC_TYPE_REPEATER,
+ DBDC_TYPE_MU,
+ DBDC_TYPE_BF,
+ DBDC_TYPE_PTA,
+ __DBDC_TYPE_MAX,
+};
+
+struct tlv {
+ __le16 tag;
+ __le16 len;
+} __packed;
+
struct bss_info_omac {
__le16 tag;
__le16 len;
@@ -365,18 +560,28 @@ struct wtbl_raw {
__le32 val;
} __packed;
-#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
- sizeof(struct wtbl_generic) + \
- sizeof(struct wtbl_rx) + \
- sizeof(struct wtbl_ht) + \
- sizeof(struct wtbl_vht) + \
- sizeof(struct wtbl_tx_ps) + \
- sizeof(struct wtbl_hdr_trans) + \
- sizeof(struct wtbl_ba) + \
- sizeof(struct wtbl_bf) + \
- sizeof(struct wtbl_smps) + \
- sizeof(struct wtbl_pn) + \
- sizeof(struct wtbl_spe))
+#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_generic) + \
+ sizeof(struct wtbl_rx) + \
+ sizeof(struct wtbl_ht) + \
+ sizeof(struct wtbl_vht) + \
+ sizeof(struct wtbl_tx_ps) + \
+ sizeof(struct wtbl_hdr_trans) +\
+ sizeof(struct wtbl_ba) + \
+ sizeof(struct wtbl_bf) + \
+ sizeof(struct wtbl_smps) + \
+ sizeof(struct wtbl_pn) + \
+ sizeof(struct wtbl_spe))
+
+#define MT7615_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
+ sizeof(struct sta_rec_basic) + \
+ sizeof(struct sta_rec_ht) + \
+ sizeof(struct sta_rec_vht) + \
+ sizeof(struct tlv) + \
+ MT7615_WTBL_UPDATE_MAX_SIZE)
+
+#define MT7615_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \
+ sizeof(struct wtbl_ba))
enum {
WTBL_GENERIC,
@@ -399,6 +604,11 @@ enum {
WTBL_MAX_NUM
};
+struct sta_ntlv_hdr {
+ u8 rsv[2];
+ __le16 tlv_num;
+} __packed;
+
struct sta_req_hdr {
u8 bss_idx;
u8 wlan_idx;
@@ -408,6 +618,15 @@ struct sta_req_hdr {
u8 rsv[2];
} __packed;
+struct sta_rec_state {
+ __le16 tag;
+ __le16 len;
+ u8 state;
+ __le32 flags;
+ u8 vhtop;
+ u8 pad[2];
+} __packed;
+
struct sta_rec_basic {
__le16 tag;
__le16 len;
@@ -447,10 +666,6 @@ struct sta_rec_ba {
__le16 winsize;
} __packed;
-#define MT7615_STA_REC_UPDATE_MAX_SIZE (sizeof(struct sta_rec_basic) + \
- sizeof(struct sta_rec_ht) + \
- sizeof(struct sta_rec_vht))
-
enum {
STA_REC_BASIC,
STA_REC_RA,
@@ -459,11 +674,12 @@ enum {
STA_REC_BF,
STA_REC_AMSDU, /* for CR4 */
STA_REC_BA,
- STA_REC_RED, /* not used */
+ STA_REC_STATE,
STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */
STA_REC_HT,
STA_REC_VHT,
STA_REC_APPS,
+ STA_REC_WTBL = 13,
STA_REC_MAX_NUM
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
new file mode 100644
index 000000000000..d2eff5442824
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -0,0 +1,174 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "mt7615.h"
+#include "regs.h"
+#include "mac.h"
+#include "../trace.h"
+
+const u32 mt7615e_reg_map[] = {
+ [MT_TOP_CFG_BASE] = 0x01000,
+ [MT_HW_BASE] = 0x01000,
+ [MT_PCIE_REMAP_2] = 0x02504,
+ [MT_ARB_BASE] = 0x20c00,
+ [MT_HIF_BASE] = 0x04000,
+ [MT_CSR_BASE] = 0x07000,
+ [MT_PHY_BASE] = 0x10000,
+ [MT_CFG_BASE] = 0x20200,
+ [MT_AGG_BASE] = 0x20a00,
+ [MT_TMAC_BASE] = 0x21000,
+ [MT_RMAC_BASE] = 0x21200,
+ [MT_DMA_BASE] = 0x21800,
+ [MT_WTBL_BASE_ON] = 0x23000,
+ [MT_WTBL_BASE_OFF] = 0x23400,
+ [MT_LPON_BASE] = 0x24200,
+ [MT_MIB_BASE] = 0x24800,
+ [MT_WTBL_BASE_ADDR] = 0x30000,
+ [MT_PCIE_REMAP_BASE2] = 0x80000,
+ [MT_TOP_MISC_BASE] = 0xc0000,
+ [MT_EFUSE_ADDR_BASE] = 0x81070000,
+};
+
+const u32 mt7663e_reg_map[] = {
+ [MT_TOP_CFG_BASE] = 0x01000,
+ [MT_HW_BASE] = 0x02000,
+ [MT_DMA_SHDL_BASE] = 0x06000,
+ [MT_PCIE_REMAP_2] = 0x0700c,
+ [MT_ARB_BASE] = 0x20c00,
+ [MT_HIF_BASE] = 0x04000,
+ [MT_CSR_BASE] = 0x07000,
+ [MT_PHY_BASE] = 0x10000,
+ [MT_CFG_BASE] = 0x20000,
+ [MT_AGG_BASE] = 0x22000,
+ [MT_TMAC_BASE] = 0x24000,
+ [MT_RMAC_BASE] = 0x25000,
+ [MT_DMA_BASE] = 0x27000,
+ [MT_WTBL_BASE_ON] = 0x29000,
+ [MT_WTBL_BASE_OFF] = 0x29800,
+ [MT_LPON_BASE] = 0x2b000,
+ [MT_MIB_BASE] = 0x2d000,
+ [MT_WTBL_BASE_ADDR] = 0x30000,
+ [MT_PCIE_REMAP_BASE2] = 0x90000,
+ [MT_TOP_MISC_BASE] = 0xc0000,
+ [MT_EFUSE_ADDR_BASE] = 0x78011000,
+};
+
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
+{
+ u32 base, offset;
+
+ if (is_mt7663(&dev->mt76)) {
+ base = addr & MT7663_MCU_PCIE_REMAP_2_BASE;
+ offset = addr & MT7663_MCU_PCIE_REMAP_2_OFFSET;
+ } else {
+ base = addr & MT_MCU_PCIE_REMAP_2_BASE;
+ offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
+ }
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
+
+ return MT_PCIE_REMAP_BASE_2 + offset;
+}
+
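mt7615_reg_map() implements a classic sliding-window remap: the upper address bits are latched into MT_PCIE_REMAP_2 and the low bits become an offset into the fixed MT_PCIE_REMAP_BASE_2 window. A hedged usage sketch (the real callers wrap this in the mt76 bus accessors):

    static u32 example_read_remapped(struct mt7615_dev *dev, u32 bus_addr)
    {
            /* Latches the window, then reads through it; not safe
             * against concurrent remaps without external locking. */
            u32 mapped = mt7615_reg_map(dev, bus_addr);

            return mt76_rr(dev, mapped);
    }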
+static void
+mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7615_dev *dev = dev_instance;
+ u32 intr;
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
+ intr &= dev->mt76.mmio.irqmask;
+
+ if (intr & MT_INT_TX_DONE_ALL) {
+ mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ napi_schedule(&dev->mt76.tx_napi);
+ }
+
+ if (intr & MT_INT_RX_DONE(0)) {
+ mt7615_irq_disable(dev, MT_INT_RX_DONE(0));
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE(1)) {
+ mt7615_irq_disable(dev, MT_INT_RX_DONE(1));
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+
+ if (intr & MT_INT_MCU_CMD) {
+ u32 val = mt76_rr(dev, MT_MCU_CMD);
+
+ if (val & MT_MCU_CMD_ERROR_MASK) {
+ dev->reset_state = val;
+ ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
+ int irq, const u32 *map)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp_common),
+ .drv_flags = MT_DRV_TXWI_NO_FREE,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .tx_prepare_skb = mt7615_tx_prepare_skb,
+ .tx_complete_skb = mt7615_tx_complete_skb,
+ .rx_skb = mt7615_queue_rx_skb,
+ .rx_poll_complete = mt7615_rx_poll_complete,
+ .sta_ps = mt7615_sta_ps,
+ .sta_add = mt7615_mac_sta_add,
+ .sta_remove = mt7615_mac_sta_remove,
+ .update_survey = mt7615_update_channel,
+ };
+ struct mt7615_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ mdev = mt76_alloc_device(pdev, sizeof(*dev), &mt7615_ops, &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7615_dev, mt76);
+ mt76_mmio_init(&dev->mt76, mem_base);
+
+ dev->reg_map = map;
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ if (is_mt7663(mdev))
+ mt76_wr(dev, MT_PCIE_IRQ_ENABLE, 1);
+
+ ret = mt7615_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ ieee80211_free_hw(mt76_hw(dev));
+ return ret;
+}
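mt7615_mmio_probe() is deliberately bus-agnostic; PCI and the MT7622 SoC glue both funnel into it. A hedged sketch of a PCI-side caller (simplified; a real probe would also enable the device, map the BAR, and set DMA masks first):

    static int example_pci_probe(struct pci_dev *pdev)
    {
            /* Assumes BAR 0 was already mapped via pcim_iomap_regions() */
            void __iomem *base = pcim_iomap_table(pdev)[0];

            return mt7615_mmio_probe(&pdev->dev, base, pdev->irq,
                                     mt7615e_reg_map);
    }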
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 21486831172c..676ca622c35a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -6,6 +6,7 @@
#include <linux/interrupt.h>
#include <linux/ktime.h>
+#include <linux/regmap.h>
#include "../mt76.h"
#include "regs.h"
@@ -17,9 +18,11 @@
MT7615_MAX_INTERFACES)
#define MT7615_WATCHDOG_TIME (HZ / 10)
+#define MT7615_RESET_TIMEOUT (30 * HZ)
#define MT7615_RATE_RETRY 2
#define MT7615_TX_RING_SIZE 1024
+#define MT7615_TX_MGMT_RING_SIZE 128
#define MT7615_TX_MCU_RING_SIZE 128
#define MT7615_TX_FWDL_RING_SIZE 128
@@ -30,14 +33,34 @@
#define MT7615_FIRMWARE_N9 "mediatek/mt7615_n9.bin"
#define MT7615_ROM_PATCH "mediatek/mt7615_rom_patch.bin"
+#define MT7622_FIRMWARE_N9 "mediatek/mt7622_n9.bin"
+#define MT7622_ROM_PATCH "mediatek/mt7622_rom_patch.bin"
+
+#define MT7615_FIRMWARE_V1 1
+#define MT7615_FIRMWARE_V2 2
+#define MT7615_FIRMWARE_V3 3
+
+#define MT7663_ROM_PATCH "mediatek/mt7663pr2h_v3.bin"
+#define MT7663_FIRMWARE_N9 "mediatek/mt7663_n9_v3.bin"
+
#define MT7615_EEPROM_SIZE 1024
#define MT7615_TOKEN_SIZE 4096
#define MT_FRAC_SCALE 12
#define MT_FRAC(val, div) (((val) << MT_FRAC_SCALE) / (div))
+#define MT_CHFREQ_VALID BIT(7)
+#define MT_CHFREQ_DBDC_IDX BIT(6)
+#define MT_CHFREQ_SEQ GENMASK(5, 0)
+
+#define MT7615_BAR_RATE_DEFAULT 0x4b /* OFDM 6M */
+#define MT7615_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
+#define MT7615_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+
struct mt7615_vif;
struct mt7615_sta;
+struct mt7615_dfs_pulse;
+struct mt7615_dfs_pattern;
enum mt7615_hw_txq_id {
MT7615_TXQ_MAIN,
@@ -46,6 +69,16 @@ enum mt7615_hw_txq_id {
MT7615_TXQ_FWDL,
};
+enum mt7622_hw_txq_id {
+ MT7622_TXQ_AC0,
+ MT7622_TXQ_AC1,
+ MT7622_TXQ_AC2,
+ MT7622_TXQ_FWDL = MT7615_TXQ_FWDL,
+ MT7622_TXQ_AC3,
+ MT7622_TXQ_MGMT,
+ MT7622_TXQ_MCU = 15,
+};
+
struct mt7615_rate_set {
struct ieee80211_tx_rate probe_rate;
struct ieee80211_tx_rate rates[4];
@@ -79,12 +112,90 @@ struct mt7615_vif {
struct mt7615_sta sta;
};
+struct mib_stats {
+ u32 ack_fail_cnt;
+ u32 fcs_err_cnt;
+ u32 rts_cnt;
+ u32 rts_retries_cnt;
+};
+
+struct mt7615_phy {
+ struct mt76_phy *mt76;
+ struct mt7615_dev *dev;
+
+ u32 rxfilter;
+ u32 omac_mask;
+
+ u16 noise;
+
+ unsigned long last_cca_adj;
+ int false_cca_ofdm, false_cca_cck;
+ s8 ofdm_sensitivity;
+ s8 cck_sensitivity;
+
+ u16 chainmask;
+
+ s16 coverage_class;
+ u8 slottime;
+
+ u8 chfreq;
+ u8 rdd_state;
+ int dfs_state;
+
+ __le32 rx_ampdu_ts;
+ u32 ampdu_ref;
+
+ struct mib_stats mib;
+};
+
+#define mt7615_mcu_add_tx_ba(dev, ...) (dev)->mcu_ops->add_tx_ba((dev), __VA_ARGS__)
+#define mt7615_mcu_add_rx_ba(dev, ...) (dev)->mcu_ops->add_rx_ba((dev), __VA_ARGS__)
+#define mt7615_mcu_sta_add(dev, ...) (dev)->mcu_ops->sta_add((dev), __VA_ARGS__)
+#define mt7615_mcu_add_dev_info(dev, ...) (dev)->mcu_ops->add_dev_info((dev), __VA_ARGS__)
+#define mt7615_mcu_add_bss_info(dev, ...) (dev)->mcu_ops->add_bss_info((dev), __VA_ARGS__)
+#define mt7615_mcu_add_beacon(dev, ...) (dev)->mcu_ops->add_beacon_offload((dev), __VA_ARGS__)
+#define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__)
+struct mt7615_mcu_ops {
+ int (*add_tx_ba)(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable);
+ int (*add_rx_ba)(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable);
+ int (*sta_add)(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable);
+ int (*add_dev_info)(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool enable);
+ int (*add_bss_info)(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ bool enable);
+ int (*add_beacon_offload)(struct mt7615_dev *dev,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, bool enable);
+ int (*set_pm_state)(struct mt7615_dev *dev, int band, int state);
+};
+
struct mt7615_dev {
- struct mt76_dev mt76; /* must be first */
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
+
+ struct mt7615_phy phy;
u32 vif_mask;
u32 omac_mask;
- __le32 rx_ampdu_ts;
+ u16 chainmask;
+
+ const struct mt7615_mcu_ops *mcu_ops;
+ struct regmap *infracfg;
+ const u32 *reg_map;
+
+ struct work_struct mcu_work;
+
+ struct work_struct reset_work;
+ wait_queue_head_t reset_wait;
+ u32 reset_state;
struct list_head sta_poll_list;
spinlock_t sta_poll_lock;
@@ -96,17 +207,15 @@ struct mt7615_dev {
s16 power;
} radar_pattern;
u32 hw_pattern;
- int dfs_state;
- int false_cca_ofdm, false_cca_cck;
- unsigned long last_cca_adj;
u8 mac_work_count;
- s8 ofdm_sensitivity;
- s8 cck_sensitivity;
bool scs_en;
+ bool fw_debug;
spinlock_t token_lock;
struct idr token;
+
+ u8 fw_ver;
};
enum {
@@ -135,11 +244,6 @@ enum {
};
enum {
- MT_HW_RDD0,
- MT_HW_RDD1,
-};
-
-enum {
MT_RX_SEL0,
MT_RX_SEL1,
};
@@ -158,13 +262,57 @@ enum mt7615_rdd_cmd {
RDD_RESUME_BF,
};
+static inline struct mt7615_phy *
+mt7615_hw_phy(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return phy->priv;
+}
+
+static inline struct mt7615_dev *
+mt7615_hw_dev(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return container_of(phy->dev, struct mt7615_dev, mt76);
+}
+
+static inline struct mt7615_phy *
+mt7615_ext_phy(struct mt7615_dev *dev)
+{
+ struct mt76_phy *phy = dev->mt76.phy2;
+
+ if (!phy)
+ return NULL;
+
+ return phy->priv;
+}
+
extern const struct ieee80211_ops mt7615_ops;
+extern const u32 mt7615e_reg_map[__MT_BASE_MAX];
+extern const u32 mt7663e_reg_map[__MT_BASE_MAX];
extern struct pci_driver mt7615_pci_driver;
+extern struct platform_driver mt7622_wmac_driver;
+#ifdef CONFIG_MT7622_WMAC
+int mt7622_wmac_init(struct mt7615_dev *dev);
+#else
+static inline int mt7622_wmac_init(struct mt7615_dev *dev)
+{
+ return 0;
+}
+#endif
+
+int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
+ int irq, const u32 *map);
u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
+void mt7615_init_device(struct mt7615_dev *dev);
int mt7615_register_device(struct mt7615_dev *dev);
void mt7615_unregister_device(struct mt7615_dev *dev);
+int mt7615_register_ext_phy(struct mt7615_dev *dev);
+void mt7615_unregister_ext_phy(struct mt7615_dev *dev);
int mt7615_eeprom_init(struct mt7615_dev *dev);
int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
struct ieee80211_channel *chan,
@@ -172,58 +320,37 @@ int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
int mt7615_dma_init(struct mt7615_dev *dev);
void mt7615_dma_cleanup(struct mt7615_dev *dev);
int mt7615_mcu_init(struct mt7615_dev *dev);
-int mt7615_mcu_set_dev_info(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool enable);
-int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- int en);
-void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
+bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev);
+void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates);
-int mt7615_mcu_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- bool enable);
-int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_sta *sta);
int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
-int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
- struct ieee80211_vif *vif, bool en);
-int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool en);
-int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- int en);
-int mt7615_mcu_set_channel(struct mt7615_dev *dev);
+int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
const struct ieee80211_tx_queue_params *params);
-int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool add);
-int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool add);
-int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb);
int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
enum mt7615_rdd_cmd cmd, u8 index,
u8 rx_sel, u8 val);
-int mt7615_dfs_start_radar_detector(struct mt7615_dev *dev);
-int mt7615_dfs_stop_radar_detector(struct mt7615_dev *dev);
int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev);
+int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl);
static inline bool is_mt7622(struct mt76_dev *dev)
{
+ if (!IS_ENABLED(CONFIG_MT7622_WMAC))
+ return false;
+
return mt76_chip(dev) == 0x7622;
}
-static inline void mt7615_dfs_check_channel(struct mt7615_dev *dev)
+static inline bool is_mt7615(struct mt76_dev *dev)
{
- enum nl80211_chan_width width = dev->mt76.chandef.width;
- u32 freq = dev->mt76.chandef.chan->center_freq;
- struct ieee80211_hw *hw = mt76_hw(dev);
+ return mt76_chip(dev) == 0x7615;
+}
- if (hw->conf.chandef.chan->center_freq != freq ||
- hw->conf.chandef.width != width)
- dev->dfs_state = -1;
+static inline bool is_mt7663(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7663;
}
static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
@@ -239,27 +366,32 @@ static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask)
void mt7615_update_channel(struct mt76_dev *mdev);
bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask);
void mt7615_mac_reset_counters(struct mt7615_dev *dev);
-void mt7615_mac_cca_stats_reset(struct mt7615_dev *dev);
+void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy);
void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable);
+void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy);
void mt7615_mac_sta_poll(struct mt7615_dev *dev);
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int pid,
- struct ieee80211_key_conf *key);
+ struct ieee80211_key_conf *key, bool beacon);
+void mt7615_mac_set_timing(struct mt7615_phy *phy);
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb);
void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data);
void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb);
int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd);
+void mt7615_mac_reset_work(struct work_struct *work);
+int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
+int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
-int mt7615_mcu_init_mac(struct mt7615_dev *dev);
-int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val);
-int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter);
+int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable);
+int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val);
int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index);
-int mt7615_mcu_set_tx_power(struct mt7615_dev *dev);
void mt7615_mcu_exit(struct mt7615_dev *dev);
+void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq);
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
@@ -272,17 +404,20 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
-int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void mt7615_mac_work(struct work_struct *work);
void mt7615_txp_skb_unmap(struct mt76_dev *dev,
struct mt76_txwi_cache *txwi);
-int mt76_dfs_start_rdd(struct mt7615_dev *dev, bool force);
-int mt7615_dfs_init_radar_detector(struct mt7615_dev *dev);
+int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val);
+int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
+ const struct mt7615_dfs_pulse *pulse);
+int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
+ const struct mt7615_dfs_pattern *pattern);
+int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable);
+int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy);
int mt7615_init_debugfs(struct mt7615_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h
new file mode 100644
index 000000000000..d3eb49d83b98
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#if !defined(__MT7615_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT7615_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt7615.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt7615
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, 32)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(mt76_hw(dev)->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s"
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define TOKEN_ENTRY __field(u16, token)
+#define TOKEN_ASSIGN __entry->token = token
+#define TOKEN_PR_FMT " %d"
+#define TOKEN_PR_ARG __entry->token
+
+DECLARE_EVENT_CLASS(dev_token,
+ TP_PROTO(struct mt7615_dev *dev, u16 token),
+ TP_ARGS(dev, token),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ TOKEN_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ TOKEN_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT TOKEN_PR_FMT,
+ DEV_PR_ARG, TOKEN_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_token, mac_tx_free,
+ TP_PROTO(struct mt7615_dev *dev, u16 token),
+ TP_ARGS(dev, token)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mt7615_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index 1eb1eb659c3f..c8d0f893a47f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -10,84 +10,17 @@
#include <linux/pci.h>
#include "mt7615.h"
-#include "mac.h"
static const struct pci_device_id mt7615_pci_device_table[] = {
{ PCI_DEVICE(0x14c3, 0x7615) },
+ { PCI_DEVICE(0x14c3, 0x7663) },
{ },
};
-u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
-{
- u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
- u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
-
- mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
-
- return MT_PCIE_REMAP_BASE_2 + offset;
-}
-
-static void
-mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-{
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
-
- mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
-}
-
-static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
-{
- struct mt7615_dev *dev = dev_instance;
- u32 intr;
-
- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
- return IRQ_NONE;
-
- intr &= dev->mt76.mmio.irqmask;
-
- if (intr & MT_INT_TX_DONE_ALL) {
- mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
- napi_schedule(&dev->mt76.tx_napi);
- }
-
- if (intr & MT_INT_RX_DONE(0)) {
- mt7615_irq_disable(dev, MT_INT_RX_DONE(0));
- napi_schedule(&dev->mt76.napi[0]);
- }
-
- if (intr & MT_INT_RX_DONE(1)) {
- mt7615_irq_disable(dev, MT_INT_RX_DONE(1));
- napi_schedule(&dev->mt76.napi[1]);
- }
-
- return IRQ_HANDLED;
-}
-
static int mt7615_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- static const struct mt76_driver_ops drv_ops = {
- /* txwi_size = txd size + txp size */
- .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp),
- .drv_flags = MT_DRV_TXWI_NO_FREE,
- .survey_flags = SURVEY_INFO_TIME_TX |
- SURVEY_INFO_TIME_RX |
- SURVEY_INFO_TIME_BSS_RX,
- .tx_prepare_skb = mt7615_tx_prepare_skb,
- .tx_complete_skb = mt7615_tx_complete_skb,
- .rx_skb = mt7615_queue_rx_skb,
- .rx_poll_complete = mt7615_rx_poll_complete,
- .sta_ps = mt7615_sta_ps,
- .sta_add = mt7615_sta_add,
- .sta_assoc = mt7615_sta_assoc,
- .sta_remove = mt7615_sta_remove,
- .update_survey = mt7615_update_channel,
- };
- struct mt7615_dev *dev;
- struct mt76_dev *mdev;
+ const u32 *map;
int ret;
ret = pcim_enable_device(pdev);
@@ -104,31 +37,9 @@ static int mt7615_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7615_ops,
- &drv_ops);
- if (!mdev)
- return -ENOMEM;
-
- dev = container_of(mdev, struct mt7615_dev, mt76);
- mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
-
- mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
- (mt76_rr(dev, MT_HW_REV) & 0xff);
- dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
-
- ret = devm_request_irq(mdev->dev, pdev->irq, mt7615_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
- if (ret)
- goto error;
-
- ret = mt7615_register_device(dev);
- if (ret)
- goto error;
-
- return 0;
-error:
- ieee80211_free_hw(mt76_hw(dev));
- return ret;
+ map = id->device == 0x7663 ? mt7663e_reg_map : mt7615e_reg_map;
+ return mt7615_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
+ pdev->irq, map);
}
static void mt7615_pci_remove(struct pci_dev *pdev)
@@ -146,10 +57,9 @@ struct pci_driver mt7615_pci_driver = {
.remove = mt7615_pci_remove,
};
-module_pci_driver(mt7615_pci_driver);
-
MODULE_DEVICE_TABLE(pci, mt7615_pci_device_table);
MODULE_FIRMWARE(MT7615_FIRMWARE_CR4);
MODULE_FIRMWARE(MT7615_FIRMWARE_N9);
MODULE_FIRMWARE(MT7615_ROM_PATCH);
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_ROM_PATCH);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
index 61a4aa9ac6e6..1e0d95b917e1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -4,13 +4,46 @@
#ifndef __MT7615_REGS_H
#define __MT7615_REGS_H
-#define MT_HW_REV 0x1000
-#define MT_HW_CHIPID 0x1008
-#define MT_TOP_STRAP_STA 0x1010
+enum mt7615_reg_base {
+ MT_TOP_CFG_BASE,
+ MT_HW_BASE,
+ MT_DMA_SHDL_BASE,
+ MT_PCIE_REMAP_2,
+ MT_ARB_BASE,
+ MT_HIF_BASE,
+ MT_CSR_BASE,
+ MT_PHY_BASE,
+ MT_CFG_BASE,
+ MT_AGG_BASE,
+ MT_TMAC_BASE,
+ MT_RMAC_BASE,
+ MT_DMA_BASE,
+ MT_WTBL_BASE_ON,
+ MT_WTBL_BASE_OFF,
+ MT_LPON_BASE,
+ MT_MIB_BASE,
+ MT_WTBL_BASE_ADDR,
+ MT_PCIE_REMAP_BASE2,
+ MT_TOP_MISC_BASE,
+ MT_EFUSE_ADDR_BASE,
+ __MT_BASE_MAX,
+};
+
+#define MT_HW_INFO_BASE ((dev)->reg_map[MT_HW_BASE])
+#define MT_HW_INFO(ofs) (MT_HW_INFO_BASE + (ofs))
+#define MT_HW_REV MT_HW_INFO(0x000)
+#define MT_HW_CHIPID MT_HW_INFO(0x008)
+#define MT_TOP_STRAP_STA MT_HW_INFO(0x010)
#define MT_TOP_3NSS BIT(24)
-#define MT_TOP_MISC2 0x1134
+
+#define MT_TOP_OFF_RSV 0x1128
+#define MT_TOP_OFF_RSV_FW_STATE GENMASK(18, 16)
+
+#define MT_TOP_MISC2 ((dev)->reg_map[MT_TOP_CFG_BASE] + 0x134)
#define MT_TOP_MISC2_FW_STATE GENMASK(2, 0)
+#define MT7663_TOP_MISC2_FW_STATE GENMASK(3, 1)
+
#define MT_MCU_BASE 0x2000
#define MT_MCU(ofs) (MT_MCU_BASE + (ofs))
@@ -19,26 +52,39 @@
#define MT_MCU_PCIE_REMAP_1_BASE GENMASK(31, 18)
#define MT_PCIE_REMAP_BASE_1 0x40000
-#define MT_MCU_PCIE_REMAP_2 MT_MCU(0x504)
+#define MT_MCU_PCIE_REMAP_2 ((dev)->reg_map[MT_PCIE_REMAP_2])
#define MT_MCU_PCIE_REMAP_2_OFFSET GENMASK(18, 0)
#define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19)
-#define MT_PCIE_REMAP_BASE_2 0x80000
+#define MT_PCIE_REMAP_BASE_2 ((dev)->reg_map[MT_PCIE_REMAP_BASE2])
+
+#define MT_HIF(ofs) ((dev)->reg_map[MT_HIF_BASE] + (ofs))
+
+#define MT7663_MCU_PCIE_REMAP_2_OFFSET GENMASK(15, 0)
+#define MT7663_MCU_PCIE_REMAP_2_BASE GENMASK(31, 16)
-#define MT_HIF_BASE 0x4000
-#define MT_HIF(ofs) (MT_HIF_BASE + (ofs))
+#define MT_HIF2_BASE 0xf0000
+#define MT_HIF2(ofs) (MT_HIF2_BASE + (ofs))
+#define MT_PCIE_IRQ_ENABLE MT_HIF2(0x188)
#define MT_CFG_LPCR_HOST MT_HIF(0x1f0)
#define MT_CFG_LPCR_HOST_FW_OWN BIT(0)
#define MT_CFG_LPCR_HOST_DRV_OWN BIT(1)
+#define MT_MCU_INT_EVENT MT_HIF(0x1f8)
+#define MT_MCU_INT_EVENT_PDMA_STOPPED BIT(0)
+#define MT_MCU_INT_EVENT_PDMA_INIT BIT(1)
+#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2)
+#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
+
#define MT_INT_SOURCE_CSR MT_HIF(0x200)
#define MT_INT_MASK_CSR MT_HIF(0x204)
#define MT_DELAY_INT_CFG MT_HIF(0x210)
#define MT_INT_RX_DONE(_n) BIT(_n)
#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
-#define MT_INT_TX_DONE_ALL GENMASK(7, 4)
+#define MT_INT_TX_DONE_ALL GENMASK(19, 4)
#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+#define MT_INT_MCU_CMD BIT(30)
#define MT_WPDMA_GLO_CFG MT_HIF(0x208)
#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
@@ -49,6 +95,7 @@
#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0 BIT(9)
+#define MT_WPDMA_GLO_CFG_BYPASS_TX_SCH BIT(9) /* MT7622 */
#define MT_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
#define MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21 GENMASK(23, 22)
@@ -58,6 +105,22 @@
#define MT_WPDMA_RST_IDX MT_HIF(0x20c)
+#define MT_WPDMA_MEM_RNG_ERR MT_HIF(0x224)
+
+#define MT_MCU_CMD MT_HIF(0x234)
+#define MT_MCU_CMD_CLEAR_FW_OWN BIT(0)
+#define MT_MCU_CMD_STOP_PDMA_FW_RELOAD BIT(1)
+#define MT_MCU_CMD_STOP_PDMA BIT(2)
+#define MT_MCU_CMD_RESET_DONE BIT(3)
+#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
+#define MT_MCU_CMD_NORMAL_STATE BIT(5)
+#define MT_MCU_CMD_LMAC_ERROR BIT(24)
+#define MT_MCU_CMD_PSE_ERROR BIT(25)
+#define MT_MCU_CMD_PLE_ERROR BIT(26)
+#define MT_MCU_CMD_PDMA_ERROR BIT(27)
+#define MT_MCU_CMD_PCIE_ERROR BIT(28)
+#define MT_MCU_CMD_ERROR_MASK (GENMASK(5, 1) | GENMASK(28, 24))
+
#define MT_TX_RING_BASE MT_HIF(0x300)
#define MT_RX_RING_BASE MT_HIF(0x400)
@@ -67,6 +130,9 @@
#define MT_WPDMA_ABT_CFG MT_HIF(0x530)
#define MT_WPDMA_ABT_CFG1 MT_HIF(0x534)
+#define MT_CSR(ofs) ((dev)->reg_map[MT_CSR_BASE] + (ofs))
+#define MT_CONN_HIF_ON_LPCTL MT_CSR(0x000)
+
#define MT_PLE_BASE 0x8000
#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
@@ -78,41 +144,40 @@
#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
((n) << 2))
-#define MT_WF_PHY_BASE 0x10000
+#define MT_WF_PHY_BASE ((dev)->reg_map[MT_PHY_BASE])
#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
-#define MT_WF_PHY_WF2_RFCTRL0 MT_WF_PHY(0x1900)
+#define MT_WF_PHY_WF2_RFCTRL0(n) MT_WF_PHY(0x1900 + (n) * 0x400)
#define MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN BIT(9)
-#define MT_WF_PHY_R0_B0_PHYMUX_5 MT_WF_PHY(0x0614)
+#define MT_WF_PHY_R0_PHYMUX_5(_phy) MT_WF_PHY(0x0614 + ((_phy) << 9))
-#define MT_WF_PHY_R0_B0_PHYCTRL_STS0 MT_WF_PHY(0x020c)
+#define MT_WF_PHY_R0_PHYCTRL_STS0(_phy) MT_WF_PHY(0x020c + ((_phy) << 9))
#define MT_WF_PHYCTRL_STAT_PD_OFDM GENMASK(31, 16)
#define MT_WF_PHYCTRL_STAT_PD_CCK GENMASK(15, 0)
-#define MT_WF_PHY_R0_B0_PHYCTRL_STS5 MT_WF_PHY(0x0220)
+#define MT_WF_PHY_R0_PHYCTRL_STS5(_phy) MT_WF_PHY(0x0220 + ((_phy) << 9))
#define MT_WF_PHYCTRL_STAT_MDRDY_OFDM GENMASK(31, 16)
#define MT_WF_PHYCTRL_STAT_MDRDY_CCK GENMASK(15, 0)
-#define MT_WF_PHY_B0_MIN_PRI_PWR MT_WF_PHY(0x229c)
-#define MT_WF_PHY_B0_PD_OFDM_MASK GENMASK(28, 20)
-#define MT_WF_PHY_B0_PD_OFDM(v) ((v) << 20)
-#define MT_WF_PHY_B0_PD_BLK BIT(19)
+#define MT_WF_PHY_MIN_PRI_PWR(_phy) MT_WF_PHY((_phy) ? 0x084 : 0x229c)
+#define MT_WF_PHY_PD_OFDM_MASK(_phy) ((_phy) ? GENMASK(24, 16) : \
+ GENMASK(28, 20))
+#define MT_WF_PHY_PD_OFDM(_phy, v) ((v) << ((_phy) ? 16 : 20))
+#define MT_WF_PHY_PD_BLK(_phy) ((_phy) ? BIT(25) : BIT(19))
-#define MT_WF_PHY_B1_MIN_PRI_PWR MT_WF_PHY(0x084)
-#define MT_WF_PHY_B1_PD_OFDM_MASK GENMASK(24, 16)
-#define MT_WF_PHY_B1_PD_OFDM(v) ((v) << 16)
-#define MT_WF_PHY_B1_PD_BLK BIT(25)
+#define MT_WF_PHY_RXTD_BASE MT_WF_PHY(0x2200)
+#define MT_WF_PHY_RXTD(_n) (MT_WF_PHY_RXTD_BASE + ((_n) << 2))
-#define MT_WF_PHY_B0_RXTD_CCK_PD MT_WF_PHY(0x2310)
-#define MT_WF_PHY_B0_PD_CCK_MASK GENMASK(8, 1)
-#define MT_WF_PHY_B0_PD_CCK(v) ((v) << 1)
+#define MT_WF_PHY_RXTD_CCK_PD(_phy) MT_WF_PHY((_phy) ? 0x2314 : 0x2310)
+#define MT_WF_PHY_PD_CCK_MASK(_phy) (_phy) ? GENMASK(31, 24) : \
+ GENMASK(8, 1)
+#define MT_WF_PHY_PD_CCK(_phy, v) ((v) << ((_phy) ? 24 : 1))
-#define MT_WF_PHY_B1_RXTD_CCK_PD MT_WF_PHY(0x2314)
-#define MT_WF_PHY_B1_PD_CCK_MASK GENMASK(31, 24)
-#define MT_WF_PHY_B1_PD_CCK(v) ((v) << 24)
+#define MT_WF_PHY_RXTD2_BASE MT_WF_PHY(0x2a00)
+#define MT_WF_PHY_RXTD2(_n) (MT_WF_PHY_RXTD2_BASE + ((_n) << 2))
-#define MT_WF_CFG_BASE 0x20200
+#define MT_WF_CFG_BASE ((dev)->reg_map[MT_CFG_BASE])
#define MT_WF_CFG(ofs) (MT_WF_CFG_BASE + (ofs))
#define MT_CFG_CCR MT_WF_CFG(0x000)
@@ -121,7 +186,7 @@
#define MT_CFG_CCR_MAC_D1_2X_GC_EN BIT(30)
#define MT_CFG_CCR_MAC_D0_2X_GC_EN BIT(31)
-#define MT_WF_AGG_BASE 0x20a00
+#define MT_WF_AGG_BASE ((dev)->reg_map[MT_AGG_BASE])
#define MT_WF_AGG(ofs) (MT_WF_AGG_BASE + (ofs))
#define MT_AGG_ARCR MT_WF_AGG(0x010)
@@ -131,8 +196,8 @@
#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN BIT(19)
#define MT_AGG_ARCR_RATE_UP_EXTRA_TH GENMASK(22, 20)
-#define MT_AGG_ARUCR MT_WF_AGG(0x018)
-#define MT_AGG_ARDCR MT_WF_AGG(0x01c)
+#define MT_AGG_ARUCR(_band) MT_WF_AGG(0x018 + (_band) * 0x100)
+#define MT_AGG_ARDCR(_band) MT_WF_AGG(0x01c + (_band) * 0x100)
#define MT_AGG_ARxCR_LIMIT_SHIFT(_n) (4 * (_n))
#define MT_AGG_ARxCR_LIMIT(_n) GENMASK(2 + \
MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
@@ -142,8 +207,7 @@
#define MT_AGG_ASRCR1 MT_WF_AGG(0x064)
#define MT_AGG_ASRCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(5, 0))
-#define MT_AGG_ACR0 MT_WF_AGG(0x070)
-#define MT_AGG_ACR1 MT_WF_AGG(0x170)
+#define MT_AGG_ACR(_band) MT_WF_AGG(0x070 + (_band) * 0x100)
#define MT_AGG_ACR_NO_BA_RULE BIT(0)
#define MT_AGG_ACR_NO_BA_AR_RULE BIT(1)
#define MT_AGG_ACR_PKT_TIME_EN BIT(2)
@@ -153,24 +217,43 @@
#define MT_AGG_SCR MT_WF_AGG(0x0fc)
#define MT_AGG_SCR_NLNAV_MID_PTEC_DIS BIT(3)
-#define MT_WF_TMAC_BASE 0x21000
+#define MT_WF_ARB_BASE ((dev)->reg_map[MT_ARB_BASE])
+#define MT_WF_ARB(ofs) (MT_WF_ARB_BASE + (ofs))
+
+#define MT_ARB_SCR MT_WF_ARB(0x080)
+#define MT_ARB_SCR_TX0_DISABLE BIT(8)
+#define MT_ARB_SCR_RX0_DISABLE BIT(9)
+#define MT_ARB_SCR_TX1_DISABLE BIT(10)
+#define MT_ARB_SCR_RX1_DISABLE BIT(11)
+
+#define MT_WF_TMAC_BASE ((dev)->reg_map[MT_TMAC_BASE])
#define MT_WF_TMAC(ofs) (MT_WF_TMAC_BASE + (ofs))
-#define MT_TMAC_TRCR0 MT_WF_TMAC(0x09c)
-#define MT_TMAC_TRCR1 MT_WF_TMAC(0x070)
+#define MT_TMAC_CDTR MT_WF_TMAC(0x090)
+#define MT_TMAC_ODTR MT_WF_TMAC(0x094)
+#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
+#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
+
+#define MT_TMAC_TRCR(_band) MT_WF_TMAC((_band) ? 0x070 : 0x09c)
#define MT_TMAC_TRCR_CCA_SEL GENMASK(31, 30)
#define MT_TMAC_TRCR_SEC_CCA_SEL GENMASK(29, 28)
+#define MT_TMAC_ICR(_band) MT_WF_TMAC((_band) ? 0x074 : 0x0a4)
+#define MT_IFS_EIFS GENMASK(8, 0)
+#define MT_IFS_RIFS GENMASK(14, 10)
+#define MT_IFS_SIFS GENMASK(22, 16)
+#define MT_IFS_SLOT GENMASK(30, 24)
+
#define MT_TMAC_CTCR0 MT_WF_TMAC(0x0f4)
#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
#define MT_TMAC_CTCR0_INS_DDLMT_DENSITY GENMASK(15, 12)
#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
-#define MT_WF_RMAC_BASE 0x21200
+#define MT_WF_RMAC_BASE ((dev)->reg_map[MT_RMAC_BASE])
#define MT_WF_RMAC(ofs) (MT_WF_RMAC_BASE + (ofs))
-#define MT_WF_RFCR MT_WF_RMAC(0x000)
+#define MT_WF_RFCR(_band) MT_WF_RMAC((_band) ? 0x100 : 0x000)
#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
#define MT_WF_RFCR_DROP_VERSION BIT(3)
@@ -193,13 +276,15 @@
#define MT_WF_RFCR_DROP_NDPA BIT(20)
#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
-#define MT_WF_RFCR1 MT_WF_RMAC(0x004)
+#define MT_WF_RFCR1(_band) MT_WF_RMAC((_band) ? 0x104 : 0x004)
#define MT_WF_RFCR1_DROP_ACK BIT(4)
#define MT_WF_RFCR1_DROP_BF_POLL BIT(5)
#define MT_WF_RFCR1_DROP_BA BIT(6)
#define MT_WF_RFCR1_DROP_CFEND BIT(7)
#define MT_WF_RFCR1_DROP_CFACK BIT(8)
+#define MT_CHFREQ(_band) MT_WF_RMAC((_band) ? 0x130 : 0x030)
+
#define MT_WF_RMAC_MIB_TIME0 MT_WF_RMAC(0x03c4)
#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30)
@@ -207,17 +292,17 @@
#define MT_WF_RMAC_MIB_AIRTIME0 MT_WF_RMAC(0x0380)
#define MT_WF_RMAC_MIB_TIME5 MT_WF_RMAC(0x03d8)
+#define MT_WF_RMAC_MIB_TIME6 MT_WF_RMAC(0x03dc)
#define MT_MIB_OBSSTIME_MASK GENMASK(23, 0)
-#define MT_WF_DMA_BASE 0x21800
+#define MT_WF_DMA_BASE ((dev)->reg_map[MT_DMA_BASE])
#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
#define MT_DMA_DCR0 MT_WF_DMA(0x000)
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 2)
#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
-#define MT_DMA_BN0RCFR0 MT_WF_DMA(0x070)
-#define MT_DMA_BN1RCFR0 MT_WF_DMA(0x0b0)
+#define MT_DMA_RCFR0(_band) MT_WF_DMA(0x070 + (_band) * 0x40)
#define MT_DMA_RCFR0_MCU_RX_MGMT BIT(2)
#define MT_DMA_RCFR0_MCU_RX_CTL_NON_BAR BIT(3)
#define MT_DMA_RCFR0_MCU_RX_CTL_BAR BIT(4)
@@ -225,10 +310,10 @@
#define MT_DMA_RCFR0_RX_DROPPED_UCAST GENMASK(25, 24)
#define MT_DMA_RCFR0_RX_DROPPED_MCAST GENMASK(27, 26)
-#define MT_WTBL_BASE 0x30000
+#define MT_WTBL_BASE(dev) ((dev)->reg_map[MT_WTBL_BASE_ADDR])
#define MT_WTBL_ENTRY_SIZE 256
-#define MT_WTBL_OFF_BASE 0x23400
+#define MT_WTBL_OFF_BASE ((dev)->reg_map[MT_WTBL_BASE_OFF])
#define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n))
#define MT_WTBL_W0_KEY_IDX GENMASK(24, 23)
@@ -245,7 +330,11 @@
#define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14)
#define MT_WTBL_UPDATE_BUSY BIT(31)
-#define MT_WTBL_ON_BASE 0x23000
+#define MT_TOP_MISC(ofs) ((dev)->reg_map[MT_TOP_MISC_BASE] + (ofs))
+#define MT_CONN_ON_MISC MT_TOP_MISC(0x1140)
+#define MT_TOP_MISC2_FW_N9_RDY BIT(2)
+
+#define MT_WTBL_ON_BASE ((dev)->reg_map[MT_WTBL_BASE_ON])
#define MT_WTBL_ON(_n) (MT_WTBL_ON_BASE + (_n))
#define MT_WTBL_RICR0 MT_WTBL_ON(0x010)
@@ -281,8 +370,7 @@
#define MT_WTBL_W27_CC_BW_SEL GENMASK(6, 5)
-#define MT_LPON_BASE 0x24200
-#define MT_LPON(_n) (MT_LPON_BASE + (_n))
+#define MT_LPON(_n) ((dev)->reg_map[MT_LPON_BASE] + (_n))
#define MT_LPON_T0CR MT_LPON(0x010)
#define MT_LPON_T0CR_MODE GENMASK(1, 0)
@@ -290,13 +378,13 @@
#define MT_LPON_UTTR0 MT_LPON(0x018)
#define MT_LPON_UTTR1 MT_LPON(0x01c)
-#define MT_WF_MIB_BASE 0x24800
+#define MT_WF_MIB_BASE (dev->reg_map[MT_MIB_BASE])
#define MT_WF_MIB(ofs) (MT_WF_MIB_BASE + (ofs))
#define MT_MIB_M0_MISC_CR MT_WF_MIB(0x00c)
-#define MT_MIB_MB_SDR0(n) MT_WF_MIB(0x100 + ((n) << 4))
-#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
-#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR3(n) MT_WF_MIB(0x014 + ((n) << 9))
+#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
#define MT_MIB_SDR9(n) MT_WF_MIB(0x02c + ((n) << 9))
#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0)
@@ -309,9 +397,59 @@
#define MT_MIB_SDR37(n) MT_WF_MIB(0x09c + ((n) << 9))
#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0)
+#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(0x100 + ((_band) << 9) + \
+ ((n) << 4))
+#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
+#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(0x104 + ((_band) << 9) + \
+ ((n) << 4))
+#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
+
#define MT_TX_AGG_CNT(n) MT_WF_MIB(0xa8 + ((n) << 2))
-#define MT_EFUSE_BASE 0x81070000
+#define MT_DMA_SHDL(ofs) (dev->reg_map[MT_DMA_SHDL_BASE] + (ofs))
+
+#define MT_DMASHDL_BASE 0x5000a000
+#define MT_DMASHDL_OPTIONAL 0x008
+#define MT_DMASHDL_PAGE 0x00c
+
+#define MT_DMASHDL_REFILL 0x010
+
+#define MT_DMASHDL_PKT_MAX_SIZE 0x01c
+#define MT_DMASHDL_PKT_MAX_SIZE_PLE GENMASK(11, 0)
+#define MT_DMASHDL_PKT_MAX_SIZE_PSE GENMASK(27, 16)
+
+#define MT_DMASHDL_GROUP_QUOTA(_n) (0x020 + ((_n) << 2))
+#define MT_DMASHDL_GROUP_QUOTA_MIN GENMASK(11, 0)
+#define MT_DMASHDL_GROUP_QUOTA_MAX GENMASK(27, 16)
+
+#define MT_DMASHDL_SCHED_SET0 0x0b0
+#define MT_DMASHDL_SCHED_SET1 0x0b4
+
+#define MT_DMASHDL_Q_MAP(_n) (0x0d0 + ((_n) << 2))
+#define MT_DMASHDL_Q_MAP_MASK GENMASK(3, 0)
+#define MT_DMASHDL_Q_MAP_SHIFT(_n) (4 * ((_n) % 8))
+
+#define MT_LED_BASE_PHYS 0x80024000
+#define MT_LED_PHYS(_n) (MT_LED_BASE_PHYS + (_n))
+
+#define MT_LED_CTRL MT_LED_PHYS(0x00)
+
+#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_TX_MANUAL_BLINK(_n) BIT(3 + (8 * (_n)))
+#define MT_LED_CTRL_TX_OVER_BLINK(_n) BIT(5 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
+
+#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x10 + ((_n) * 8))
+#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x14 + ((_n) * 8))
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 0)
+
+#define MT_EFUSE_BASE ((dev)->reg_map[MT_EFUSE_ADDR_BASE])
#define MT_EFUSE_BASE_CTRL 0x000
#define MT_EFUSE_BASE_CTRL_EMPTY BIT(30)
@@ -328,4 +466,8 @@
#define MT_EFUSE_WDATA(_i) (0x010 + ((_i) * 4))
#define MT_EFUSE_RDATA(_i) (0x030 + ((_i) * 4))
+/* INFRACFG host register range on MT7622 */
+#define MT_INFRACFG_MISC 0x700
+#define MT_INFRACFG_MISC_AP2CONN_WAKE BIT(1)
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
new file mode 100644
index 000000000000..43aa49706c66
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include "mt7615.h"
+
+int mt7622_wmac_init(struct mt7615_dev *dev)
+{
+ struct device_node *np = dev->mt76.dev->of_node;
+
+ if (!is_mt7622(&dev->mt76))
+ return 0;
+
+ dev->infracfg = syscon_regmap_lookup_by_phandle(np, "mediatek,infracfg");
+ if (IS_ERR(dev->infracfg)) {
+ dev_err(dev->mt76.dev, "Cannot find infracfg controller\n");
+ return PTR_ERR(dev->infracfg);
+ }
+
+ return 0;
+}
+
+static int mt7622_wmac_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ void __iomem *mem_base;
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get device IRQ\n");
+ return irq;
+ }
+
+ mem_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mem_base)) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return PTR_ERR(mem_base);
+ }
+
+ return mt7615_mmio_probe(&pdev->dev, mem_base, irq, mt7615e_reg_map);
+}
+
+static int mt7622_wmac_remove(struct platform_device *pdev)
+{
+ struct mt7615_dev *dev = platform_get_drvdata(pdev);
+
+ mt7615_unregister_device(dev);
+
+ return 0;
+}
+
+static const struct of_device_id mt7622_wmac_of_match[] = {
+ { .compatible = "mediatek,mt7622-wmac" },
+ {},
+};
+
+struct platform_driver mt7622_wmac_driver = {
+ .driver = {
+ .name = "mt7622-wmac",
+ .of_match_table = mt7622_wmac_of_match,
+ },
+ .probe = mt7622_wmac_probe,
+ .remove = mt7622_wmac_remove,
+};
+
+MODULE_FIRMWARE(MT7622_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7622_ROM_PATCH);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/trace.c b/drivers/net/wireless/mediatek/mt76/mt7615/trace.c
new file mode 100644
index 000000000000..6c02d5aff68f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "mt7615_trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index d1405528b504..9087607b621e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -109,7 +109,7 @@ static void mt76x0_set_freq_offset(struct mt76x02_dev *dev)
void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
s8 val, lna_5g[3], lna_2g;
u16 rssi_offset;
@@ -129,7 +129,7 @@ void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
u8 val;
if (chandef->width == NL80211_CHAN_WIDTH_80) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index 388b54cded1b..57f8d56737eb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -264,12 +264,12 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
if (dev->mt76.cap.has_5ghz) {
/* overwrite unsupported features */
- mt76x0_vht_cap_mask(&dev->mt76.sband_5g.sband);
- mt76x0_init_txpower(dev, &dev->mt76.sband_5g.sband);
+ mt76x0_vht_cap_mask(&dev->mphy.sband_5g.sband);
+ mt76x0_init_txpower(dev, &dev->mphy.sband_5g.sband);
}
if (dev->mt76.cap.has_2ghz)
- mt76x0_init_txpower(dev, &dev->mt76.sband_2g.sband);
+ mt76x0_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt76x02_init_debugfs(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index b2ccf50512dc..700ae9c12f1d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -16,7 +16,7 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
if (mt76_is_mmio(&dev->mt76))
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- mt76_set_channel(&dev->mt76);
+ mt76_set_channel(&dev->mphy);
mt76x0_phy_set_channel(dev, chandef);
mt76x02_mac_cc_reset(dev);
@@ -28,7 +28,7 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
}
mt76x02_pre_tbtt_enable(dev, true);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
}
int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
@@ -44,9 +44,9 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- dev->mt76.txpower_conf = hw->conf.power_level * 2;
+ dev->txpower_conf = hw->conf.power_level * 2;
- if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
mt76x0_phy_set_txpower(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index e2974e0ae1fc..0b520ae08d01 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -20,7 +20,7 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
return 0;
}
@@ -47,7 +47,7 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
mt76x0e_stop_hw(dev);
}
@@ -67,6 +67,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x02_bss_info_changed,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76_sw_scan,
@@ -124,7 +125,7 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
if (err < 0)
return err;
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
return 0;
}
@@ -195,7 +196,7 @@ error:
static void mt76x0e_cleanup(struct mt76x02_dev *dev)
{
- clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt76x0_chip_onoff(dev, false, false);
mt76x0e_stop_hw(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
index 038187b390ce..007c762c6db1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
@@ -126,7 +126,7 @@ int mt76x0e_mcu_init(struct mt76x02_dev *dev)
if (err < 0)
return err;
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 2ecd45f8af90..09f34deb6ba1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -23,7 +23,7 @@ mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
int ret = 0;
u8 bank, reg;
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
return -ENODEV;
bank = MT_RF_BANK(offset);
@@ -62,7 +62,7 @@ static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset)
u32 val;
u8 bank, reg;
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
return -ENODEV;
bank = MT_RF_BANK(offset);
@@ -109,7 +109,7 @@ mt76x0_rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
};
WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
- &dev->mt76.state));
+ &dev->mphy.state));
return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
} else {
return mt76x0_rf_csr_wr(dev, offset, val);
@@ -127,7 +127,7 @@ static int mt76x0_rf_rr(struct mt76x02_dev *dev, u32 offset)
};
WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
- &dev->mt76.state));
+ &dev->mphy.state));
ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
val = pair.value;
} else {
@@ -502,7 +502,7 @@ mt76x0_phy_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
static void mt76x0_phy_tssi_dc_calibrate(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
u32 val;
if (chan->band == NL80211_BAND_5GHZ)
@@ -543,7 +543,7 @@ static int
mt76x0_phy_tssi_adc_calibrate(struct mt76x02_dev *dev, s16 *ltssi,
u8 *info)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
u32 val;
val = (chan->band == NL80211_BAND_5GHZ) ? 0x80055 : 0x80050;
@@ -696,7 +696,7 @@ mt76x0_phy_get_delta_power(struct mt76x02_dev *dev, u8 tx_mode,
s8 target_power, s8 target_pa_power,
s16 ltssi)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int tssi_target = target_power << 12, tssi_slope;
int tssi_offset, tssi_db, ret;
u32 data;
@@ -844,12 +844,12 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
struct mt76_rate_power *t = &dev->mt76.rate_power;
s8 info;
- mt76x0_get_tx_power_per_rate(dev, dev->mt76.chandef.chan, t);
- mt76x0_get_power_info(dev, dev->mt76.chandef.chan, &info);
+ mt76x0_get_tx_power_per_rate(dev, dev->mphy.chandef.chan, t);
+ mt76x0_get_power_info(dev, dev->mphy.chandef.chan, &info);
mt76x02_add_rate_power_offset(t, info);
- mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
- dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
+ mt76x02_limit_rate_power(t, dev->txpower_conf);
+ dev->mphy.txpower_cur = mt76x02_get_max_rate_power(t);
mt76x02_add_rate_power_offset(t, -info);
dev->target_power = info;
@@ -858,7 +858,7 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int is_5ghz = (chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
u32 val, tx_alc, reg_val;
@@ -933,7 +933,7 @@ void mt76x0_phy_set_channel(struct mt76x02_dev *dev,
FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
};
- bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ bool scan = test_bit(MT76_SCANNING, &dev->mphy.state);
int ch_group_index, freq, freq1;
u8 channel;
u32 val;
@@ -1037,7 +1037,7 @@ static void mt76x0_phy_temp_sensor(struct mt76x02_dev *dev)
if (abs(val - dev->cal.temp_vco) > 20) {
mt76x02_mcu_calibrate(dev, MCU_CAL_VCO,
- dev->mt76.chandef.chan->hw_value);
+ dev->mphy.chandef.chan->hw_value);
dev->cal.temp_vco = val;
}
if (abs(val - dev->cal.temp) > 30) {
@@ -1057,7 +1057,7 @@ static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev)
mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, gain);
- if ((dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR) &&
+ if ((dev->mphy.chandef.chan->flags & IEEE80211_CHAN_RADAR) &&
!is_mt7630(dev))
mt76x02_phy_dfs_adjust_agc(dev);
}
@@ -1069,7 +1069,7 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
u8 gain_delta;
int low_gain;
- dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76);
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, false);
if (!dev->cal.avg_rssi_all)
dev->cal.avg_rssi_all = -75;
@@ -1155,7 +1155,6 @@ static void mt76x0_rf_patch_reg_array(struct mt76x02_dev *dev,
static void mt76x0_phy_rf_init(struct mt76x02_dev *dev)
{
int i;
- u8 val;
mt76x0_rf_patch_reg_array(dev, mt76x0_rf_central_tab,
ARRAY_SIZE(mt76x0_rf_central_tab));
@@ -1188,7 +1187,7 @@ static void mt76x0_phy_rf_init(struct mt76x02_dev *dev)
*/
mt76x0_rf_wr(dev, MT_RF(0, 22),
min_t(u8, dev->cal.rx.freq_offset, 0xbf));
- val = mt76x0_rf_rr(dev, MT_RF(0, 22));
+ mt76x0_rf_rr(dev, MT_RF(0, 22));
/* Reset procedure DAC during power-up:
* - set B0.R73<7>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 65ba9fc6ea0b..5535b9c0632f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -71,7 +71,7 @@ static void mt76x0_init_usb_dma(struct mt76x02_dev *dev)
static void mt76x0u_cleanup(struct mt76x02_dev *dev)
{
- clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
mt76x0_chip_onoff(dev, false, false);
mt76u_queues_deinit(&dev->mt76);
}
@@ -80,13 +80,13 @@ static void mt76x0u_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mt76.mac_work);
mt76u_stop_tx(&dev->mt76);
mt76x02u_exit_beacon_config(dev);
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
return;
if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
@@ -112,7 +112,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
return 0;
}
@@ -126,6 +126,7 @@ static const struct ieee80211_ops mt76x0u_ops = {
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x02_bss_info_changed,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76_sw_scan,
@@ -172,8 +173,14 @@ static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
static int mt76x0u_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = dev->mt76.hw;
+ struct mt76_usb *usb = &dev->mt76.usb;
int err;
+ usb->mcu.data = devm_kmalloc(dev->mt76.dev, MCU_RESP_URB_SIZE,
+ GFP_KERNEL);
+ if (!usb->mcu.data)
+ return -ENOMEM;
+
err = mt76u_alloc_queues(&dev->mt76);
if (err < 0)
goto out_err;
@@ -182,17 +189,13 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
if (err < 0)
goto out_err;
+ /* check hw sg support in order to enable AMSDU */
+ hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_TX_SG_MAX_SIZE : 1;
err = mt76x0_register_device(dev);
if (err < 0)
goto out_err;
- /* check hw sg support in order to enable AMSDU */
- if (dev->mt76.usb.sg_en)
- hw->max_tx_fragments = MT_TX_SG_MAX_SIZE;
- else
- hw->max_tx_fragments = 1;
-
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
return 0;
@@ -240,7 +243,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
usb_set_intfdata(usb_intf, dev);
mt76x02u_init_mcu(mdev);
- ret = mt76u_init(mdev, usb_intf);
+ ret = mt76u_init(mdev, usb_intf, false);
if (ret)
goto err;
@@ -283,7 +286,7 @@ err:
static void mt76x0_disconnect(struct usb_interface *usb_intf)
{
struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
- bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ bool initialized = test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
if (!initialized)
return;
@@ -304,7 +307,7 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
mt76u_stop_rx(&dev->mt76);
- clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
mt76x0_chip_onoff(dev, false, false);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
index 888a930a5e08..45502fd4693f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
@@ -168,7 +168,7 @@ int mt76x0u_mcu_init(struct mt76x02_dev *dev)
if (ret < 0)
return ret;
- set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 0ca0bbfe8769..23040c193ca5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -70,18 +70,23 @@ struct mt76x02_beacon_ops {
(dev)->beacon_ops->pre_tbtt_enable(dev, enable)
struct mt76x02_dev {
- struct mt76_dev mt76; /* must be first */
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
struct mac_address macaddr_list[8];
struct mutex phy_mutex;
u16 vif_mask;
+ u16 chainmask;
u8 txdone_seq;
DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
spinlock_t txstatus_fifo_lock;
u32 tx_airtime;
+ u32 ampdu_ref;
struct sk_buff *rx_head;
@@ -93,8 +98,7 @@ struct mt76x02_dev {
const struct mt76x02_beacon_ops *beacon_ops;
- struct sk_buff *beacons[8];
- u8 beacon_data_mask;
+ u8 beacon_data_count;
u8 tbtt_count;
@@ -104,13 +108,14 @@ struct mt76x02_dev {
struct mt76x02_calibration cal;
+ int txpower_conf;
s8 target_power;
s8 target_power_delta[2];
bool enable_tpc;
bool no_2ghz;
- u8 coverage_class;
+ s16 coverage_class;
u8 slottime;
struct mt76x02_dfs_pattern_detector dfs_pd;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
index 4209209ac940..5d034cec191b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
@@ -26,78 +26,40 @@ static int
mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
{
int beacon_len = dev->beacon_ops->slot_size;
- struct mt76x02_txwi txwi;
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
return -ENOSPC;
- mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
-
- mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
- offset += sizeof(txwi);
-
- mt76_wr_copy(dev, offset, skb->data, skb->len);
- return 0;
-}
-
-static int
-__mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx,
- struct sk_buff *skb)
-{
- int beacon_len = dev->beacon_ops->slot_size;
- int beacon_addr = MT_BEACON_BASE + (beacon_len * bcn_idx);
- int ret = 0;
- int i;
-
- /* Prevent corrupt transmissions during update */
- mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
+ /* USB devices already reserve enough skb headroom for txwi's. This
+ * helps to save slow copies over USB.
+ */
+ if (mt76_is_usb(&dev->mt76)) {
+ struct mt76x02_txwi *txwi;
- if (skb) {
- ret = mt76x02_write_beacon(dev, beacon_addr, skb);
- if (!ret)
- dev->beacon_data_mask |= BIT(bcn_idx);
+ txwi = (struct mt76x02_txwi *)(skb->data - sizeof(*txwi));
+ mt76x02_mac_write_txwi(dev, txwi, skb, NULL, NULL, skb->len);
+ skb_push(skb, sizeof(*txwi));
} else {
- dev->beacon_data_mask &= ~BIT(bcn_idx);
- for (i = 0; i < beacon_len; i += 4)
- mt76_wr(dev, beacon_addr + i, 0);
- }
+ struct mt76x02_txwi txwi;
- mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
+ mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
+ mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
+ offset += sizeof(txwi);
+ }
- return ret;
+ mt76_wr_copy(dev, offset, skb->data, skb->len);
+ return 0;
}
-int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
- struct sk_buff *skb)
+void mt76x02_mac_set_beacon(struct mt76x02_dev *dev,
+ struct sk_buff *skb)
{
- bool force_update = false;
- int bcn_idx = 0;
- int i;
+ int bcn_len = dev->beacon_ops->slot_size;
+ int bcn_addr = MT_BEACON_BASE + (bcn_len * dev->beacon_data_count);
- for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
- if (vif_idx == i) {
- force_update = !!dev->beacons[i] ^ !!skb;
- dev_kfree_skb(dev->beacons[i]);
- dev->beacons[i] = skb;
- __mt76x02_mac_set_beacon(dev, bcn_idx, skb);
- } else if (force_update && dev->beacons[i]) {
- __mt76x02_mac_set_beacon(dev, bcn_idx,
- dev->beacons[i]);
- }
-
- bcn_idx += !!dev->beacons[i];
- }
-
- for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
- if (!(dev->beacon_data_mask & BIT(i)))
- break;
-
- __mt76x02_mac_set_beacon(dev, i, NULL);
- }
-
- mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
- bcn_idx - 1);
- return 0;
+ if (!mt76x02_write_beacon(dev, bcn_addr, skb))
+ dev->beacon_data_count++;
+ dev_kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_set_beacon);
@@ -116,7 +78,6 @@ void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
dev->mt76.beacon_mask |= BIT(mvif->idx);
} else {
dev->mt76.beacon_mask &= ~BIT(mvif->idx);
- mt76x02_mac_set_beacon(dev, mvif->idx, NULL);
}
if (!!old_mask == !!dev->mt76.beacon_mask)
@@ -182,7 +143,7 @@ mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!skb)
return;
- mt76x02_mac_set_beacon(dev, mvif->idx, skb);
+ mt76x02_mac_set_beacon(dev, skb);
}
EXPORT_SYMBOL_GPL(mt76x02_update_beacon_iter);
@@ -241,17 +202,11 @@ EXPORT_SYMBOL_GPL(mt76x02_enqueue_buffered_bc);
void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
{
- int i;
-
mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_BEACON_TX));
mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE);
mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
-
- for (i = 0; i < 8; i++)
- mt76x02_mac_set_beacon(dev, i, NULL);
-
mt76x02_set_beacon_offsets(dev);
}
EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index 5dec33ed8527..ff6a9e4daac0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -307,8 +307,8 @@ static bool mt76x02_dfs_check_hw_pulse(struct mt76x02_dev *dev,
pulse->period <= 100100);
break;
case NL80211_DFS_JP:
- if (dev->mt76.chandef.chan->center_freq >= 5250 &&
- dev->mt76.chandef.chan->center_freq <= 5350) {
+ if (dev->mphy.chandef.chan->center_freq >= 5250 &&
+ dev->mphy.chandef.chan->center_freq <= 5350) {
/* JPW53 */
if (pulse->w1 <= 130)
ret = (pulse->period >= 28360 &&
@@ -616,7 +616,7 @@ static void mt76x02_dfs_tasklet(unsigned long arg)
u32 engine_mask;
int i;
- if (test_bit(MT76_SCANNING, &dev->mt76.state))
+ if (test_bit(MT76_SCANNING, &dev->mphy.state))
goto out;
if (time_is_before_jiffies(dfs_pd->last_sw_check +
@@ -702,7 +702,7 @@ static void mt76x02_dfs_set_bbp_params(struct mt76x02_dev *dev)
u8 i, shift;
u32 data;
- switch (dev->mt76.chandef.width) {
+ switch (dev->mphy.chandef.width) {
case NL80211_CHAN_WIDTH_40:
shift = MT_DFS_NUM_ENGINES;
break;
@@ -722,8 +722,8 @@ static void mt76x02_dfs_set_bbp_params(struct mt76x02_dev *dev)
radar_specs = &etsi_radar_specs[shift];
break;
case NL80211_DFS_JP:
- if (dev->mt76.chandef.chan->center_freq >= 5250 &&
- dev->mt76.chandef.chan->center_freq <= 5350)
+ if (dev->mphy.chandef.chan->center_freq >= 5250 &&
+ dev->mphy.chandef.chan->center_freq <= 5350)
radar_specs = &jp_w53_radar_specs[shift];
else
radar_specs = &jp_w56_radar_specs[shift];
@@ -822,7 +822,7 @@ EXPORT_SYMBOL_GPL(mt76x02_phy_dfs_adjust_agc);
void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+ struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
dev->mt76.region != NL80211_DFS_UNSET) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 4460548f346a..8b072277ea10 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -6,6 +6,7 @@
#include "mt76x02.h"
#include "mt76x02_trace.h"
+#include "trace.h"
void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
{
@@ -200,7 +201,7 @@ mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
bw = 1;
} else {
const struct ieee80211_rate *r;
- int band = dev->mt76.chandef.chan->band;
+ int band = dev->mphy.chandef.chan->band;
u16 val;
r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
@@ -344,7 +345,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
u16 txwi_flags = 0;
u8 nss;
s8 txpwr_adj, max_txpwr_adj;
- u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf;
+ u8 ccmp_pn[8], nstreams = dev->chainmask & 0xf;
memset(txwi, 0, sizeof(*txwi));
@@ -386,7 +387,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
}
- txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
+ txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf,
max_txpwr_adj);
txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
@@ -487,17 +488,17 @@ mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
first_rate |= st->pktid & MT_PKTID_RATE;
mt76x02_mac_process_tx_rate(&rate[0], first_rate,
- dev->mt76.chandef.chan->band);
+ dev->mphy.chandef.chan->band);
} else if (rate[0].idx < 0) {
if (!msta)
return;
mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
- dev->mt76.chandef.chan->band);
+ dev->mphy.chandef.chan->band);
}
mt76x02_mac_process_tx_rate(&last_rate, st->rate,
- dev->mt76.chandef.chan->band);
+ dev->mphy.chandef.chan->band);
for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
retry--;
@@ -630,7 +631,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
if (!len)
goto out;
- duration = mt76_calc_tx_airtime(&dev->mt76, &info, len);
+ duration = ieee80211_calc_tx_airtime(mt76_hw(dev), &info, len);
spin_lock_bh(&dev->mt76.cc_lock);
dev->tx_airtime += duration;
@@ -679,7 +680,7 @@ mt76x02_mac_process_rate(struct mt76x02_dev *dev,
status->rate_idx = idx;
break;
case MT_PHY_TYPE_VHT: {
- u8 n_rxstream = dev->mt76.chainmask & 0xf;
+ u8 n_rxstream = dev->chainmask & 0xf;
status->encoding = RX_ENC_VHT;
status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
@@ -741,6 +742,8 @@ void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
get_unaligned_le16(dev->mt76.macaddr + 4) |
FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
+ /* enable 7 additional beacon slots and control them with bypass mask */
+ mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N, 7);
for (i = 0; i < 16; i++)
mt76x02_mac_set_bssid(dev, i, null_addr);
@@ -769,13 +772,13 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
u16 rate = le16_to_cpu(rxwi->rate);
u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
- int pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
+ int pad_len = 0, nstreams = dev->chainmask & 0xf;
s8 signal;
u8 pn_len;
u8 wcid;
int len;
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
return -EINVAL;
if (rxinfo & MT_RXINFO_L2PAD)
@@ -824,7 +827,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
if (rxinfo & MT_RXINFO_AMPDU) {
status->flag |= RX_FLAG_AMPDU_DETAILS;
- status->ampdu_ref = dev->mt76.ampdu_ref;
+ status->ampdu_ref = dev->ampdu_ref;
/*
* When receiving an A-MPDU subframe and RSSI info is not valid,
@@ -832,8 +835,8 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
* are coming. The last one will have valid RSSI info
*/
if (rxinfo & MT_RXINFO_RSSI) {
- if (!++dev->mt76.ampdu_ref)
- dev->mt76.ampdu_ref++;
+ if (!++dev->ampdu_ref)
+ dev->ampdu_ref++;
}
}
@@ -853,8 +856,8 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
signal = max_t(s8, signal, status->chain_signal[1]);
}
status->signal = signal;
- status->freq = dev->mt76.chandef.chan->center_freq;
- status->band = dev->mt76.chandef.chan->band;
+ status->freq = dev->mphy.chandef.chan->center_freq;
+ status->band = dev->mphy.chandef.chan->band;
status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
@@ -868,7 +871,7 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
u8 update = 1;
bool ret;
- if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
return;
trace_mac_txstat_poll(dev);
@@ -908,7 +911,7 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
txwi = (struct mt76x02_txwi *)txwi_ptr;
- trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
+ trace_mac_txdone(mdev, txwi->wcid, txwi->pktid);
mt76_tx_complete_skb(mdev, e->skb);
}
@@ -1018,7 +1021,7 @@ void mt76x02_update_channel(struct mt76_dev *mdev)
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76_channel_state *state;
- state = mdev->chan_state;
+ state = mdev->phy.chan_state;
state->cc_busy += mt76_rr(dev, MT_CH_BUSY);
spin_lock_bh(&dev->mt76.cc_lock);
@@ -1074,7 +1077,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev)
dev->ed_silent = 0;
if (dev->ed_monitor) {
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;
mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
@@ -1184,7 +1187,7 @@ void mt76x02_mac_work(struct work_struct *work)
void mt76x02_mac_cc_reset(struct mt76x02_dev *dev)
{
- dev->mt76.survey_time = ktime_get_boottime();
+ dev->mphy.survey_time = ktime_get_boottime();
mt76_wr(dev, MT_CH_TIME_CFG,
MT_CH_TIME_CFG_TIMER_EN |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index 7d946aa77182..c70d17b2290c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -152,7 +152,7 @@ static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev)
int i;
for (i = 0; i < 500; i++) {
- if (test_bit(MT76_REMOVED, &dev->state))
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
return false;
switch (dev->bus->rr(dev, MAC_CSR0)) {
@@ -201,8 +201,7 @@ void mt76x02_mac_work(struct work_struct *work);
void mt76x02_mac_cc_reset(struct mt76x02_dev *dev);
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
-int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
- struct sk_buff *skb);
+void mt76x02_mac_set_beacon(struct mt76x02_dev *dev, struct sk_buff *skb);
void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
struct ieee80211_vif *vif, bool enable);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
index 6274b6a24b07..5664749ad6c1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -24,11 +24,11 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
if (!skb)
return -ENOMEM;
- mutex_lock(&mdev->mmio.mcu.mutex);
+ mutex_lock(&mdev->mcu.mutex);
- seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mcu.msg_seq & 0xf;
if (!seq)
- seq = ++mdev->mmio.mcu.msg_seq & 0xf;
+ seq = ++mdev->mcu.msg_seq & 0xf;
tx_info = MT_MCU_MSG_TYPE_CMD |
FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
@@ -65,7 +65,7 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
}
out:
- mutex_unlock(&mdev->mmio.mcu.mutex);
+ mutex_unlock(&mdev->mcu.mutex);
return ret;
}
@@ -141,7 +141,7 @@ int mt76x02_mcu_cleanup(struct mt76x02_dev *dev)
mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
usleep_range(20000, 30000);
- while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL)
+ while ((skb = skb_dequeue(&dev->mt76.mcu.res_q)) != NULL)
dev_kfree_skb(skb);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 4e2371c926d8..7dcc5d342e9f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -9,7 +9,7 @@
#include "mt76x02.h"
#include "mt76x02_mcu.h"
-#include "mt76x02_trace.h"
+#include "trace.h"
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
@@ -24,10 +24,17 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
mt76x02_resync_beacon_timer(dev);
+ /* Prevent corrupt transmissions during update */
+ mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
+ dev->beacon_data_count = 0;
+
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76x02_update_beacon_iter, dev);
+ mt76_wr(dev, MT_BCN_BYPASS_MASK,
+ 0xff00 | ~(0xff00 >> dev->beacon_data_count));
+
mt76_csa_check(&dev->mt76);
if (dev->mt76.csa_complete)
@@ -151,7 +158,7 @@ static void mt76x02_tx_tasklet(unsigned long data)
mt76x02_mac_poll_tx_status(dev, false);
mt76x02_process_tx_status_fifo(dev);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
}
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
@@ -261,10 +268,10 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return IRQ_NONE;
- trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
intr &= dev->mt76.mmio.irqmask;
@@ -402,7 +409,7 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
lockdep_assert_held(&dev->mt76.mutex);
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
rcu_read_lock();
ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
@@ -420,6 +427,8 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
if (!wcid)
continue;
+ rcu_assign_pointer(dev->mt76.wcid[i], NULL);
+
priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
sta = container_of(priv, struct ieee80211_sta, drv_priv);
@@ -441,7 +450,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
int i;
ieee80211_stop_queues(dev->mt76.hw);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
tasklet_disable(&dev->mt76.tx_tasklet);
@@ -452,6 +461,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mutex_lock(&dev->mt76.mutex);
+ dev->mcu_timeout = 0;
if (restart)
mt76x02_reset_state(dev);
@@ -476,7 +486,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
if (restart)
mt76_mcu_restart(dev);
- for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
+ for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_tx_cleanup(dev, i, true);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
@@ -496,7 +506,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mutex_unlock(&dev->mt76.mutex);
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
tasklet_enable(&dev->mt76.tx_tasklet);
napi_enable(&dev->mt76.tx_napi);
@@ -514,7 +524,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
ieee80211_restart_hw(dev->mt76.hw);
} else {
ieee80211_wake_queues(dev->mt76.hw);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
}
}
@@ -535,10 +545,6 @@ static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
restart:
mt76x02_watchdog_reset(dev);
- mutex_lock(&dev->mt76.mmio.mcu.mutex);
- dev->mcu_timeout = 0;
- mutex_unlock(&dev->mt76.mmio.mcu.mutex);
-
dev->tx_hang_reset++;
dev->tx_hang_check = 0;
memset(dev->mt76.tx_dma_idx, 0xff,
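
A standalone worked example of the MT_BCN_BYPASS_MASK arithmetic used in the pre-TBTT tasklet above (plain C, hosted; the register semantics — a set bit bypasses a beacon slot — are an assumption drawn from the surrounding comments):

#include <stdio.h>

int main(void)
{
	/* With n beacon slots written, bits 7..(8 - n) come out cleared,
	 * i.e. fresh beacon data fills the slots downward from 7 while
	 * bits 15:8 stay forced on.
	 */
	for (unsigned n = 0; n <= 8; n++)
		printf("count=%u mask=%04x\n", n,
		       (0xff00u | ~(0xff00u >> n)) & 0xffffu);
	return 0;	/* ffff, ff7f, ff3f, ff1f, ..., ff00 */
}
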
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index d7334267b530..aaadc15ea83c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -16,7 +16,7 @@ void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev)
val = mt76_rr(dev, MT_BBP(AGC, 0));
val &= ~BIT(4);
- switch (dev->mt76.chainmask & 0xf) {
+ switch (dev->chainmask & 0xf) {
case 2:
val |= BIT(3);
break;
@@ -35,7 +35,7 @@ void mt76x02_phy_set_txdac(struct mt76x02_dev *dev)
{
int txpath;
- txpath = (dev->mt76.chainmask >> 8) & 0xf;
+ txpath = (dev->chainmask >> 8) & 0xf;
switch (txpath) {
case 2:
mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
index fc2e41006a0d..1def25bf735a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
@@ -11,7 +11,7 @@
static inline int
mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev)
{
- switch (dev->mt76.chandef.width) {
+ switch (dev->mphy.chandef.width) {
case NL80211_CHAN_WIDTH_80:
return -62;
case NL80211_CHAN_WIDTH_40:
@@ -24,7 +24,7 @@ mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev)
static inline int
mt76x02_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
{
- switch (dev->mt76.chandef.width) {
+ switch (dev->mphy.chandef.width) {
case NL80211_CHAN_WIDTH_80:
return -76;
case NL80211_CHAN_WIDTH_40:
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index 21c0f351fa09..3e722276b5c2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -235,15 +235,9 @@
#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n))
#define MT_LED_S1_BASE 0x0780
#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n))
-#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24)
-#define MT_LED_STATUS_OFF(_v) (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
- MT_LED_STATUS_OFF_MASK)
-#define MT_LED_STATUS_ON_MASK GENMASK(23, 16)
-#define MT_LED_STATUS_ON(_v) (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
- MT_LED_STATUS_ON_MASK)
-#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 8)
-#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
- MT_LED_STATUS_DURATION_MASK)
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 8)
#define MT_FCE_PSE_CTRL 0x0800
#define MT_FCE_PARAMETERS 0x0804
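
The three hand-rolled shift-and-mask helpers removed here do exactly what FIELD_PREP() does over a bare GENMASK(); a minimal kernel-style illustration (one mask shown; the inline helpers are illustrative):

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/types.h>

#define MT_LED_STATUS_ON	GENMASK(23, 16)

/* Both variants place val into bits 23:16; FIELD_PREP() additionally
 * validates the constant mask at compile time.
 */
static inline u32 led_on_old(u32 val)
{
	return (val << __ffs(MT_LED_STATUS_ON)) & MT_LED_STATUS_ON;
}

static inline u32 led_on_new(u32 val)
{
	return FIELD_PREP(MT_LED_STATUS_ON, val);
}
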
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
index 61ecaf0fe065..6a98092e996b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h
@@ -20,7 +20,6 @@
#define DEV_PR_ARG __entry->wiphy_name
#define TXID_ENTRY __field(u8, wcid) __field(u8, pktid)
-#define TXID_ASSIGN __entry->wcid = wcid; __entry->pktid = pktid
#define TXID_PR_FMT " [%d:%d]"
#define TXID_PR_ARG __entry->wcid, __entry->pktid
@@ -36,28 +35,6 @@ DECLARE_EVENT_CLASS(dev_evt,
TP_printk(DEV_PR_FMT, DEV_PR_ARG)
);
-DECLARE_EVENT_CLASS(dev_txid_evt,
- TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid),
- TP_ARGS(dev, wcid, pktid),
- TP_STRUCT__entry(
- DEV_ENTRY
- TXID_ENTRY
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- TXID_ASSIGN;
- ),
- TP_printk(
- DEV_PR_FMT TXID_PR_FMT,
- DEV_PR_ARG, TXID_PR_ARG
- )
-);
-
-DEFINE_EVENT(dev_txid_evt, mac_txdone_add,
- TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid),
- TP_ARGS(dev, wcid, pktid)
-);
-
DEFINE_EVENT(dev_evt, mac_txstat_poll,
TP_PROTO(struct mt76x02_dev *dev),
TP_ARGS(dev)
@@ -100,29 +77,6 @@ TRACE_EVENT(mac_txstat_fetch,
)
);
-TRACE_EVENT(dev_irq,
- TP_PROTO(struct mt76x02_dev *dev, u32 val, u32 mask),
-
- TP_ARGS(dev, val, mask),
-
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u32, val)
- __field(u32, mask)
- ),
-
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->val = val;
- __entry->mask = mask;
- ),
-
- TP_printk(
- DEV_PR_FMT " %08x & %08x",
- DEV_PR_ARG, __entry->val, __entry->mask
- )
-);
-
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
index 13825f642087..96fdf423a348 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -28,7 +28,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
wcid = &mvif->group_wcid;
}
- mt76_tx(&dev->mt76, control->sta, wcid, skb);
+ mt76_tx(&dev->mphy, control->sta, wcid, skb);
}
EXPORT_SYMBOL_GPL(mt76x02_tx);
@@ -39,7 +39,6 @@ void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
void *rxwi = skb->data;
if (q == MT_RXQ_MCU) {
- /* this is used just by mmio code */
mt76_mcu_rx_event(&dev->mt76, skb);
return;
}
@@ -74,7 +73,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf];
} else {
- enum nl80211_band band = dev->mt76.chandef.chan->band;
+ enum nl80211_band band = dev->mphy.chandef.chan->band;
if (band == NL80211_BAND_2GHZ) {
const struct ieee80211_rate *r;
@@ -96,7 +95,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
{
- txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf);
+ txpwr = min_t(s8, txpwr, dev->txpower_conf);
txpwr -= (dev->target_power + dev->target_power_delta[0]);
txpwr = min_t(s8, txpwr, max_txpwr_adj);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index d03d3c8e296c..0180b6200b17 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -46,8 +46,7 @@ EXPORT_SYMBOL_GPL(mt76x02u_mac_start);
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
- struct sk_buff *iter, *last = skb;
- u32 info, pad;
+ u32 info;
/* Buffer layout:
* | 4B | xfer len | pad | 4B |
@@ -57,28 +56,8 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
*/
info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
- put_unaligned_le32(info, skb_push(skb, sizeof(info)));
- /* Add zero pad of 4 - 7 bytes */
- pad = round_up(skb->len, 4) + 4 - skb->len;
-
- /* First packet of a A-MSDU burst keeps track of the whole burst
- * length, need to update length of it and the last packet.
- */
- skb_walk_frags(skb, iter) {
- last = iter;
- if (!iter->next) {
- skb->data_len += pad;
- skb->len += pad;
- break;
- }
- }
-
- if (skb_pad(last, pad))
- return -ENOMEM;
- __skb_put(last, pad);
-
- return 0;
+ return mt76u_skb_dma_info(skb, info);
}
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
@@ -198,7 +177,7 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work)
container_of(work, struct mt76x02_dev, pre_tbtt_work);
struct beacon_bc_data data = {};
struct sk_buff *skb;
- int i, nbeacons;
+ int nbeacons;
if (!dev->mt76.beacon_mask)
return;
@@ -208,17 +187,30 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work)
mt76x02_resync_beacon_timer(dev);
+ /* Prevent corrupt transmissions during update */
+ mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
+ dev->beacon_data_count = 0;
+
ieee80211_iterate_active_interfaces(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76x02_update_beacon_iter, dev);
+ mt76_csa_check(&dev->mt76);
+
+ if (dev->mt76.csa_complete) {
+ mt76_csa_finish(&dev->mt76);
+ goto out;
+ }
+
nbeacons = hweight8(dev->mt76.beacon_mask);
mt76x02_enqueue_buffered_bc(dev, &data, N_BCN_SLOTS - nbeacons);
- for (i = nbeacons; i < N_BCN_SLOTS; i++) {
- skb = __skb_dequeue(&data.q);
- mt76x02_mac_set_beacon(dev, i, skb);
- }
+ while ((skb = __skb_dequeue(&data.q)) != NULL)
+ mt76x02_mac_set_beacon(dev, skb);
+
+out:
+ mt76_wr(dev, MT_BCN_BYPASS_MASK,
+ 0xff00 | ~(0xff00 >> dev->beacon_data_count));
mt76x02u_restart_pre_tbtt_timer(dev);
}
@@ -244,20 +236,11 @@ static void mt76x02u_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
static void mt76x02u_beacon_enable(struct mt76x02_dev *dev, bool en)
{
- int i;
-
if (WARN_ON_ONCE(!dev->mt76.beacon_int))
return;
- if (en) {
+ if (en)
mt76x02u_start_pre_tbtt_timer(dev);
- } else {
- /* Timer is already stopped, only clean up
- * PS buffered frames if any.
- */
- for (i = 0; i < N_BCN_SLOTS; i++)
- mt76x02_mac_set_beacon(dev, i, NULL);
- }
}
void mt76x02u_init_beacon_config(struct mt76x02_dev *dev)
@@ -280,7 +263,7 @@ EXPORT_SYMBOL_GPL(mt76x02u_init_beacon_config);
void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev)
{
- if (!test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (!test_bit(MT76_REMOVED, &dev->mphy.state))
mt76_clear(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE |
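
The padding logic dropped from mt76x02u_skb_dma_info() above (now delegated to mt76u_skb_dma_info()) computed a 4-7 byte zero pad: round the payload up to 4 bytes, then append 4 more. A standalone check of that arithmetic (plain C, hosted):

#include <stdio.h>

static unsigned int usb_tx_pad(unsigned int len)
{
	return ((len + 3) & ~3u) + 4 - len;	/* round_up(len, 4) + 4 - len */
}

int main(void)
{
	printf("%u %u %u %u\n", usb_tx_pad(60), usb_tx_pad(61),
	       usb_tx_pad(62), usb_tx_pad(63));	/* 4 7 6 5 */
	return 0;
}
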
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index a993cd7e9948..843b86560ed4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -55,7 +55,8 @@ static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
u32 rxfce;
for (i = 0; i < 5; i++) {
- ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len, 300);
+ ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len,
+ 300, MT_EP_IN_CMD_RESP);
if (ret == -ETIMEDOUT)
continue;
if (ret)
@@ -82,18 +83,17 @@ static int
__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp)
{
- struct mt76_usb *usb = &dev->usb;
- int ret;
u8 seq = 0;
u32 info;
+ int ret;
- if (test_bit(MT76_REMOVED, &dev->state))
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
return 0;
if (wait_resp) {
- seq = ++usb->mcu.msg_seq & 0xf;
+ seq = ++dev->mcu.msg_seq & 0xf;
if (!seq)
- seq = ++usb->mcu.msg_seq & 0xf;
+ seq = ++dev->mcu.msg_seq & 0xf;
}
info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
@@ -103,7 +103,8 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
if (ret)
return ret;
- ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500);
+ ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500,
+ MT_EP_OUT_INBAND_CMD);
if (ret)
return ret;
@@ -119,7 +120,6 @@ static int
mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp)
{
- struct mt76_usb *usb = &dev->usb;
struct sk_buff *skb;
int err;
@@ -127,9 +127,9 @@ mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
if (!skb)
return -ENOMEM;
- mutex_lock(&usb->mcu.mutex);
+ mutex_lock(&dev->mcu.mutex);
err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
- mutex_unlock(&usb->mcu.mutex);
+ mutex_unlock(&dev->mcu.mutex);
return err;
}
@@ -143,9 +143,8 @@ static int
mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
const struct mt76_reg_pair *data, int n)
{
- const int CMD_RANDOM_WRITE = 12;
const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
- struct mt76_usb *usb = &dev->usb;
+ const int CMD_RANDOM_WRITE = 12;
struct sk_buff *skb;
int cnt, i, ret;
@@ -164,9 +163,9 @@ mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
skb_put_le32(skb, data[i].value);
}
- mutex_lock(&usb->mcu.mutex);
+ mutex_lock(&dev->mcu.mutex);
ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
- mutex_unlock(&usb->mcu.mutex);
+ mutex_unlock(&dev->mcu.mutex);
if (ret)
return ret;
@@ -200,7 +199,7 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
skb_put_le32(skb, data[i].value);
}
- mutex_lock(&usb->mcu.mutex);
+ mutex_lock(&dev->mcu.mutex);
usb->mcu.rp = data;
usb->mcu.rp_len = n;
@@ -211,7 +210,7 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
usb->mcu.rp = NULL;
- mutex_unlock(&usb->mcu.mutex);
+ mutex_unlock(&dev->mcu.mutex);
return ret;
}
@@ -248,7 +247,8 @@ __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
data_len = MT_CMD_HDR_LEN + len + sizeof(info);
- err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000);
+ err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000,
+ MT_EP_OUT_INBAND_CMD);
if (err) {
dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 0960fc56b672..b7a120b0856d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -96,9 +96,9 @@ mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on,
mt76);
u32 val;
- val = MT_LED_STATUS_DURATION(0xff) |
- MT_LED_STATUS_OFF(delay_off) |
- MT_LED_STATUS_ON(delay_on);
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
mt76_wr(dev, MT_LED_S0(mdev->led_pin), val);
mt76_wr(dev, MT_LED_S1(mdev->led_pin), val);
@@ -166,7 +166,6 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
wiphy->reg_notifier = mt76x02_regd_notifier;
wiphy->iface_combinations = mt76x02_if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02_if_comb);
- wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
@@ -182,21 +181,22 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
hw->vif_data_size = sizeof(struct mt76x02_vif);
ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
dev->mt76.global_wcid.idx = 255;
dev->mt76.global_wcid.hw_key_idx = -1;
dev->slottime = 9;
if (is_mt76x2(dev)) {
- dev->mt76.sband_2g.sband.ht_cap.cap |=
+ dev->mphy.sband_2g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.sband_5g.sband.ht_cap.cap |=
+ dev->mphy.sband_5g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.chainmask = 0x202;
- dev->mt76.antenna_mask = 3;
+ dev->chainmask = 0x202;
+ dev->mphy.antenna_mask = 3;
} else {
- dev->mt76.chainmask = 0x101;
- dev->mt76.antenna_mask = 1;
+ dev->chainmask = 0x101;
+ dev->mphy.antenna_mask = 1;
}
}
EXPORT_SYMBOL_GPL(mt76x02_init_device);
@@ -325,7 +325,9 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (vif->type == NL80211_IFTYPE_STATION)
idx += 8;
- if (dev->vif_mask & BIT(idx))
+ /* vif is already set or idx is 8 for AP/Mesh/... */
+ if (dev->vif_mask & BIT(idx) ||
+ (vif->type != NL80211_IFTYPE_STATION && idx > 7))
return -EBUSY;
dev->vif_mask |= BIT(idx);
@@ -545,7 +547,7 @@ void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
- dev->coverage_class = coverage_class;
+ dev->coverage_class = max_t(s16, coverage_class, 0);
mt76x02_set_tx_ackto(dev);
mutex_unlock(&dev->mt76.mutex);
}
@@ -602,7 +604,7 @@ void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_SCANNING, &dev->mt76.state);
+ clear_bit(MT76_SCANNING, &dev->mphy.state);
if (dev->cal.gain_init_done) {
/* Restore AGC gain and resume calibration after scanning. */
dev->cal.low_gain = -1;
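
A compact restatement of the interface-slot check added in mt76x02_add_interface() above; the helper is illustrative, not driver API. STA vifs are shifted into hardware indices 8..15, so everything else must land in 0..7:

/* Busy if the slot is taken, or if a non-STA vif resolves past slot 7. */
static bool vif_slot_busy(unsigned int vif_mask, unsigned int idx,
			  bool is_sta)
{
	return (vif_mask & (1U << idx)) || (!is_sta && idx > 7);
}
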
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
index 7b2b187fbf47..caf089538c11 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
@@ -13,5 +13,3 @@ mt76x2e-y := \
mt76x2u-y := \
usb.o usb_init.o usb_main.o usb_mac.o usb_mcu.o \
usb_phy.o
-
-CFLAGS_pci_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index 9f91556c7f38..4a748a6f0ce2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -248,7 +248,7 @@ mt76x2_get_5g_rx_gain(struct mt76x02_dev *dev, u8 channel)
void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int channel = chan->hw_value;
s8 lna_5g[3], lna_2g;
u8 lna;
@@ -455,7 +455,7 @@ EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t)
{
- enum nl80211_band band = dev->mt76.chandef.chan->band;
+ enum nl80211_band band = dev->mphy.chandef.chan->band;
u16 val, slope;
u8 bounds;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
index 4dcf6518cb0d..3755632e6494 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
@@ -53,7 +53,7 @@ mt76x2_has_ext_lna(struct mt76x02_dev *dev)
{
u32 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
- if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_2GHZ)
return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
else
return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index 79e583eb066b..a92a479aebaa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -82,7 +82,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
{ MT_PBF_SYS_CTRL, 0x00080c00 },
{ MT_PBF_CFG, 0x1efebcff },
{ MT_FCE_PSE_CTRL, 0x00000001 },
- { MT_MAC_SYS_CTRL, 0x0000000c },
+ { MT_MAC_SYS_CTRL, 0x00000000 },
{ MT_MAX_LEN_CFG, 0x003e3f00 },
{ MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
{ MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
index 76d8cd37d4de..9635c04ce032 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
@@ -29,7 +29,7 @@ int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
.idx = channel,
.scan = scan,
.bw = bw,
- .chainmask = cpu_to_le16(dev->mt76.chainmask),
+ .chainmask = cpu_to_le16(dev->chainmask),
};
/* first set the channel without the extension channel info */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index 41680c420cda..eca95b7f64d2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -30,7 +30,7 @@ static inline bool is_mt7612(struct mt76x02_dev *dev)
static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
return ((chan->flags & IEEE80211_CHAN_RADAR) &&
chan->dfs_state != NL80211_DFS_AVAILABLE);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 33fcec9179b2..c69579e5f647 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -239,7 +239,7 @@ static int mt76x2_init_hardware(struct mt76x02_dev *dev)
if (ret)
return ret;
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
mt76x02_mac_start(dev);
ret = mt76x2_mcu_init(dev);
@@ -289,8 +289,8 @@ int mt76x2_register_device(struct mt76x02_dev *dev)
goto fail;
mt76x02_init_debugfs(dev);
- mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
- mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_5g.sband);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index cfe8905ce73f..105e5b99b3f9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -22,7 +22,7 @@ mt76x2_start(struct ieee80211_hw *hw)
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
MT_WATCHDOG_TIME);
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
return 0;
}
@@ -31,7 +31,7 @@ mt76x2_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
mt76x2_stop_hardware(dev);
}
@@ -45,9 +45,9 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
- mt76_set_channel(&dev->mt76);
+ mt76_set_channel(&dev->mphy);
mt76x2_mac_stop(dev, true);
ret = mt76x2_phy_set_channel(dev, chandef);
@@ -57,13 +57,13 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76x2_mac_resume(dev);
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
mutex_unlock(&dev->mt76.mutex);
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
return ret;
}
@@ -86,14 +86,14 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- dev->mt76.txpower_conf = hw->conf.power_level * 2;
+ dev->txpower_conf = hw->conf.power_level * 2;
/* convert to per-chain power for 2x2 devices */
- dev->mt76.txpower_conf -= 6;
+ dev->txpower_conf -= 6;
- if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
mt76x2_phy_set_txpower(dev);
- mt76x02_tx_set_txpwr_auto(dev, dev->mt76.txpower_conf);
+ mt76x02_tx_set_txpwr_auto(dev, dev->txpower_conf);
}
}
@@ -124,8 +124,8 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
mutex_lock(&dev->mt76.mutex);
- dev->mt76.chainmask = (tx_ant == 3) ? 0x202 : 0x101;
- dev->mt76.antenna_mask = tx_ant;
+ dev->chainmask = (tx_ant == 3) ? 0x202 : 0x101;
+ dev->mphy.antenna_mask = tx_ant;
mt76_set_stream_caps(&dev->mt76, true);
mt76x2_phy_set_antenna(dev);
@@ -145,6 +145,7 @@ const struct ieee80211_ops mt76x2_ops = {
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x02_bss_info_changed,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76_sw_scan,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
index 23f35bf8d47b..8831337df23e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
@@ -12,7 +12,7 @@
static bool
mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
u32 flag = 0;
if (!mt76x2_tssi_enabled(dev))
@@ -35,7 +35,7 @@ mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
static void
mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
if (dev->cal.channel_cal_done)
@@ -74,7 +74,7 @@ void mt76x2_phy_set_antenna(struct mt76x02_dev *dev)
val = mt76_rr(dev, MT_BBP(AGC, 0));
val &= ~(BIT(4) | BIT(1));
- switch (dev->mt76.antenna_mask) {
+ switch (dev->mphy.antenna_mask) {
case 1:
/* disable mac DAC control */
mt76_clear(dev, MT_BBP(IBI, 9), BIT(11));
@@ -118,7 +118,7 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
{
struct ieee80211_channel *chan = chandef->chan;
- bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ bool scan = test_bit(MT76_SCANNING, &dev->mphy.state);
enum nl80211_band band = chan->band;
u8 channel;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index edbab4fa7f6e..ed2dcb05d614 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -136,8 +136,8 @@ mt76x2_get_min_rate_power(struct mt76_rate_power *r)
void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
{
- enum nl80211_chan_width width = dev->mt76.chandef.width;
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ enum nl80211_chan_width width = dev->mphy.chandef.width;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
struct mt76x2_tx_power_info txp;
int txp_0, txp_1, delta = 0;
struct mt76_rate_power t = {};
@@ -152,8 +152,8 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
mt76x2_get_rate_power(dev, &t, chan);
mt76x02_add_rate_power_offset(&t, txp.target_power + delta);
- mt76x02_limit_rate_power(&t, dev->mt76.txpower_conf);
- dev->mt76.txpower_cur = mt76x02_get_max_rate_power(&t);
+ mt76x02_limit_rate_power(&t, dev->txpower_conf);
+ dev->mphy.txpower_cur = mt76x02_get_max_rate_power(&t);
base_power = mt76x2_get_min_rate_power(&t);
delta = base_power - txp.target_power;
@@ -202,7 +202,7 @@ EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
struct mt76x2_tx_power_info txp;
struct mt76x2_tssi_comp t = {};
@@ -252,12 +252,12 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
val = 0x1836 << 16;
if (!mt76x2_has_ext_lna(dev) &&
- dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+ dev->mphy.chandef.width >= NL80211_CHAN_WIDTH_40)
val = 0x1e42 << 16;
if (mt76x2_has_ext_lna(dev) &&
- dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ &&
- dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40)
+ dev->mphy.chandef.chan->band == NL80211_BAND_2GHZ &&
+ dev->mphy.chandef.width < NL80211_CHAN_WIDTH_40)
val = 0x0f36 << 16;
val |= 0xf8;
@@ -267,7 +267,7 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
mt76_wr(dev, MT_BBP(AGC, 9),
val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
- if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
+ if (dev->mphy.chandef.chan->flags & IEEE80211_CHAN_RADAR)
mt76x02_phy_dfs_adjust_agc(dev);
}
@@ -280,7 +280,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
int low_gain;
u32 val;
- dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76);
+ dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76, false);
if (!dev->cal.avg_rssi_all)
dev->cal.avg_rssi_all = -75;
@@ -297,7 +297,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
return;
}
- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) {
+ if (dev->mphy.chandef.width == NL80211_CHAN_WIDTH_80) {
mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
if (low_gain == 2)
@@ -315,11 +315,11 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
low_gain_delta = 14;
agc_37 = 0x2121262c;
- if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_2GHZ)
agc_35 = 0x11111516;
else if (low_gain == 2)
agc_35 = agc_37 = 0x08080808;
- else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+ else if (dev->mphy.chandef.width == NL80211_CHAN_WIDTH_80)
agc_35 = 0x10101014;
else
agc_35 = 0x11111116;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index b64ad816cc25..eafa283ca699 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -54,7 +54,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
mt76x02u_init_mcu(mdev);
- err = mt76u_init(mdev, intf);
+ err = mt76u_init(mdev, intf, false);
if (err < 0)
goto err;
@@ -86,7 +86,7 @@ static void mt76x2u_disconnect(struct usb_interface *intf)
struct mt76x02_dev *dev = usb_get_intfdata(intf);
struct ieee80211_hw *hw = mt76_hw(dev);
- set_bit(MT76_REMOVED, &dev->mt76.state);
+ set_bit(MT76_REMOVED, &dev->mphy.state);
ieee80211_unregister_hw(hw);
mt76x2u_cleanup(dev);
mt76u_deinit(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index 2910068f4e79..ffc2deba29ac 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -190,6 +190,7 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
int mt76x2u_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt76_usb *usb = &dev->mt76.usb;
int err;
INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
@@ -199,6 +200,11 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
if (err < 0)
return err;
+ usb->mcu.data = devm_kmalloc(dev->mt76.dev, MCU_RESP_URB_SIZE,
+ GFP_KERNEL);
+ if (!usb->mcu.data)
+ return -ENOMEM;
+
err = mt76u_alloc_queues(&dev->mt76);
if (err < 0)
goto fail;
@@ -207,22 +213,18 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
if (err < 0)
goto fail;
+ /* check hw sg support in order to enable AMSDU */
+ hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_TX_SG_MAX_SIZE : 1;
err = mt76_register_device(&dev->mt76, true, mt76x02_rates,
ARRAY_SIZE(mt76x02_rates));
if (err)
goto fail;
- /* check hw sg support in order to enable AMSDU */
- if (dev->mt76.usb.sg_en)
- hw->max_tx_fragments = MT_TX_SG_MAX_SIZE;
- else
- hw->max_tx_fragments = 1;
-
- set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
mt76x02_init_debugfs(dev);
- mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
- mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt76x2_init_txpower(dev, &dev->mphy.sband_5g.sband);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
index 59cbe826188a..eaa622833f85 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -98,7 +98,7 @@ int mt76x2u_mac_stop(struct mt76x02_dev *dev)
bool stopped = false;
u32 rts_cfg;
- if (test_bit(MT76_REMOVED, &dev->mt76.state))
+ if (test_bit(MT76_REMOVED, &dev->mphy.state))
return -EIO;
rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 9e97204841f5..bab4e6e1904e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -17,7 +17,7 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
- set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ set_bit(MT76_STATE_RUNNING, &dev->mphy.state);
return 0;
}
@@ -26,7 +26,7 @@ static void mt76x2u_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);
mt76u_stop_tx(&dev->mt76);
mt76x2u_stop_hw(dev);
}
@@ -41,9 +41,9 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
mt76x02_pre_tbtt_enable(dev, false);
mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mt76.state);
+ set_bit(MT76_RESET, &dev->mphy.state);
- mt76_set_channel(&dev->mt76);
+ mt76_set_channel(&dev->mphy);
mt76x2_mac_stop(dev, false);
@@ -52,11 +52,11 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
mt76x02_mac_cc_reset(dev);
mt76x2_mac_resume(dev);
- clear_bit(MT76_RESET, &dev->mt76.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
mutex_unlock(&dev->mt76.mutex);
mt76x02_pre_tbtt_enable(dev, true);
- mt76_txq_schedule_all(&dev->mt76);
+ mt76_txq_schedule_all(&dev->mphy);
return err;
}
@@ -78,12 +78,12 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- dev->mt76.txpower_conf = hw->conf.power_level * 2;
+ dev->txpower_conf = hw->conf.power_level * 2;
/* convert to per-chain power for 2x2 devices */
- dev->mt76.txpower_conf -= 6;
+ dev->txpower_conf -= 6;
- if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
mt76x2_phy_set_txpower(dev);
}
@@ -105,6 +105,7 @@ const struct ieee80211_ops mt76x2u_ops = {
.add_interface = mt76x02_add_interface,
.remove_interface = mt76x02_remove_interface,
.sta_state = mt76_sta_state,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.set_key = mt76x02_set_key,
.ampdu_action = mt76x02_ampdu_action,
.config = mt76x2u_config,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
index b1381f9df992..a04a98f5ce1e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
@@ -10,7 +10,7 @@
static void
mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ struct ieee80211_channel *chan = dev->mphy.chandef.chan;
bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
if (dev->cal.channel_cal_done)
@@ -82,7 +82,7 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
};
- bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+ bool scan = test_bit(MT76_SCANNING, &dev->mphy.state);
struct ieee80211_channel *chan = chandef->chan;
u8 channel = chan->hw_value, bw, bw_index;
int ch_group_index, freq, freq1, ret;
@@ -185,7 +185,7 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
struct ieee80211_channel *chan;
u32 flag = 0;
- chan = dev->mt76.chandef.chan;
+ chan = dev->mphy.chandef.chan;
if (chan->band == NL80211_BAND_5GHZ)
flag |= BIT(0);
if (mt76x02_ext_pa_enabled(dev, chan->band))
diff --git a/drivers/net/wireless/mediatek/mt76/trace.c b/drivers/net/wireless/mediatek/mt76/trace.c
index ed3df3c8b4b3..f199fcd2a63d 100644
--- a/drivers/net/wireless/mediatek/mt76/trace.c
+++ b/drivers/net/wireless/mediatek/mt76/trace.c
@@ -9,4 +9,7 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
+EXPORT_TRACEPOINT_SYMBOL_GPL(mac_txdone);
+EXPORT_TRACEPOINT_SYMBOL_GPL(dev_irq);
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/trace.h b/drivers/net/wireless/mediatek/mt76/trace.h
index 0b3e635da868..c3d0ef8e2890 100644
--- a/drivers/net/wireless/mediatek/mt76/trace.h
+++ b/drivers/net/wireless/mediatek/mt76/trace.h
@@ -14,7 +14,7 @@
#define MAXNAME 32
#define DEV_ENTRY __array(char, wiphy_name, 32)
-#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+#define DEVICE_ASSIGN strlcpy(__entry->wiphy_name, \
wiphy_name(dev->hw->wiphy), MAXNAME)
#define DEV_PR_FMT "%s"
#define DEV_PR_ARG __entry->wiphy_name
@@ -24,6 +24,11 @@
#define REG_PR_FMT " %04x=%08x"
#define REG_PR_ARG __entry->reg, __entry->val
+#define TXID_ENTRY __field(u8, wcid) __field(u8, pktid)
+#define TXID_ASSIGN __entry->wcid = wcid; __entry->pktid = pktid
+#define TXID_PR_FMT " [%d:%d]"
+#define TXID_PR_ARG __entry->wcid, __entry->pktid
+
DECLARE_EVENT_CLASS(dev_reg_evt,
TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
TP_ARGS(dev, reg, val),
@@ -32,7 +37,7 @@ DECLARE_EVENT_CLASS(dev_reg_evt,
REG_ENTRY
),
TP_fast_assign(
- DEV_ASSIGN;
+ DEVICE_ASSIGN;
REG_ASSIGN;
),
TP_printk(
@@ -51,6 +56,51 @@ DEFINE_EVENT(dev_reg_evt, reg_wr,
TP_ARGS(dev, reg, val)
);
+TRACE_EVENT(dev_irq,
+ TP_PROTO(struct mt76_dev *dev, u32 val, u32 mask),
+
+ TP_ARGS(dev, val, mask),
+
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, val)
+ __field(u32, mask)
+ ),
+
+ TP_fast_assign(
+ DEVICE_ASSIGN;
+ __entry->val = val;
+ __entry->mask = mask;
+ ),
+
+ TP_printk(
+ DEV_PR_FMT " %08x & %08x",
+ DEV_PR_ARG, __entry->val, __entry->mask
+ )
+);
+
+DECLARE_EVENT_CLASS(dev_txid_evt,
+ TP_PROTO(struct mt76_dev *dev, u8 wcid, u8 pktid),
+ TP_ARGS(dev, wcid, pktid),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ TXID_ENTRY
+ ),
+ TP_fast_assign(
+ DEVICE_ASSIGN;
+ TXID_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT TXID_PR_FMT,
+ DEV_PR_ARG, TXID_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_txid_evt, mac_txdone,
+ TP_PROTO(struct mt76_dev *dev, u8 wcid, u8 pktid),
+ TP_ARGS(dev, wcid, pktid)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 7ee91d946882..eff522dbda34 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -109,13 +109,17 @@ void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
__releases(&dev->status_list.unlock)
{
+ struct ieee80211_hw *hw;
struct sk_buff *skb;
spin_unlock_bh(&dev->status_list.lock);
__release(&dev->status_list.unlock);
- while ((skb = __skb_dequeue(list)) != NULL)
- ieee80211_tx_status(dev->hw, skb);
+ while ((skb = __skb_dequeue(list)) != NULL) {
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_tx_status(hw, skb);
+ }
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
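
With two phys sharing one mt76_dev, completed frames have to be reported on the ieee80211_hw of the phy that queued them; that is what the mt76_tx_status_get_hw() calls introduced above resolve per skb. A sketch of the underlying selection, assuming dev->phy2 is only non-NULL once an extension radio is registered:

static struct ieee80211_hw *
status_hw(struct mt76_dev *dev, bool ext_phy)
{
	return ext_phy && dev->phy2 ? dev->phy2->hw : dev->phy.hw;
}
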
@@ -231,10 +235,12 @@ EXPORT_SYMBOL_GPL(mt76_tx_status_check);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
+ struct ieee80211_hw *hw;
struct sk_buff_head list;
if (!skb->prev) {
- ieee80211_free_txskb(dev->hw, skb);
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_free_txskb(hw, skb);
return;
}
@@ -245,13 +251,15 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
void
-mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
{
+ struct mt76_dev *dev = phy->dev;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct mt76_queue *q;
int qid = skb_get_queue_mapping(skb);
+ bool ext_phy = phy != &dev->phy;
if (WARN_ON(qid >= MT_TXQ_PSD)) {
qid = MT_TXQ_BE;
@@ -275,6 +283,9 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
mt76_check_agg_ssn(mtxq, skb);
}
+ if (ext_phy)
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+
q = dev->q_tx[qid].q;
spin_lock_bh(&q->lock);
@@ -282,7 +293,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
dev->queue_ops->kick(dev, q);
if (q->queued > q->ndesc - 8 && !q->stopped) {
- ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+ ieee80211_stop_queue(phy->hw, skb_get_queue_mapping(skb));
q->stopped = true;
}
@@ -291,9 +302,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
EXPORT_SYMBOL_GPL(mt76_tx);
static struct sk_buff *
-mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
+mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq, bool ps)
{
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+ struct ieee80211_tx_info *info;
+ bool ext_phy = phy != &phy->dev->phy;
struct sk_buff *skb;
skb = skb_dequeue(&mtxq->retry_q);
@@ -306,10 +319,14 @@ mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
return skb;
}
- skb = ieee80211_tx_dequeue(dev->hw, txq);
+ skb = ieee80211_tx_dequeue(phy->hw, txq);
if (!skb)
return NULL;
+ info = IEEE80211_SKB_CB(skb);
+ if (ext_phy)
+ info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
+
return skb;
}
@@ -335,7 +352,8 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
bool more_data)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
struct sk_buff *last_skb = NULL;
struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
int i;
@@ -350,7 +368,7 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
continue;
do {
- skb = mt76_txq_dequeue(dev, mtxq, true);
+ skb = mt76_txq_dequeue(phy, mtxq, true);
if (!skb)
break;
@@ -377,9 +395,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
static int
-mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
+mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_sw_queue *sq,
struct mt76_txq *mtxq)
{
+ struct mt76_dev *dev = phy->dev;
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
enum mt76_txq_id qid = mt76_txq_get_qid(txq);
struct mt76_wcid *wcid = mtxq->wcid;
@@ -395,7 +414,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
return 0;
- skb = mt76_txq_dequeue(dev, mtxq, false);
+ skb = mt76_txq_dequeue(phy, mtxq, false);
if (!skb)
return 0;
@@ -423,10 +442,10 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
if (probe)
break;
- if (test_bit(MT76_RESET, &dev->state))
+ if (test_bit(MT76_RESET, &phy->state))
return -EBUSY;
- skb = mt76_txq_dequeue(dev, mtxq, false);
+ skb = mt76_txq_dequeue(phy, mtxq, false);
if (!skb)
break;
@@ -464,8 +483,9 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
}
static int
-mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
+mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
+ struct mt76_dev *dev = phy->dev;
struct mt76_sw_queue *sq = &dev->q_tx[qid];
struct mt76_queue *hwq = sq->q;
struct ieee80211_txq *txq;
@@ -478,12 +498,12 @@ mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
if (sq->swq_queued >= 4)
break;
- if (test_bit(MT76_RESET, &dev->state)) {
+ if (test_bit(MT76_RESET, &phy->state)) {
ret = -EBUSY;
break;
}
- txq = ieee80211_next_txq(dev->hw, qid);
+ txq = ieee80211_next_txq(phy->hw, qid);
if (!txq)
break;
@@ -505,8 +525,8 @@ mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
spin_lock_bh(&hwq->lock);
}
- ret += mt76_txq_send_burst(dev, sq, mtxq);
- ieee80211_return_txq(dev->hw, txq,
+ ret += mt76_txq_send_burst(phy, sq, mtxq);
+ ieee80211_return_txq(phy->hw, txq,
!skb_queue_empty(&mtxq->retry_q));
}
spin_unlock_bh(&hwq->lock);
@@ -514,8 +534,9 @@ mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
return ret;
}
-void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
+void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
+ struct mt76_dev *dev = phy->dev;
struct mt76_sw_queue *sq = &dev->q_tx[qid];
int len;
@@ -528,21 +549,21 @@ void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
rcu_read_lock();
do {
- ieee80211_txq_schedule_start(dev->hw, qid);
- len = mt76_txq_schedule_list(dev, qid);
- ieee80211_txq_schedule_end(dev->hw, qid);
+ ieee80211_txq_schedule_start(phy->hw, qid);
+ len = mt76_txq_schedule_list(phy, qid);
+ ieee80211_txq_schedule_end(phy->hw, qid);
} while (len > 0);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
-void mt76_txq_schedule_all(struct mt76_dev *dev)
+void mt76_txq_schedule_all(struct mt76_phy *phy)
{
int i;
for (i = 0; i <= MT_TXQ_BK; i++)
- mt76_txq_schedule(dev, i);
+ mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
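
The primary phy is embedded in struct mt76_dev, which is why a plain pointer compare is enough to spot the extension radio throughout this file (see mt76_tx() and mt76_txq_dequeue() above); sketched as a helper, name illustrative:

static bool is_ext_phy(struct mt76_phy *phy)
{
	return phy != &phy->dev->phy;
}
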
@@ -550,7 +571,9 @@ void mt76_tx_tasklet(unsigned long data)
{
struct mt76_dev *dev = (struct mt76_dev *)data;
- mt76_txq_schedule_all(dev);
+ mt76_txq_schedule_all(&dev->phy);
+ if (dev->phy2)
+ mt76_txq_schedule_all(dev->phy2);
}
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
@@ -578,9 +601,10 @@ EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
- struct mt76_dev *dev = hw->priv;
+ struct mt76_phy *phy = hw->priv;
+ struct mt76_dev *dev = phy->dev;
- if (!test_bit(MT76_STATE_RUNNING, &dev->state))
+ if (!test_bit(MT76_STATE_RUNNING, &phy->state))
return;
tasklet_schedule(&dev->tx_tasklet);
@@ -589,6 +613,7 @@ EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
+ struct ieee80211_hw *hw;
struct mt76_txq *mtxq;
struct sk_buff *skb;
@@ -597,8 +622,10 @@ void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
mtxq = (struct mt76_txq *)txq->drv_priv;
- while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
- ieee80211_free_txskb(dev->hw, skb);
+ while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL) {
+ hw = mt76_tx_status_get_hw(dev, skb);
+ ieee80211_free_txskb(hw, skb);
+ }
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index d6d47081e281..a981da6c35a5 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -29,13 +29,13 @@ static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
: usb_sndctrlpipe(udev, 0);
for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
- if (test_bit(MT76_REMOVED, &dev->state))
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
return -EIO;
ret = usb_control_msg(udev, pipe, req, req_type, val,
offset, buf, len, MT_VEND_REQ_TOUT_MS);
if (ret == -ENODEV)
- set_bit(MT76_REMOVED, &dev->state);
+ set_bit(MT76_REMOVED, &dev->phy.state);
if (ret >= 0 || ret == -ENODEV)
return ret;
usleep_range(5000, 10000);
@@ -62,12 +62,25 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
-static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
{
struct mt76_usb *usb = &dev->usb;
u32 data = ~0;
- u16 offset;
int ret;
+
+ ret = __mt76u_vendor_request(dev, req,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ addr >> 16, addr, usb->data,
+ sizeof(__le32));
+ if (ret == sizeof(__le32))
+ data = get_unaligned_le32(usb->data);
+ trace_usb_reg_rr(dev, addr, data);
+
+ return data;
+}
+
+static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
u8 req;
switch (addr & MT_VEND_TYPE_MASK) {
@@ -81,16 +94,8 @@ static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
req = MT_VEND_MULTI_READ;
break;
}
- offset = addr & ~MT_VEND_TYPE_MASK;
-
- ret = __mt76u_vendor_request(dev, req,
- USB_DIR_IN | USB_TYPE_VENDOR,
- 0, offset, &usb->reg_val, sizeof(__le32));
- if (ret == sizeof(__le32))
- data = le32_to_cpu(usb->reg_val);
- trace_usb_reg_rr(dev, addr, data);
- return data;
+ return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
}
static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
@@ -104,10 +109,32 @@ static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
return ret;
}
-static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
+ u32 addr, u32 val)
{
struct mt76_usb *usb = &dev->usb;
- u16 offset;
+
+ put_unaligned_le32(val, usb->data);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ addr >> 16, addr, usb->data,
+ sizeof(__le32));
+ trace_usb_reg_wr(dev, addr, val);
+}
+
+static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
u8 req;
switch (addr & MT_VEND_TYPE_MASK) {
@@ -118,13 +145,7 @@ static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
req = MT_VEND_MULTI_WRITE;
break;
}
- offset = addr & ~MT_VEND_TYPE_MASK;
-
- usb->reg_val = cpu_to_le32(val);
- __mt76u_vendor_request(dev, req,
- USB_DIR_OUT | USB_TYPE_VENDOR, 0,
- offset, &usb->reg_val, sizeof(__le32));
- trace_usb_reg_wr(dev, addr, val);
+ ___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
}
static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
@@ -134,6 +155,13 @@ static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
+static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
u32 mask, u32 val)
{
@@ -145,22 +173,94 @@ static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
return val;
}
+static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
const void *data, int len)
{
struct mt76_usb *usb = &dev->usb;
- const u32 *val = data;
- int i, ret;
+ const u8 *val = data;
+ int ret;
+ int current_batch_size;
+ int i = 0;
+
+ /* Ensure that a multiple of 4 bytes is always copied,
+ * otherwise beacons can be corrupted.
+ * See: "mt76: round up length on mt76_wr_copy"
+ * Commit 850e8f6fbd5d0003b0
+ */
+ len = round_up(len, 4);
mutex_lock(&usb->usb_ctrl_mtx);
- for (i = 0; i < DIV_ROUND_UP(len, 4); i++) {
- put_unaligned(val[i], (u32 *)usb->data);
+ while (i < len) {
+ current_batch_size = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, current_batch_size);
ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
USB_DIR_OUT | USB_TYPE_VENDOR,
- 0, offset + i * 4, usb->data,
- sizeof(u32));
+ 0, offset + i, usb->data,
+ current_batch_size);
+ if (ret < 0)
+ break;
+
+ i += current_batch_size;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int ret, i = 0, batch_len;
+ const u8 *val = data;
+
+ len = round_up(len, 4);
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, batch_len);
+ ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
if (ret < 0)
break;
+
+ i += batch_len;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+static void
+mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
+ void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int i = 0, batch_len, ret;
+ u8 *val = data;
+
+ len = round_up(len, 4);
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
+ if (ret < 0)
+ break;
+
+ memcpy(val + i, usb->data, batch_len);
+ i += batch_len;
}
mutex_unlock(&usb->usb_ctrl_mtx);
}
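
The new _ext accessors above carry a full 32-bit register address by splitting it across the two 16-bit fields of the USB control request (addr >> 16 goes out as wValue, the low half as wIndex), where the older accessors masked the offset down to 16 bits. A sketch of the split, names illustrative:

static void split_reg_addr(u32 addr, u16 *wvalue, u16 *windex)
{
	*wvalue = (u16)(addr >> 16);	/* high half */
	*windex = (u16)addr;		/* low half */
}
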
@@ -200,7 +300,7 @@ static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
const struct mt76_reg_pair *data, int n)
{
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
else
return mt76u_req_wr_rp(dev, base, data, n);
@@ -227,7 +327,7 @@ static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *data, int n)
{
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
else
return mt76u_req_rd_rp(dev, base, data, n);
@@ -306,11 +406,12 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
}
static int
-mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
+mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
+ struct urb *urb, int nsgs, gfp_t gfp)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
- if (dev->usb.sg_en)
+ if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
urb->transfer_buffer_length = q->buf_size;
@@ -334,23 +435,25 @@ mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
usb_init_urb(e->urb);
- if (dev->usb.sg_en)
+ if (dev->usb.sg_en && sg_max_size > 0)
e->urb->sg = (struct scatterlist *)(e->urb + 1);
return 0;
}
static int
-mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
+mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e)
{
- int err;
+ enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
+ int err, sg_size;
- err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE);
+ sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
+ err = mt76u_urb_alloc(dev, e, sg_size);
if (err)
return err;
- return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE,
- GFP_KERNEL);
+ return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
}
static void mt76u_urb_free(struct urb *urb)
@@ -386,10 +489,9 @@ mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
urb->context = context;
}
-static inline struct urb *
-mt76u_get_next_rx_entry(struct mt76_dev *dev)
+static struct urb *
+mt76u_get_next_rx_entry(struct mt76_queue *q)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
struct urb *urb = NULL;
unsigned long flags;
@@ -404,14 +506,17 @@ mt76u_get_next_rx_entry(struct mt76_dev *dev)
return urb;
}
-static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
+static int
+mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
+ u32 data_len)
{
u16 dma_len, min_len;
dma_len = get_unaligned_le16(data);
- min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
- MT_FCE_INFO_LEN;
+ if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
+ return dma_len;
+ min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
if (data_len < min_len || !dma_len ||
dma_len + MT_DMA_HDR_LEN > data_len ||
(dma_len & 0x3))
@@ -420,11 +525,14 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
}
static struct sk_buff *
-mt76u_build_rx_skb(void *data, int len, int buf_size)
+mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
+ int len, int buf_size)
{
+ int head_room, drv_flags = dev->drv->drv_flags;
struct sk_buff *skb;
- if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) {
+ head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
+ if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
struct page *page;
/* slow path, not enough space for data and
@@ -434,8 +542,8 @@ mt76u_build_rx_skb(void *data, int len, int buf_size)
if (!skb)
return NULL;
- skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
- data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN);
+ skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
+ data += head_room + MT_SKB_HEAD_LEN;
page = virt_to_head_page(data);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, data - page_address(page),
@@ -449,30 +557,31 @@ mt76u_build_rx_skb(void *data, int len, int buf_size)
if (!skb)
return NULL;
- skb_reserve(skb, MT_DMA_HDR_LEN);
+ skb_reserve(skb, head_room);
__skb_put(skb, len);
return skb;
}
static int
-mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
+mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
+ int buf_size)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
- int len, nsgs = 1;
+ int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
struct sk_buff *skb;
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
return 0;
- len = mt76u_get_rx_entry_len(data, urb->actual_length);
+ len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
if (len < 0)
return 0;
- data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
- skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
+ head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
+ data_len = min_t(int, len, data_len - head_room);
+ skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
if (!skb)
return 0;
@@ -481,8 +590,8 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
data_len = min_t(int, len, urb->sg[nsgs].length);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
sg_page(&urb->sg[nsgs]),
- urb->sg[nsgs].offset,
- data_len, q->buf_size);
+ urb->sg[nsgs].offset, data_len,
+ buf_size);
len -= data_len;
nsgs++;
}
@@ -493,8 +602,8 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
static void mt76u_complete_rx(struct urb *urb)
{
- struct mt76_dev *dev = urb->context;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
+ struct mt76_queue *q = urb->context;
unsigned long flags;
trace_rx_urb(dev, urb);
@@ -524,50 +633,69 @@ out:
}
static int
-mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
+mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
+ struct urb *urb)
{
- mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
- mt76u_complete_rx, dev);
+ int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;
+
+ mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
+ mt76u_complete_rx, &dev->q_rx[qid]);
trace_submit_urb(dev, urb);
return usb_submit_urb(urb, GFP_ATOMIC);
}
-static void mt76u_rx_tasklet(unsigned long data)
+static void
+mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- struct mt76_dev *dev = (struct mt76_dev *)data;
+ int qid = q - &dev->q_rx[MT_RXQ_MAIN];
struct urb *urb;
int err, count;
- rcu_read_lock();
-
while (true) {
- urb = mt76u_get_next_rx_entry(dev);
+ urb = mt76u_get_next_rx_entry(q);
if (!urb)
break;
- count = mt76u_process_rx_entry(dev, urb);
+ count = mt76u_process_rx_entry(dev, urb, q->buf_size);
if (count > 0) {
- err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
+ err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
if (err < 0)
break;
}
- mt76u_submit_rx_buf(dev, urb);
+ mt76u_submit_rx_buf(dev, qid, urb);
}
- mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+ if (qid == MT_RXQ_MAIN)
+ mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+}
+
+static void mt76u_rx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76_queue *q;
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < __MT_RXQ_MAX; i++) {
+ q = &dev->q_rx[i];
+ if (!q->ndesc)
+ continue;
+ mt76u_process_rx_queue(dev, q);
+ }
rcu_read_unlock();
}
-static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
+static int
+mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_queue *q = &dev->q_rx[qid];
unsigned long flags;
int i, err = 0;
spin_lock_irqsave(&q->lock, flags);
for (i = 0; i < q->ndesc; i++) {
- err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
+ err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
if (err < 0)
break;
}
@@ -578,16 +706,12 @@ static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
return err;
}
-static int mt76u_alloc_rx(struct mt76_dev *dev)
+static int
+mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
- struct mt76_usb *usb = &dev->usb;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_queue *q = &dev->q_rx[qid];
int i, err;
- usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
- if (!usb->mcu.data)
- return -ENOMEM;
-
spin_lock_init(&q->lock);
q->entry = devm_kcalloc(dev->dev,
MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -599,17 +723,23 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
q->buf_size = PAGE_SIZE;
for (i = 0; i < q->ndesc; i++) {
- err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
+ err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
if (err < 0)
return err;
}
- return mt76u_submit_rx_buffers(dev);
+ return mt76u_submit_rx_buffers(dev, qid);
}
-static void mt76u_free_rx(struct mt76_dev *dev)
+int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
+{
+ return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
+}
+EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
+
+static void
+mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
struct page *page;
int i;
@@ -624,13 +754,33 @@ static void mt76u_free_rx(struct mt76_dev *dev)
memset(&q->rx_page, 0, sizeof(q->rx_page));
}
-void mt76u_stop_rx(struct mt76_dev *dev)
+static void mt76u_free_rx(struct mt76_dev *dev)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_queue *q;
int i;
- for (i = 0; i < q->ndesc; i++)
- usb_poison_urb(q->entry[i].urb);
+ for (i = 0; i < __MT_RXQ_MAX; i++) {
+ q = &dev->q_rx[i];
+ if (!q->ndesc)
+ continue;
+
+ mt76u_free_rx_queue(dev, q);
+ }
+}
+
+void mt76u_stop_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i, j;
+
+ for (i = 0; i < __MT_RXQ_MAX; i++) {
+ q = &dev->q_rx[i];
+ if (!q->ndesc)
+ continue;
+
+ for (j = 0; j < q->ndesc; j++)
+ usb_poison_urb(q->entry[j].urb);
+ }
tasklet_kill(&dev->usb.rx_tasklet);
}
@@ -638,13 +788,24 @@ EXPORT_SYMBOL_GPL(mt76u_stop_rx);
int mt76u_resume_rx(struct mt76_dev *dev)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- int i;
+ struct mt76_queue *q;
+ int i, j, err;
- for (i = 0; i < q->ndesc; i++)
- usb_unpoison_urb(q->entry[i].urb);
+ for (i = 0; i < __MT_RXQ_MAX; i++) {
+ q = &dev->q_rx[i];
+
+ if (!q->ndesc)
+ continue;
- return mt76u_submit_rx_buffers(dev);
+ for (j = 0; j < q->ndesc; j++)
+ usb_unpoison_urb(q->entry[j].urb);
+
+ err = mt76u_submit_rx_buffers(dev, i);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);
@@ -694,10 +855,11 @@ static void mt76u_tx_tasklet(unsigned long data)
spin_unlock_bh(&q->lock);
- mt76_txq_schedule(dev, i);
+ mt76_txq_schedule(&dev->phy, i);
- if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
- queue_work(dev->usb.stat_wq, &dev->usb.stat_work);
+ if (dev->drv->tx_status_data &&
+ !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
+ queue_work(dev->usb.wq, &dev->usb.stat_work);
if (wake)
ieee80211_wake_queue(dev->hw, i);
}
@@ -714,7 +876,7 @@ static void mt76u_tx_status_data(struct work_struct *work)
dev = container_of(usb, struct mt76_dev, usb);
while (true) {
- if (test_bit(MT76_REMOVED, &dev->state))
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
break;
if (!dev->drv->tx_status_data(dev, &update))
@@ -722,10 +884,10 @@ static void mt76u_tx_status_data(struct work_struct *work)
count++;
}
- if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
- queue_work(usb->stat_wq, &usb->stat_work);
+ if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
+ queue_work(usb->wq, &usb->stat_work);
else
- clear_bit(MT76_READING_STATS, &dev->state);
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
}
static void mt76u_complete_tx(struct urb *urb)
@@ -759,6 +921,35 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
return urb->num_sgs;
}
+int mt76u_skb_dma_info(struct sk_buff *skb, u32 info)
+{
+ struct sk_buff *iter, *last = skb;
+ u32 pad;
+
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+ /* Add a zero pad of 4-7 bytes */
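+ /* e.g. skb->len == 60 gives pad = 4, skb->len == 61 gives pad = 7 */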
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+
+ /* The first packet of an A-MSDU burst tracks the length of the whole
+ * burst; update both its length and that of the last packet.
+ */
+ skb_walk_frags(skb, iter) {
+ last = iter;
+ if (!iter->next) {
+ skb->data_len += pad;
+ skb->len += pad;
+ break;
+ }
+ }
+
+ if (skb_pad(last, pad))
+ return -ENOMEM;
+ __skb_put(last, pad);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
+
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
@@ -806,7 +997,7 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
if (err == -ENODEV)
- set_bit(MT76_REMOVED, &dev->state);
+ set_bit(MT76_REMOVED, &dev->phy.state);
else
dev_err(dev->dev, "tx urb submit failed:%d\n",
err);
@@ -816,6 +1007,14 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
}
}
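+/* chip 0x7663 uses an inverted AC to hardware-queue mapping:
+ * ac ^ 0x3 swaps 0 <-> 3 and 1 <-> 2
+ */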
+static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
+{
+ if (mt76_chip(dev) == 0x7663)
+ return ac ^ 0x3;
+
+ return mt76_ac_to_hwq(ac);
+}
+
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
struct mt76_queue *q;
@@ -834,7 +1033,7 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
return -ENOMEM;
spin_lock_init(&q->lock);
- q->hw_idx = mt76_ac_to_hwq(i);
+ q->hw_idx = mt76u_ac_to_hwq(dev, i);
dev->q_tx[i].q = q;
q->entry = devm_kcalloc(dev->dev,
@@ -872,7 +1071,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
struct mt76_queue *q;
int i, j, ret;
- ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev),
+ ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
HZ / 5);
if (!ret) {
dev_err(dev->dev, "timed out waiting for pending tx\n");
@@ -905,7 +1104,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
}
cancel_work_sync(&dev->usb.stat_work);
- clear_bit(MT76_READING_STATS, &dev->state);
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
mt76_tx_status_check(dev, NULL, true);
}
@@ -925,7 +1124,7 @@ int mt76u_alloc_queues(struct mt76_dev *dev)
{
int err;
- err = mt76u_alloc_rx(dev);
+ err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
if (err < 0)
return err;
@@ -938,31 +1137,48 @@ static const struct mt76_queue_ops usb_queue_ops = {
.kick = mt76u_tx_kick,
};
+void mt76u_deinit(struct mt76_dev *dev)
+{
+ if (dev->usb.wq) {
+ destroy_workqueue(dev->usb.wq);
+ dev->usb.wq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76u_deinit);
+
int mt76u_init(struct mt76_dev *dev,
- struct usb_interface *intf)
+ struct usb_interface *intf, bool ext)
{
- static const struct mt76_bus_ops mt76u_ops = {
- .rr = mt76u_rr,
- .wr = mt76u_wr,
- .rmw = mt76u_rmw,
- .write_copy = mt76u_copy,
+ static struct mt76_bus_ops mt76u_ops = {
+ .read_copy = mt76u_read_copy_ext,
.wr_rp = mt76u_wr_rp,
.rd_rp = mt76u_rd_rp,
.type = MT76_BUS_USB,
};
struct usb_device *udev = interface_to_usbdev(intf);
struct mt76_usb *usb = &dev->usb;
+ int err = -ENOMEM;
+
+ mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
+ mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
+ mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
+ mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;
tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
- skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
- usb->stat_wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0);
- if (!usb->stat_wq)
+ usb->wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0);
+ if (!usb->wq)
return -ENOMEM;
- mutex_init(&usb->mcu.mutex);
+ usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
+ if (usb->data_len < 32)
+ usb->data_len = 32;
+
+ usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
+ if (!usb->data)
+ goto error;
mutex_init(&usb->usb_ctrl_mtx);
dev->bus = &mt76u_ops;
@@ -972,18 +1188,17 @@ int mt76u_init(struct mt76_dev *dev,
usb->sg_en = mt76u_check_sg(dev);
- return mt76u_set_endpoints(intf, usb);
-}
-EXPORT_SYMBOL_GPL(mt76u_init);
+ err = mt76u_set_endpoints(intf, usb);
+ if (err < 0)
+ goto error;
-void mt76u_deinit(struct mt76_dev *dev)
-{
- if (dev->usb.stat_wq) {
- destroy_workqueue(dev->usb.stat_wq);
- dev->usb.stat_wq = NULL;
- }
+ return 0;
+
+error:
+ mt76u_deinit(dev);
+ return err;
}
-EXPORT_SYMBOL_GPL(mt76u_deinit);
+EXPORT_SYMBOL_GPL(mt76u_init);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index 23d1e1da78b2..8c60c450125a 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -64,7 +64,7 @@ int mt76_wcid_alloc(unsigned long *mask, int size)
}
EXPORT_SYMBOL_GPL(mt76_wcid_alloc);
-int mt76_get_min_avg_rssi(struct mt76_dev *dev)
+int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy)
{
struct mt76_wcid *wcid;
int i, j, min_rssi = 0;
@@ -75,14 +75,18 @@ int mt76_get_min_avg_rssi(struct mt76_dev *dev)
for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
unsigned long mask = dev->wcid_mask[i];
+ unsigned long phy_mask = dev->wcid_phy_mask[i];
if (!mask)
continue;
- for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
+ for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1, phy_mask >>= 1) {
if (!(mask & 1))
continue;
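+ /* a set bit in wcid_phy_mask marks a station on the ext phy;
+ * skip stations that belong to the other phy
+ */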
+ if (!!(phy_mask & 1) != ext_phy)
+ continue;
+
wcid = rcu_dereference(dev->wcid[j]);
if (!wcid)
continue;
diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h
index fe3479c8e561..48a71e7479e5 100644
--- a/drivers/net/wireless/mediatek/mt76/util.h
+++ b/drivers/net/wireless/mediatek/mt76/util.h
@@ -16,8 +16,20 @@
int mt76_wcid_alloc(unsigned long *mask, int size);
+static inline bool
+mt76_wcid_mask_test(unsigned long *mask, int idx)
+{
+ return mask[idx / BITS_PER_LONG] & BIT(idx % BITS_PER_LONG);
+}
+
+static inline void
+mt76_wcid_mask_set(unsigned long *mask, int idx)
+{
+ mask[idx / BITS_PER_LONG] |= BIT(idx % BITS_PER_LONG);
+}
+
static inline void
-mt76_wcid_free(unsigned long *mask, int idx)
+mt76_wcid_mask_clear(unsigned long *mask, int idx)
{
mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 8849faa5bc10..8be17106008d 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -60,7 +60,8 @@ qtnf_mgmt_stypes[NUM_NL80211_IFTYPES] = {
BIT(IEEE80211_STYPE_AUTH >> 4),
},
[NL80211_IFTYPE_AP] = {
- .tx = BIT(IEEE80211_STYPE_ACTION >> 4),
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
@@ -101,6 +102,21 @@ qtnf_validate_iface_combinations(struct wiphy *wiphy,
ret = cfg80211_check_combinations(wiphy, &params);
+ if (ret)
+ return ret;
+
+ /* Check repeater interface combination: primary VIF should be STA only.
+ * STA (primary) + AP (secondary) is OK.
+ * AP (primary) + STA (secondary) is not supported.
+ */
+ vif = qtnf_mac_get_base_vif(mac);
+ if (vif && vif->wdev.iftype == NL80211_IFTYPE_AP &&
+ vif != change_vif && new_type == NL80211_IFTYPE_STATION) {
+ ret = -EINVAL;
+ pr_err("MAC%u invalid combination: AP as primary repeater interface is not supported\n",
+ mac->macid);
+ }
+
return ret;
}
@@ -248,7 +264,7 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
goto error_del_vif;
}
- if (mac->bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
+ if (qtnf_hwcap_is_set(&mac->bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE)) {
ret = qtnf_cmd_netdev_changeupper(vif, vif->netdev->ifindex);
if (ret) {
unregister_netdevice(vif->netdev);
@@ -679,10 +695,8 @@ qtnf_external_auth(struct wiphy *wiphy, struct net_device *dev,
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
- if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
- return -EOPNOTSUPP;
-
- if (!ether_addr_equal(vif->bssid, auth->bssid))
+ if (vif->wdev.iftype == NL80211_IFTYPE_STATION &&
+ !ether_addr_equal(vif->bssid, auth->bssid))
pr_warn("unexpected bssid: %pM", auth->bssid);
ret = qtnf_cmd_send_external_auth(vif, auth);
@@ -739,7 +753,6 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_supported_band *sband;
const struct cfg80211_chan_def *chandef = &wdev->chandef;
struct ieee80211_channel *chan;
- struct qtnf_chan_stats stats;
int ret;
sband = wiphy->bands[NL80211_BAND_2GHZ];
@@ -755,49 +768,16 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
return -ENOENT;
chan = &sband->channels[idx];
- memset(&stats, 0, sizeof(stats));
-
survey->channel = chan;
survey->filled = 0x0;
- if (chandef->chan) {
- if (chan->hw_value == chandef->chan->hw_value)
- survey->filled = SURVEY_INFO_IN_USE;
- }
-
- ret = qtnf_cmd_get_chan_stats(mac, chan->hw_value, &stats);
- switch (ret) {
- case 0:
- if (unlikely(stats.chan_num != chan->hw_value)) {
- pr_err("received stats for channel %d instead of %d\n",
- stats.chan_num, chan->hw_value);
- ret = -EINVAL;
- break;
- }
+ if (chan == chandef->chan)
+ survey->filled = SURVEY_INFO_IN_USE;
- survey->filled |= SURVEY_INFO_TIME |
- SURVEY_INFO_TIME_SCAN |
- SURVEY_INFO_TIME_BUSY |
- SURVEY_INFO_TIME_RX |
- SURVEY_INFO_TIME_TX |
- SURVEY_INFO_NOISE_DBM;
-
- survey->time_scan = stats.cca_try;
- survey->time = stats.cca_try;
- survey->time_tx = stats.cca_tx;
- survey->time_rx = stats.cca_rx;
- survey->time_busy = stats.cca_busy;
- survey->noise = stats.chan_noise;
- break;
- case -ENOENT:
- pr_debug("no stats for channel %u\n", chan->hw_value);
- ret = 0;
- break;
- default:
+ ret = qtnf_cmd_get_chan_stats(mac, chan->center_freq, survey);
+ if (ret)
pr_debug("failed to get chan(%d) stats from card\n",
chan->hw_value);
- break;
- }
return ret;
}
@@ -943,6 +923,26 @@ static int qtnf_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
return ret;
}
+static int qtnf_update_owe_info(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_update_owe_info *owe_info)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+ int ret;
+
+ if (vif->wdev.iftype != NL80211_IFTYPE_AP)
+ return -EOPNOTSUPP;
+
+ ret = qtnf_cmd_send_update_owe(vif, owe_info);
+ if (ret) {
+ pr_err("VIF%u.%u: failed to update owe info\n",
+ vif->mac->macid, vif->vifid);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
#ifdef CONFIG_PM
static int qtnf_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
{
@@ -1039,6 +1039,7 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
.set_power_mgmt = qtnf_set_power_mgmt,
.get_tx_power = qtnf_get_tx_power,
.set_tx_power = qtnf_set_tx_power,
+ .update_owe_info = qtnf_update_owe_info,
#ifdef CONFIG_PM
.suspend = qtnf_suspend,
.resume = qtnf_resume,
@@ -1075,22 +1076,26 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy,
}
}
-struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
+struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus,
+ struct platform_device *pdev)
{
struct wiphy *wiphy;
if (qtnf_dfs_offload_get() &&
- (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD))
+ qtnf_hwcap_is_set(&bus->hw_info, QLINK_HW_CAPAB_DFS_OFFLOAD))
qtn_cfg80211_ops.start_radar_detection = NULL;
- if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_PWR_MGMT))
+ if (!qtnf_hwcap_is_set(&bus->hw_info, QLINK_HW_CAPAB_PWR_MGMT))
qtn_cfg80211_ops.set_power_mgmt = NULL;
wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac));
if (!wiphy)
return NULL;
- set_wiphy_dev(wiphy, bus->dev);
+ if (pdev)
+ set_wiphy_dev(wiphy, &pdev->dev);
+ else
+ set_wiphy_dev(wiphy, bus->dev);
return wiphy;
}
@@ -1142,7 +1147,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->coverage_class = macinfo->coverage_class;
wiphy->max_scan_ssids =
- (hw_info->max_scan_ssids) ? hw_info->max_scan_ssids : 1;
+ (macinfo->max_scan_ssids) ? macinfo->max_scan_ssids : 1;
wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
wiphy->mgmt_stypes = qtnf_mgmt_stypes;
wiphy->max_remain_on_channel_duration = 5000;
@@ -1166,10 +1171,10 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (qtnf_dfs_offload_get() &&
- (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD))
+ qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_DFS_OFFLOAD))
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
- if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_DWELL)
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_SCAN_DWELL))
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
@@ -1185,16 +1190,16 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
ether_addr_copy(wiphy->perm_addr, mac->macaddr);
- if (hw_info->hw_capab & QLINK_HW_CAPAB_STA_INACT_TIMEOUT)
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_STA_INACT_TIMEOUT))
wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
- if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR)
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR))
wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
- if (!(hw_info->hw_capab & QLINK_HW_CAPAB_OBSS_SCAN))
+ if (!qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_OBSS_SCAN))
wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN;
- if (hw_info->hw_capab & QLINK_HW_CAPAB_SAE)
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_SAE))
wiphy->features |= NL80211_FEATURE_SAE;
#ifdef CONFIG_PM
@@ -1205,7 +1210,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
regdomain_is_known = isalpha(mac->rd->alpha2[0]) &&
isalpha(mac->rd->alpha2[1]);
- if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) {
+ if (qtnf_hwcap_is_set(hw_info, QLINK_HW_CAPAB_REG_UPDATE)) {
wiphy->reg_notifier = qtnf_cfg80211_reg_notifier;
if (mac->rd->alpha2[0] == '9' && mac->rd->alpha2[1] == '9') {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index d0d7ec8794c4..f40d8c3c3d9e 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -11,11 +11,11 @@
#include "bus.h"
#include "commands.h"
+/* Let the device select the best values for current conditions */
#define QTNF_SCAN_TIME_AUTO 0
-/* Let device itself to select best values for current conditions */
-#define QTNF_SCAN_DWELL_ACTIVE_DEFAULT QTNF_SCAN_TIME_AUTO
-#define QTNF_SCAN_DWELL_PASSIVE_DEFAULT QTNF_SCAN_TIME_AUTO
+#define QTNF_SCAN_DWELL_ACTIVE_DEFAULT 90
+#define QTNF_SCAN_DWELL_PASSIVE_DEFAULT 100
#define QTNF_SCAN_SAMPLE_DURATION_DEFAULT QTNF_SCAN_TIME_AUTO
static int qtnf_cmd_check_reply_header(const struct qlink_resp *resp,
@@ -175,7 +175,8 @@ static void qtnf_cmd_tlv_ie_set_add(struct sk_buff *cmd_skb, u8 frame_type,
{
struct qlink_tlv_ie_set *tlv;
- tlv = (struct qlink_tlv_ie_set *)skb_put(cmd_skb, sizeof(*tlv) + len);
+ tlv = (struct qlink_tlv_ie_set *)skb_put(cmd_skb, sizeof(*tlv) +
+ round_up(len, QLINK_ALIGN));
tlv->hdr.type = cpu_to_le16(QTN_TLV_ID_IE_SET);
tlv->hdr.len = cpu_to_le16(len + sizeof(*tlv) - sizeof(tlv->hdr));
tlv->type = frame_type;
@@ -190,20 +191,24 @@ static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
{
unsigned int len = sizeof(struct qlink_cmd_start_ap);
- len += s->ssid_len;
- len += s->beacon.head_len;
- len += s->beacon.tail_len;
- len += s->beacon.beacon_ies_len;
- len += s->beacon.proberesp_ies_len;
- len += s->beacon.assocresp_ies_len;
- len += s->beacon.probe_resp_len;
+ len += round_up(s->ssid_len, QLINK_ALIGN);
+ len += round_up(s->beacon.head_len, QLINK_ALIGN);
+ len += round_up(s->beacon.tail_len, QLINK_ALIGN);
+ len += round_up(s->beacon.beacon_ies_len, QLINK_ALIGN);
+ len += round_up(s->beacon.proberesp_ies_len, QLINK_ALIGN);
+ len += round_up(s->beacon.assocresp_ies_len, QLINK_ALIGN);
+ len += round_up(s->beacon.probe_resp_len, QLINK_ALIGN);
if (cfg80211_chandef_valid(&s->chandef))
len += sizeof(struct qlink_tlv_chandef);
- if (s->acl)
+ if (s->acl) {
+ unsigned int acl_len = struct_size(s->acl, mac_addrs,
+ s->acl->n_acl_entries);
+
len += sizeof(struct qlink_tlv_hdr) +
- struct_size(s->acl, mac_addrs, s->acl->n_acl_entries);
+ round_up(acl_len, QLINK_ALIGN);
+ }
if (len > (sizeof(struct qlink_cmd) + QTNF_MAX_CMD_BUF_SIZE)) {
pr_err("VIF%u.%u: can not fit AP settings: %u\n",
@@ -315,7 +320,8 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
if (s->ht_cap) {
struct qlink_tlv_hdr *tlv = (struct qlink_tlv_hdr *)
- skb_put(cmd_skb, sizeof(*tlv) + sizeof(*s->ht_cap));
+ skb_put(cmd_skb, sizeof(*tlv) +
+ round_up(sizeof(*s->ht_cap), QLINK_ALIGN));
tlv->type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
tlv->len = cpu_to_le16(sizeof(*s->ht_cap));
@@ -339,7 +345,8 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
size_t acl_size = struct_size(s->acl, mac_addrs,
s->acl->n_acl_entries);
struct qlink_tlv_hdr *tlv =
- skb_put(cmd_skb, sizeof(*tlv) + acl_size);
+ skb_put(cmd_skb,
+ sizeof(*tlv) + round_up(acl_size, QLINK_ALIGN));
tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA);
tlv->len = cpu_to_le16(acl_size);
@@ -581,10 +588,10 @@ qtnf_sta_info_parse_flags(struct nl80211_sta_flag_update *dst,
}
static void
-qtnf_cmd_sta_info_parse(struct station_info *sinfo,
- const struct qlink_tlv_hdr *tlv,
+qtnf_cmd_sta_info_parse(struct station_info *sinfo, const u8 *data,
size_t resp_size)
{
+ const struct qlink_tlv_hdr *tlv;
const struct qlink_sta_stats *stats = NULL;
const u8 *map = NULL;
unsigned int map_len = 0;
@@ -595,11 +602,11 @@ qtnf_cmd_sta_info_parse(struct station_info *sinfo,
(qtnf_utils_is_bit_set(map, bitn, map_len) && \
(offsetofend(struct qlink_sta_stats, stat_name) <= stats_len))
- while (resp_size >= sizeof(*tlv)) {
+ qlink_for_each_tlv(tlv, data, resp_size) {
tlv_len = le16_to_cpu(tlv->len);
switch (le16_to_cpu(tlv->type)) {
- case QTN_TLV_ID_STA_STATS_MAP:
+ case QTN_TLV_ID_BITMAP:
map_len = tlv_len;
map = tlv->val;
break;
@@ -610,9 +617,11 @@ qtnf_cmd_sta_info_parse(struct station_info *sinfo,
default:
break;
}
+ }
- resp_size -= tlv_len + sizeof(*tlv);
- tlv = (const struct qlink_tlv_hdr *)(tlv->val + tlv_len);
+ if (!qlink_tlv_parsing_ok(tlv, data, resp_size)) {
+ pr_err("Malformed TLV buffer\n");
+ return;
}
if (!map || !stats)
@@ -736,9 +745,7 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
goto out;
}
- qtnf_cmd_sta_info_parse(sinfo,
- (const struct qlink_tlv_hdr *)resp->info,
- var_resp_len);
+ qtnf_cmd_sta_info_parse(sinfo, resp->info, var_resp_len);
out:
qtnf_bus_unlock(vif->mac->bus);
@@ -895,31 +902,21 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
const char *uboot_ver = NULL;
u32 hw_ver = 0;
u16 tlv_type;
- u16 tlv_value_len;
+ u16 tlv_len;
hwinfo->num_mac = resp->num_mac;
hwinfo->mac_bitmap = resp->mac_bitmap;
hwinfo->fw_ver = le32_to_cpu(resp->fw_ver);
- hwinfo->ql_proto_ver = le16_to_cpu(resp->ql_proto_ver);
hwinfo->total_tx_chain = resp->total_tx_chain;
hwinfo->total_rx_chain = resp->total_rx_chain;
- hwinfo->hw_capab = le32_to_cpu(resp->hw_capab);
bld_tmstamp = le32_to_cpu(resp->bld_tmstamp);
plat_id = le32_to_cpu(resp->plat_id);
hw_ver = le32_to_cpu(resp->hw_ver);
- tlv = (const struct qlink_tlv_hdr *)resp->info;
-
- while (info_len >= sizeof(*tlv)) {
+ qlink_for_each_tlv(tlv, resp->info, info_len) {
tlv_type = le16_to_cpu(tlv->type);
- tlv_value_len = le16_to_cpu(tlv->len);
-
- if (tlv_value_len + sizeof(*tlv) > info_len) {
- pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
- tlv_type, tlv_value_len);
- return -EINVAL;
- }
+ tlv_len = le16_to_cpu(tlv->len);
switch (tlv_type) {
case QTN_TLV_ID_BUILD_NAME:
@@ -943,36 +940,43 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
case QTN_TLV_ID_UBOOT_VER:
uboot_ver = (const void *)tlv->val;
break;
- case QTN_TLV_ID_MAX_SCAN_SSIDS:
- hwinfo->max_scan_ssids = *tlv->val;
+ case QTN_TLV_ID_BITMAP:
+ memcpy(hwinfo->hw_capab, tlv->val,
+ min(sizeof(hwinfo->hw_capab), (size_t)tlv_len));
break;
default:
break;
}
+ }
+
+ if (!qlink_tlv_parsing_ok(tlv, resp->info, info_len)) {
+ pr_err("Malformed TLV buffer\n");
+ return -EINVAL;
+ }
- info_len -= tlv_value_len + sizeof(*tlv);
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
- }
-
- pr_info("fw_version=%d, MACs map %#x, chains Tx=%u Rx=%u, capab=0x%x\n",
- hwinfo->fw_ver, hwinfo->mac_bitmap,
- hwinfo->total_tx_chain, hwinfo->total_rx_chain,
- hwinfo->hw_capab);
-
- pr_info("\nBuild name: %s" \
- "\nBuild revision: %s" \
- "\nBuild type: %s" \
- "\nBuild label: %s" \
- "\nBuild timestamp: %lu" \
- "\nPlatform ID: %lu" \
- "\nHardware ID: %s" \
- "\nCalibration version: %s" \
- "\nU-Boot version: %s" \
- "\nHardware version: 0x%08x\n",
+ pr_info("\nBuild name: %s\n"
+ "Build revision: %s\n"
+ "Build type: %s\n"
+ "Build label: %s\n"
+ "Build timestamp: %lu\n"
+ "Platform ID: %lu\n"
+ "Hardware ID: %s\n"
+ "Calibration version: %s\n"
+ "U-Boot version: %s\n"
+ "Hardware version: 0x%08x\n"
+ "Qlink ver: %u.%u\n"
+ "MACs map: %#x\n"
+ "Chains Rx-Tx: %ux%u\n"
+ "FW version: 0x%x\n",
bld_name, bld_rev, bld_type, bld_label,
(unsigned long)bld_tmstamp,
(unsigned long)plat_id,
- hw_id, calibration_ver, uboot_ver, hw_ver);
+ hw_id, calibration_ver, uboot_ver, hw_ver,
+ QLINK_VER_MAJOR(bus->hw_info.ql_proto_ver),
+ QLINK_VER_MINOR(bus->hw_info.ql_proto_ver),
+ hwinfo->mac_bitmap,
+ hwinfo->total_rx_chain, hwinfo->total_tx_chain,
+ hwinfo->fw_ver);
strlcpy(hwinfo->fw_version, bld_label, sizeof(hwinfo->fw_version));
hwinfo->hw_version = hw_ver;
@@ -1016,18 +1020,15 @@ qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
const struct qlink_resp_get_mac_info *resp,
size_t tlv_buf_size)
{
- const u8 *tlv_buf = resp->var_info;
- struct ieee80211_iface_combination *comb = NULL;
+ struct ieee80211_iface_combination *comb = mac->macinfo.if_comb;
size_t n_comb = 0;
struct ieee80211_iface_limit *limits;
- const struct qlink_iface_comb_num *comb_num;
const struct qlink_iface_limit_record *rec;
const struct qlink_iface_limit *lim;
const struct qlink_wowlan_capab_data *wowlan;
u16 rec_len;
u16 tlv_type;
u16 tlv_value_len;
- size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
u8 *ext_capa = NULL;
u8 *ext_capa_mask = NULL;
@@ -1066,44 +1067,11 @@ qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
break;
}
- tlv = (const struct qlink_tlv_hdr *)tlv_buf;
- while (tlv_buf_size >= sizeof(struct qlink_tlv_hdr)) {
+ qlink_for_each_tlv(tlv, resp->var_info, tlv_buf_size) {
tlv_type = le16_to_cpu(tlv->type);
tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
- if (tlv_full_len > tlv_buf_size) {
- pr_warn("MAC%u: malformed TLV 0x%.2X; LEN: %u\n",
- mac->macid, tlv_type, tlv_value_len);
- return -EINVAL;
- }
switch (tlv_type) {
- case QTN_TLV_ID_NUM_IFACE_COMB:
- if (tlv_value_len != sizeof(*comb_num))
- return -EINVAL;
-
- comb_num = (void *)tlv->val;
-
- /* free earlier iface comb memory */
- qtnf_mac_iface_comb_free(mac);
-
- mac->macinfo.n_if_comb =
- le32_to_cpu(comb_num->iface_comb_num);
-
- mac->macinfo.if_comb =
- kcalloc(mac->macinfo.n_if_comb,
- sizeof(*mac->macinfo.if_comb),
- GFP_KERNEL);
-
- if (!mac->macinfo.if_comb)
- return -ENOMEM;
-
- comb = mac->macinfo.if_comb;
-
- pr_debug("MAC%u: %zu iface combinations\n",
- mac->macid, mac->macinfo.n_if_comb);
-
- break;
case QTN_TLV_ID_IFACE_LIMIT:
if (unlikely(!comb)) {
pr_warn("MAC%u: no combinations advertised\n",
@@ -1207,14 +1175,10 @@ qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
mac->macid, tlv_type);
break;
}
-
- tlv_buf_size -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (tlv_buf_size) {
- pr_warn("MAC%u: malformed TLV buf; bytes left: %zu\n",
- mac->macid, tlv_buf_size);
+ if (!qlink_tlv_parsing_ok(tlv, resp->var_info, tlv_buf_size)) {
+ pr_err("Malformed TLV buffer\n");
return -EINVAL;
}
@@ -1260,13 +1224,15 @@ qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
return 0;
}
-static void
+static int
qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
const struct qlink_resp_get_mac_info *resp_info)
{
struct qtnf_mac_info *mac_info;
struct qtnf_vif *vif;
+ qtnf_mac_iface_comb_free(mac);
+
mac_info = &mac->macinfo;
mac_info->bands_cap = resp_info->bands_cap;
@@ -1285,12 +1251,28 @@ qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
mac_info->radar_detect_widths =
qlink_chan_width_mask_to_nl(le16_to_cpu(
resp_info->radar_detect_widths));
- mac_info->max_acl_mac_addrs = le32_to_cpu(resp_info->max_acl_mac_addrs);
+ mac_info->max_acl_mac_addrs = le16_to_cpu(resp_info->max_acl_mac_addrs);
+ mac_info->frag_thr = le32_to_cpu(resp_info->frag_threshold);
+ mac_info->rts_thr = le32_to_cpu(resp_info->rts_threshold);
+ mac_info->sretry_limit = resp_info->retry_short;
+ mac_info->lretry_limit = resp_info->retry_long;
+ mac_info->coverage_class = resp_info->coverage_class;
+ mac_info->max_scan_ssids = resp_info->max_scan_ssids;
memcpy(&mac_info->ht_cap_mod_mask, &resp_info->ht_cap_mod_mask,
sizeof(mac_info->ht_cap_mod_mask));
memcpy(&mac_info->vht_cap_mod_mask, &resp_info->vht_cap_mod_mask,
sizeof(mac_info->vht_cap_mod_mask));
+
+ mac_info->n_if_comb = resp_info->n_iface_combinations;
+ mac_info->if_comb = kcalloc(mac->macinfo.n_if_comb,
+ sizeof(*mac->macinfo.if_comb),
+ GFP_KERNEL);
+
+ if (!mac->macinfo.if_comb)
+ return -ENOMEM;
+
+ return 0;
}
static void qtnf_cmd_resp_band_fill_htcap(const u8 *info,
@@ -1389,7 +1371,6 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
size_t payload_len)
{
u16 tlv_type;
- size_t tlv_len;
size_t tlv_dlen;
const struct qlink_tlv_hdr *tlv;
const struct qlink_channel *qchan;
@@ -1424,24 +1405,15 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
return -ENOMEM;
}
- tlv = (struct qlink_tlv_hdr *)resp->info;
-
- while (payload_len >= sizeof(*tlv)) {
+ qlink_for_each_tlv(tlv, resp->info, payload_len) {
tlv_type = le16_to_cpu(tlv->type);
tlv_dlen = le16_to_cpu(tlv->len);
- tlv_len = tlv_dlen + sizeof(*tlv);
-
- if (tlv_len > payload_len) {
- pr_warn("malformed TLV 0x%.2X; LEN: %zu\n",
- tlv_type, tlv_len);
- goto error_ret;
- }
switch (tlv_type) {
case QTN_TLV_ID_CHANNEL:
if (unlikely(tlv_dlen != sizeof(*qchan))) {
pr_err("invalid channel TLV len %zu\n",
- tlv_len);
+ tlv_dlen);
goto error_ret;
}
@@ -1544,13 +1516,10 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
pr_warn("unknown TLV type: %#x\n", tlv_type);
break;
}
-
- payload_len -= tlv_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_dlen);
}
- if (payload_len) {
- pr_err("malformed TLV buf; bytes left: %zu\n", payload_len);
+ if (!qlink_tlv_parsing_ok(tlv, resp->info, payload_len)) {
+ pr_err("Malformed TLV buffer\n");
goto error_ret;
}
@@ -1570,128 +1539,6 @@ error_ret:
return ret;
}
-static int qtnf_cmd_resp_proc_phy_params(struct qtnf_wmac *mac,
- const u8 *payload, size_t payload_len)
-{
- struct qtnf_mac_info *mac_info;
- struct qlink_tlv_frag_rts_thr *phy_thr;
- struct qlink_tlv_rlimit *limit;
- struct qlink_tlv_cclass *class;
- u16 tlv_type;
- u16 tlv_value_len;
- size_t tlv_full_len;
- const struct qlink_tlv_hdr *tlv;
-
- mac_info = &mac->macinfo;
-
- tlv = (struct qlink_tlv_hdr *)payload;
- while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
- tlv_type = le16_to_cpu(tlv->type);
- tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
-
- if (tlv_full_len > payload_len) {
- pr_warn("MAC%u: malformed TLV 0x%.2X; LEN: %u\n",
- mac->macid, tlv_type, tlv_value_len);
- return -EINVAL;
- }
-
- switch (tlv_type) {
- case QTN_TLV_ID_FRAG_THRESH:
- phy_thr = (void *)tlv;
- mac_info->frag_thr = le32_to_cpu(phy_thr->thr);
- break;
- case QTN_TLV_ID_RTS_THRESH:
- phy_thr = (void *)tlv;
- mac_info->rts_thr = le32_to_cpu(phy_thr->thr);
- break;
- case QTN_TLV_ID_SRETRY_LIMIT:
- limit = (void *)tlv;
- mac_info->sretry_limit = limit->rlimit;
- break;
- case QTN_TLV_ID_LRETRY_LIMIT:
- limit = (void *)tlv;
- mac_info->lretry_limit = limit->rlimit;
- break;
- case QTN_TLV_ID_COVERAGE_CLASS:
- class = (void *)tlv;
- mac_info->coverage_class = class->cclass;
- break;
- default:
- pr_err("MAC%u: Unknown TLV type: %#x\n", mac->macid,
- le16_to_cpu(tlv->type));
- break;
- }
-
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
- }
-
- if (payload_len) {
- pr_warn("MAC%u: malformed TLV buf; bytes left: %zu\n",
- mac->macid, payload_len);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int
-qtnf_cmd_resp_proc_chan_stat_info(struct qtnf_chan_stats *stats,
- const u8 *payload, size_t payload_len)
-{
- struct qlink_chan_stats *qlink_stats;
- const struct qlink_tlv_hdr *tlv;
- size_t tlv_full_len;
- u16 tlv_value_len;
- u16 tlv_type;
-
- tlv = (struct qlink_tlv_hdr *)payload;
- while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
- tlv_type = le16_to_cpu(tlv->type);
- tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
- if (tlv_full_len > payload_len) {
- pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
- tlv_type, tlv_value_len);
- return -EINVAL;
- }
- switch (tlv_type) {
- case QTN_TLV_ID_CHANNEL_STATS:
- if (unlikely(tlv_value_len != sizeof(*qlink_stats))) {
- pr_err("invalid CHANNEL_STATS entry size\n");
- return -EINVAL;
- }
-
- qlink_stats = (void *)tlv->val;
-
- stats->chan_num = le32_to_cpu(qlink_stats->chan_num);
- stats->cca_tx = le32_to_cpu(qlink_stats->cca_tx);
- stats->cca_rx = le32_to_cpu(qlink_stats->cca_rx);
- stats->cca_busy = le32_to_cpu(qlink_stats->cca_busy);
- stats->cca_try = le32_to_cpu(qlink_stats->cca_try);
- stats->chan_noise = qlink_stats->chan_noise;
-
- pr_debug("chan(%u) try(%u) busy(%u) noise(%d)\n",
- stats->chan_num, stats->cca_try,
- stats->cca_busy, stats->chan_noise);
- break;
- default:
- pr_warn("Unknown TLV type: %#x\n",
- le16_to_cpu(tlv->type));
- }
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
- }
-
- if (payload_len) {
- pr_warn("malformed TLV buf; bytes left: %zu\n", payload_len);
- return -EINVAL;
- }
-
- return 0;
-}
-
int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
@@ -1712,7 +1559,10 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
goto out;
resp = (const struct qlink_resp_get_mac_info *)resp_skb->data;
- qtnf_cmd_resp_proc_mac_info(mac, resp);
+ ret = qtnf_cmd_resp_proc_mac_info(mac, resp);
+ if (ret)
+ goto out;
+
ret = qtnf_parse_variable_mac_info(mac, resp, var_data_len);
out:
@@ -1793,35 +1643,6 @@ out:
return ret;
}
-int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac)
-{
- struct sk_buff *cmd_skb, *resp_skb = NULL;
- struct qlink_resp_phy_params *resp;
- size_t response_size = 0;
- int ret = 0;
-
- cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
- QLINK_CMD_PHY_PARAMS_GET,
- sizeof(struct qlink_cmd));
- if (!cmd_skb)
- return -ENOMEM;
-
- qtnf_bus_lock(mac->bus);
- ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
- sizeof(*resp), &response_size);
- if (ret)
- goto out;
-
- resp = (struct qlink_resp_phy_params *)resp_skb->data;
- ret = qtnf_cmd_resp_proc_phy_params(mac, resp->info, response_size);
-
-out:
- qtnf_bus_unlock(mac->bus);
- consume_skb(resp_skb);
-
- return ret;
-}
-
int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
{
struct wiphy *wiphy = priv_to_wiphy(mac);
@@ -1843,16 +1664,16 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed)
qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_RTS_THRESH,
wiphy->rts_threshold);
if (changed & WIPHY_PARAM_COVERAGE_CLASS)
- qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS,
- wiphy->coverage_class);
+ qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS,
+ wiphy->coverage_class);
if (changed & WIPHY_PARAM_RETRY_LONG)
- qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_LRETRY_LIMIT,
- wiphy->retry_long);
+ qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_LRETRY_LIMIT,
+ wiphy->retry_long);
if (changed & WIPHY_PARAM_RETRY_SHORT)
- qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_SRETRY_LIMIT,
- wiphy->retry_short);
+ qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_SRETRY_LIMIT,
+ wiphy->retry_short);
ret = qtnf_cmd_send(mac->bus, cmd_skb);
if (ret)
@@ -1866,23 +1687,35 @@ out:
int qtnf_cmd_send_init_fw(struct qtnf_bus *bus)
{
+ struct sk_buff *resp_skb = NULL;
+ struct qlink_resp_init_fw *resp;
+ struct qlink_cmd_init_fw *cmd;
struct sk_buff *cmd_skb;
- int ret = 0;
+ size_t info_len = 0;
+ int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
QLINK_CMD_FW_INIT,
- sizeof(struct qlink_cmd));
+ sizeof(*cmd));
if (!cmd_skb)
return -ENOMEM;
+ cmd = (struct qlink_cmd_init_fw *)cmd_skb->data;
+ cmd->qlink_proto_ver = cpu_to_le32(QLINK_PROTO_VER);
+
qtnf_bus_lock(bus);
- ret = qtnf_cmd_send(bus, cmd_skb);
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb,
+ sizeof(*resp), &info_len);
+ qtnf_bus_unlock(bus);
+
if (ret)
goto out;
-out:
- qtnf_bus_unlock(bus);
+ resp = (struct qlink_resp_init_fw *)resp_skb->data;
+ bus->hw_info.ql_proto_ver = le32_to_cpu(resp->qlink_proto_ver);
+out:
+ consume_skb(resp_skb);
return ret;
}
@@ -2178,108 +2011,90 @@ static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
memcpy(randmac->mac_addr_mask, mac_addr_mask, ETH_ALEN);
}
-static void qtnf_cmd_scan_set_dwell(struct qtnf_wmac *mac,
- struct sk_buff *cmd_skb)
+int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
{
struct cfg80211_scan_request *scan_req = mac->scan_req;
- u16 dwell_active = QTNF_SCAN_DWELL_ACTIVE_DEFAULT;
u16 dwell_passive = QTNF_SCAN_DWELL_PASSIVE_DEFAULT;
- u16 duration = QTNF_SCAN_SAMPLE_DURATION_DEFAULT;
-
- if (scan_req->duration) {
- dwell_active = scan_req->duration;
- dwell_passive = scan_req->duration;
- }
-
- pr_debug("MAC%u: %s scan dwell active=%u, passive=%u, duration=%u\n",
- mac->macid,
- scan_req->duration_mandatory ? "mandatory" : "max",
- dwell_active, dwell_passive, duration);
-
- qtnf_cmd_skb_put_tlv_u16(cmd_skb,
- QTN_TLV_ID_SCAN_DWELL_ACTIVE,
- dwell_active);
- qtnf_cmd_skb_put_tlv_u16(cmd_skb,
- QTN_TLV_ID_SCAN_DWELL_PASSIVE,
- dwell_passive);
- qtnf_cmd_skb_put_tlv_u16(cmd_skb,
- QTN_TLV_ID_SCAN_SAMPLE_DURATION,
- duration);
-}
-
-int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
-{
- struct sk_buff *cmd_skb;
+ u16 dwell_active = QTNF_SCAN_DWELL_ACTIVE_DEFAULT;
+ struct wireless_dev *wdev = scan_req->wdev;
struct ieee80211_channel *sc;
- struct cfg80211_scan_request *scan_req = mac->scan_req;
- int n_channels;
- int count = 0;
+ struct qlink_cmd_scan *cmd;
+ struct sk_buff *cmd_skb;
+ int n_channels = 0;
+ u64 flags = 0;
+ int count;
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
QLINK_CMD_SCAN,
- sizeof(struct qlink_cmd));
+ sizeof(*cmd));
if (!cmd_skb)
return -ENOMEM;
- qtnf_bus_lock(mac->bus);
+ cmd = (struct qlink_cmd_scan *)cmd_skb->data;
- if (scan_req->n_ssids != 0) {
- while (count < scan_req->n_ssids) {
- qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_SSID,
- scan_req->ssids[count].ssid,
- scan_req->ssids[count].ssid_len);
- count++;
- }
+ if (scan_req->duration) {
+ dwell_active = scan_req->duration;
+ dwell_passive = scan_req->duration;
+ } else if (wdev->iftype == NL80211_IFTYPE_STATION &&
+ wdev->current_bss) {
+ /* let device select dwell based on traffic conditions */
+ dwell_active = QTNF_SCAN_TIME_AUTO;
+ dwell_passive = QTNF_SCAN_TIME_AUTO;
+ }
+
+ cmd->n_ssids = cpu_to_le16(scan_req->n_ssids);
+ for (count = 0; count < scan_req->n_ssids; ++count) {
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_SSID,
+ scan_req->ssids[count].ssid,
+ scan_req->ssids[count].ssid_len);
}
if (scan_req->ie_len != 0)
qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_PROBE_REQ,
scan_req->ie, scan_req->ie_len);
- if (scan_req->n_channels) {
- n_channels = scan_req->n_channels;
- count = 0;
-
- while (n_channels != 0) {
- sc = scan_req->channels[count];
- if (sc->flags & IEEE80211_CHAN_DISABLED) {
- n_channels--;
- continue;
- }
+ for (count = 0; count < scan_req->n_channels; ++count) {
+ sc = scan_req->channels[count];
+ if (sc->flags & IEEE80211_CHAN_DISABLED)
+ continue;
- pr_debug("MAC%u: scan chan=%d, freq=%d, flags=%#x\n",
- mac->macid, sc->hw_value, sc->center_freq,
- sc->flags);
+ pr_debug("[MAC%u] scan chan=%d, freq=%d, flags=%#x\n",
+ mac->macid, sc->hw_value, sc->center_freq,
+ sc->flags);
- qtnf_cmd_channel_tlv_add(cmd_skb, sc);
- n_channels--;
- count++;
- }
+ qtnf_cmd_channel_tlv_add(cmd_skb, sc);
+ ++n_channels;
}
- qtnf_cmd_scan_set_dwell(mac, cmd_skb);
+ if (scan_req->flags & NL80211_SCAN_FLAG_FLUSH)
+ flags |= QLINK_SCAN_FLAG_FLUSH;
+
+ if (scan_req->duration_mandatory)
+ flags |= QLINK_SCAN_FLAG_DURATION_MANDATORY;
+
+ cmd->n_channels = cpu_to_le16(n_channels);
+ cmd->active_dwell = cpu_to_le16(dwell_active);
+ cmd->passive_dwell = cpu_to_le16(dwell_passive);
+ cmd->sample_duration = cpu_to_le16(QTNF_SCAN_SAMPLE_DURATION_DEFAULT);
+ cmd->flags = cpu_to_le64(flags);
+
+ pr_debug("[MAC%u] %s scan dwell active=%u passive=%u duration=%u\n",
+ mac->macid,
+ scan_req->duration_mandatory ? "mandatory" : "max",
+ dwell_active, dwell_passive,
+ QTNF_SCAN_SAMPLE_DURATION_DEFAULT);
if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- pr_debug("MAC%u: scan with random addr=%pM, mask=%pM\n",
+ pr_debug("[MAC%u] scan with random addr=%pM, mask=%pM\n",
mac->macid,
scan_req->mac_addr, scan_req->mac_addr_mask);
-
qtnf_cmd_randmac_tlv_add(cmd_skb, scan_req->mac_addr,
scan_req->mac_addr_mask);
}
- if (scan_req->flags & NL80211_SCAN_FLAG_FLUSH) {
- pr_debug("MAC%u: flush cache before scan\n", mac->macid);
-
- qtnf_cmd_skb_put_tlv_tag(cmd_skb, QTN_TLV_ID_SCAN_FLUSH);
- }
-
+ qtnf_bus_lock(mac->bus);
ret = qtnf_cmd_send(mac->bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(mac->bus);
return ret;
@@ -2396,7 +2211,7 @@ int qtnf_cmd_send_external_auth(struct qtnf_vif *vif,
cmd = (struct qlink_cmd_external_auth *)cmd_skb->data;
- ether_addr_copy(cmd->bssid, auth->bssid);
+ ether_addr_copy(cmd->peer, auth->bssid);
cmd->status = cpu_to_le16(auth->status);
qtnf_bus_lock(vif->mac->bus);
@@ -2552,8 +2367,91 @@ int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
return ret;
}
-int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
- struct qtnf_chan_stats *stats)
+static int
+qtnf_cmd_resp_proc_chan_stat_info(struct survey_info *survey,
+ const u8 *payload, size_t payload_len)
+{
+ const struct qlink_chan_stats *stats = NULL;
+ const struct qlink_tlv_hdr *tlv;
+ u16 tlv_value_len;
+ u16 tlv_type;
+ const u8 *map = NULL;
+ unsigned int map_len = 0;
+ unsigned int stats_len = 0;
+
+ qlink_for_each_tlv(tlv, payload, payload_len) {
+ tlv_type = le16_to_cpu(tlv->type);
+ tlv_value_len = le16_to_cpu(tlv->len);
+
+ switch (tlv_type) {
+ case QTN_TLV_ID_BITMAP:
+ map = tlv->val;
+ map_len = tlv_value_len;
+ break;
+ case QTN_TLV_ID_CHANNEL_STATS:
+ stats = (struct qlink_chan_stats *)tlv->val;
+ stats_len = tlv_value_len;
+ break;
+ default:
+ pr_info("Unknown TLV type: %#x\n", tlv_type);
+ break;
+ }
+ }
+
+ if (!qlink_tlv_parsing_ok(tlv, payload, payload_len)) {
+ pr_err("Malformed TLV buffer\n");
+ return -EINVAL;
+ }
+
+ if (!map || !stats)
+ return 0;
+
+#define qtnf_chan_stat_avail(stat_name, bitn) \
+ (qtnf_utils_is_bit_set(map, bitn, map_len) && \
+ (offsetofend(struct qlink_chan_stats, stat_name) <= stats_len))
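+/* a stat is valid only if the bitmap advertises it and the response
+ * payload is long enough to contain the field
+ */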
+
+ if (qtnf_chan_stat_avail(time_on, QLINK_CHAN_STAT_TIME_ON)) {
+ survey->filled |= SURVEY_INFO_TIME;
+ survey->time = le64_to_cpu(stats->time_on);
+ }
+
+ if (qtnf_chan_stat_avail(time_tx, QLINK_CHAN_STAT_TIME_TX)) {
+ survey->filled |= SURVEY_INFO_TIME_TX;
+ survey->time_tx = le64_to_cpu(stats->time_tx);
+ }
+
+ if (qtnf_chan_stat_avail(time_rx, QLINK_CHAN_STAT_TIME_RX)) {
+ survey->filled |= SURVEY_INFO_TIME_RX;
+ survey->time_rx = le64_to_cpu(stats->time_rx);
+ }
+
+ if (qtnf_chan_stat_avail(cca_busy, QLINK_CHAN_STAT_CCA_BUSY)) {
+ survey->filled |= SURVEY_INFO_TIME_BUSY;
+ survey->time_busy = le64_to_cpu(stats->cca_busy);
+ }
+
+ if (qtnf_chan_stat_avail(cca_busy_ext, QLINK_CHAN_STAT_CCA_BUSY_EXT)) {
+ survey->filled |= SURVEY_INFO_TIME_EXT_BUSY;
+ survey->time_ext_busy = le64_to_cpu(stats->cca_busy_ext);
+ }
+
+ if (qtnf_chan_stat_avail(time_scan, QLINK_CHAN_STAT_TIME_SCAN)) {
+ survey->filled |= SURVEY_INFO_TIME_SCAN;
+ survey->time_scan = le64_to_cpu(stats->time_scan);
+ }
+
+ if (qtnf_chan_stat_avail(chan_noise, QLINK_CHAN_STAT_CHAN_NOISE)) {
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+ survey->noise = stats->chan_noise;
+ }
+
+#undef qtnf_chan_stat_avail
+
+ return 0;
+}
+
+int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u32 chan_freq,
+ struct survey_info *survey)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
struct qlink_cmd_get_chan_stats *cmd;
@@ -2567,22 +2465,30 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
if (!cmd_skb)
return -ENOMEM;
- qtnf_bus_lock(mac->bus);
-
cmd = (struct qlink_cmd_get_chan_stats *)cmd_skb->data;
- cmd->channel = cpu_to_le16(channel);
+ cmd->channel_freq = cpu_to_le32(chan_freq);
+ qtnf_bus_lock(mac->bus);
ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb,
sizeof(*resp), &var_data_len);
+ qtnf_bus_unlock(mac->bus);
+
if (ret)
goto out;
resp = (struct qlink_resp_get_chan_stats *)resp_skb->data;
- ret = qtnf_cmd_resp_proc_chan_stat_info(stats, resp->info,
+
+ if (le32_to_cpu(resp->chan_freq) != chan_freq) {
+ pr_err("[MAC%u] channel stats freq %u != requested %u\n",
+ mac->macid, le32_to_cpu(resp->chan_freq), chan_freq);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = qtnf_cmd_resp_proc_chan_stat_info(survey, resp->info,
var_data_len);
out:
- qtnf_bus_unlock(mac->bus);
consume_skb(resp_skb);
return ret;
@@ -2595,6 +2501,7 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
struct qlink_cmd_chan_switch *cmd;
struct sk_buff *cmd_skb;
int ret;
+ u64 flags = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, vif->vifid,
QLINK_CMD_CHAN_SWITCH,
@@ -2602,19 +2509,19 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
if (!cmd_skb)
return -ENOMEM;
- qtnf_bus_lock(mac->bus);
+ if (params->radar_required)
+ flags |= QLINK_CHAN_SW_RADAR_REQUIRED;
+
+ if (params->block_tx)
+ flags |= QLINK_CHAN_SW_BLOCK_TX;
cmd = (struct qlink_cmd_chan_switch *)cmd_skb->data;
- cmd->channel = cpu_to_le16(params->chandef.chan->hw_value);
- cmd->radar_required = params->radar_required;
- cmd->block_tx = params->block_tx;
+ qlink_chandef_cfg2q(&params->chandef, &cmd->channel);
+ cmd->flags = cpu_to_le64(flags);
cmd->beacon_count = params->count;
+ qtnf_bus_lock(mac->bus);
ret = qtnf_cmd_send(mac->bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(mac->bus);
return ret;
@@ -2695,7 +2602,7 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
if (!cmd_skb)
return -ENOMEM;
- tlv = skb_put(cmd_skb, sizeof(*tlv) + acl_size);
+ tlv = skb_put(cmd_skb, sizeof(*tlv) + round_up(acl_size, QLINK_ALIGN));
tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA);
tlv->len = cpu_to_le16(acl_size);
qlink_acl_data_cfg2q(params, (struct qlink_acl_data *)tlv->val);
@@ -2884,3 +2791,39 @@ int qtnf_cmd_netdev_changeupper(const struct qtnf_vif *vif, int br_domain)
return ret;
}
+
+int qtnf_cmd_send_update_owe(struct qtnf_vif *vif,
+ struct cfg80211_update_owe_info *owe)
+{
+ struct qlink_cmd_update_owe *cmd;
+ struct sk_buff *cmd_skb;
+ int ret;
+
+ if (sizeof(*cmd) + owe->ie_len > QTNF_MAX_CMD_BUF_SIZE) {
+ pr_warn("VIF%u.%u: OWE update IEs too big: %zu\n",
+ vif->mac->macid, vif->vifid, owe->ie_len);
+ return -E2BIG;
+ }
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_UPDATE_OWE,
+ sizeof(*cmd));
+ if (!cmd_skb)
+ return -ENOMEM;
+
+ cmd = (struct qlink_cmd_update_owe *)cmd_skb->data;
+ ether_addr_copy(cmd->peer, owe->peer);
+ cmd->status = cpu_to_le16(owe->status);
+ if (owe->ie_len && owe->ie)
+ qtnf_cmd_skb_put_buffer(cmd_skb, owe->ie, owe->ie_len);
+
+ qtnf_bus_lock(vif->mac->bus);
+ ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
+ if (ret)
+ goto out;
+
+out:
+ qtnf_bus_unlock(vif->mac->bus);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index ab273257b078..72ad6ae5c750 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -59,8 +59,8 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif,
bool up);
int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
bool slave_radar, bool dfs_offload);
-int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
- struct qtnf_chan_stats *stats);
+int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u32 chan_freq,
+ struct survey_info *survey);
int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
struct cfg80211_csa_settings *params);
int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef);
@@ -76,5 +76,7 @@ int qtnf_cmd_set_tx_power(const struct qtnf_vif *vif,
int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
const struct cfg80211_wowlan *wowl);
int qtnf_cmd_netdev_changeupper(const struct qtnf_vif *vif, int br_domain);
+int qtnf_cmd_send_update_owe(struct qtnf_vif *vif,
+ struct cfg80211_update_owe_info *owe);
#endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 4320180f8c07..eea777f8acea 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/if_ether.h>
+#include <linux/nospec.h>
#include "core.h"
#include "bus.h"
@@ -41,11 +42,12 @@ struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid)
{
struct qtnf_wmac *mac = NULL;
- if (unlikely(macid >= QTNF_MAX_MAC)) {
+ if (macid >= QTNF_MAX_MAC) {
pr_err("invalid MAC index %u\n", macid);
return NULL;
}
+ macid = array_index_nospec(macid, QTNF_MAX_MAC);
mac = bus->mac[macid];
if (unlikely(!mac)) {
@@ -429,18 +431,28 @@ static void qtnf_vif_send_data_high_pri(struct work_struct *work)
static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
unsigned int macid)
{
+ struct platform_device *pdev = NULL;
+ struct qtnf_wmac *mac;
struct qtnf_vif *vif;
struct wiphy *wiphy;
- struct qtnf_wmac *mac;
unsigned int i;
- wiphy = qtnf_wiphy_allocate(bus);
+ if (bus->hw_info.num_mac > 1) {
+ pdev = platform_device_register_data(bus->dev,
+ dev_name(bus->dev),
+ macid, NULL, 0);
+ if (IS_ERR(pdev))
+ return ERR_PTR(-EINVAL);
+ }
+
+ wiphy = qtnf_wiphy_allocate(bus, pdev);
if (!wiphy)
return ERR_PTR(-ENOMEM);
mac = wiphy_priv(wiphy);
mac->macid = macid;
+ mac->pdev = pdev;
mac->bus = bus;
mutex_init(&mac->mac_lock);
INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout);
@@ -491,19 +503,18 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
dev_net_set(dev, wiphy_net(wiphy));
dev->ieee80211_ptr = &vif->wdev;
ether_addr_copy(dev->dev_addr, vif->mac_addr);
- SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = QTNF_DEF_WDOG_TIMEOUT;
dev->tx_queue_len = 100;
dev->ethtool_ops = &qtnf_ethtool_ops;
- if (mac->bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE)
+ if (qtnf_hwcap_is_set(&mac->bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE))
dev->needed_tailroom = sizeof(struct qtnf_frame_meta_info);
qdev_vif = netdev_priv(dev);
*((void **)qdev_vif) = vif;
- SET_NETDEV_DEV(dev, mac->bus->dev);
+ SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
ret = register_netdevice(dev);
if (ret) {
@@ -559,6 +570,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
wiphy->bands[band] = NULL;
}
+ platform_device_unregister(mac->pdev);
qtnf_mac_iface_comb_free(mac);
qtnf_mac_ext_caps_free(mac);
kfree(mac->macinfo.wowlan);
@@ -585,16 +597,6 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
return PTR_ERR(mac);
}
- ret = qtnf_cmd_get_mac_info(mac);
- if (ret) {
- pr_err("MAC%u: failed to get info\n", macid);
- goto error;
- }
-
- /* Use MAC address of the first active radio as a unique device ID */
- if (is_zero_ether_addr(mac->bus->hw_id))
- ether_addr_copy(mac->bus->hw_id, mac->macaddr);
-
vif = qtnf_mac_get_base_vif(mac);
if (!vif) {
pr_err("MAC%u: primary VIF is not ready\n", macid);
@@ -609,12 +611,16 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
goto error;
}
- ret = qtnf_cmd_send_get_phy_params(mac);
+ ret = qtnf_cmd_get_mac_info(mac);
if (ret) {
- pr_err("MAC%u: failed to get PHY settings\n", macid);
+ pr_err("MAC%u: failed to get MAC info\n", macid);
goto error_del_vif;
}
+ /* Use MAC address of the first active radio as a unique device ID */
+ if (is_zero_ether_addr(mac->bus->hw_id))
+ ether_addr_copy(mac->bus->hw_id, mac->macaddr);
+
ret = qtnf_mac_init_bands(mac);
if (ret) {
pr_err("MAC%u: failed to init bands\n", macid);
@@ -639,7 +645,7 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
goto error_del_vif;
}
- if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) {
+ if (qtnf_hwcap_is_set(&bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE)) {
ret = qtnf_cmd_netdev_changeupper(vif, vif->netdev->ifindex);
if (ret)
goto error;
@@ -705,7 +711,8 @@ static int qtnf_core_netdevice_event(struct notifier_block *nb,
info->linking ? "add" : "del");
if (IS_ENABLED(CONFIG_NET_SWITCHDEV) &&
- (bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE)) {
+ qtnf_hwcap_is_set(&bus->hw_info,
+ QLINK_HW_CAPAB_HW_BRIDGE)) {
if (info->linking)
br_domain = brdev->ifindex;
else
@@ -756,6 +763,15 @@ int qtnf_core_attach(struct qtnf_bus *bus)
goto error;
}
+ if (QLINK_VER_MAJOR(bus->hw_info.ql_proto_ver) !=
+ QLINK_PROTO_VER_MAJOR) {
+ pr_err("qlink driver vs FW version mismatch: %u vs %u\n",
+ QLINK_PROTO_VER_MAJOR,
+ QLINK_VER_MAJOR(bus->hw_info.ql_proto_ver));
+ ret = -EPROTONOSUPPORT;
+ goto error;
+ }
+
bus->fw_state = QTNF_FW_STATE_ACTIVE;
ret = qtnf_cmd_get_hw_info(bus);
if (ret) {
@@ -763,14 +779,7 @@ int qtnf_core_attach(struct qtnf_bus *bus)
goto error;
}
- if (bus->hw_info.ql_proto_ver != QLINK_PROTO_VER) {
- pr_err("qlink version mismatch %u != %u\n",
- QLINK_PROTO_VER, bus->hw_info.ql_proto_ver);
- ret = -EPROTONOSUPPORT;
- goto error;
- }
-
- if ((bus->hw_info.hw_capab & QLINK_HW_CAPAB_HW_BRIDGE) &&
+ if (qtnf_hwcap_is_set(&bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE) &&
bus->bus_ops->data_tx_use_meta_set)
bus->bus_ops->data_tx_use_meta_set(bus, true);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index d715e1cd0006..269ce12cf8bf 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -20,9 +20,11 @@
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
+#include <linux/platform_device.h>
#include "qlink.h"
#include "trans.h"
+#include "qlink_util.h"
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
@@ -83,7 +85,8 @@ struct qtnf_mac_info {
u8 sretry_limit;
u8 coverage_class;
u8 radar_detect_widths;
- u32 max_acl_mac_addrs;
+ u8 max_scan_ssids;
+ u16 max_acl_mac_addrs;
struct ieee80211_ht_cap ht_cap_mod_mask;
struct ieee80211_vht_cap vht_cap_mod_mask;
struct ieee80211_iface_combination *if_comb;
@@ -94,15 +97,6 @@ struct qtnf_mac_info {
struct wiphy_wowlan_support *wowlan;
};
-struct qtnf_chan_stats {
- u32 chan_num;
- u32 cca_tx;
- u32 cca_rx;
- u32 cca_busy;
- u32 cca_try;
- s8 chan_noise;
-};
-
struct qtnf_wmac {
u8 macid;
u8 wiphy_registered;
@@ -114,19 +108,19 @@ struct qtnf_wmac {
struct mutex mac_lock; /* lock during wmac specific ops */
struct delayed_work scan_timeout;
struct ieee80211_regdomain *rd;
+ struct platform_device *pdev;
};
struct qtnf_hw_info {
- u16 ql_proto_ver;
+ u32 ql_proto_ver;
u8 num_mac;
u8 mac_bitmap;
u32 fw_ver;
- u32 hw_capab;
u8 total_tx_chain;
u8 total_rx_chain;
char fw_version[ETHTOOL_FWVERS_LEN];
u32 hw_version;
- u8 max_scan_ssids;
+ u8 hw_capab[QLINK_HW_CAPAB_NUM / BITS_PER_BYTE + 1];
};
struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
@@ -135,12 +129,12 @@ void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac);
void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac);
bool qtnf_slave_radar_get(void);
bool qtnf_dfs_offload_get(void);
-struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
+struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus,
+ struct platform_device *pdev);
int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
const char *name, unsigned char name_assign_type);
void qtnf_main_work_queue(struct work_struct *work);
int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed);
-int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac);
struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid);
struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb);
@@ -160,4 +154,11 @@ static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)
return *((void **)netdev_priv(dev));
}
+static inline bool qtnf_hwcap_is_set(const struct qtnf_hw_info *info,
+ unsigned int bit)
+{
+ return qtnf_utils_is_bit_set(info->hw_capab, bit,
+ sizeof(info->hw_capab));
+}
+
#endif /* _QTN_FMAC_CORE_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 51af93bdf06e..c775c177933b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include "cfg80211.h"
#include "core.h"
@@ -25,7 +26,6 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
size_t payload_len;
u16 tlv_type;
u16 tlv_value_len;
- size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
int ret = 0;
@@ -58,23 +58,17 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
sinfo->generation = vif->generation;
payload_len = len - sizeof(*sta_assoc);
- tlv = (const struct qlink_tlv_hdr *)sta_assoc->ies;
- while (payload_len >= sizeof(*tlv)) {
+ qlink_for_each_tlv(tlv, sta_assoc->ies, payload_len) {
tlv_type = le16_to_cpu(tlv->type);
tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
-
- if (tlv_full_len > payload_len) {
- ret = -EINVAL;
- goto out;
- }
if (tlv_type == QTN_TLV_ID_IE_SET) {
const struct qlink_tlv_ie_set *ie_set;
unsigned int ie_len;
- if (payload_len < sizeof(*ie_set)) {
+ if (tlv_value_len <
+ (sizeof(*ie_set) - sizeof(ie_set->hdr))) {
ret = -EINVAL;
goto out;
}
@@ -88,12 +82,10 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
sinfo->assoc_req_ies_len = ie_len;
}
}
-
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (payload_len) {
+ if (!qlink_tlv_parsing_ok(tlv, sta_assoc->ies, payload_len)) {
+ pr_err("Malformed TLV buffer\n");
ret = -EINVAL;
goto out;
}
@@ -153,7 +145,6 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
size_t payload_len;
u16 tlv_type;
u16 tlv_value_len;
- size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
const u8 *rsp_ies = NULL;
size_t rsp_ies_len = 0;
@@ -235,24 +226,17 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
}
payload_len = len - sizeof(*join_info);
- tlv = (struct qlink_tlv_hdr *)join_info->ies;
- while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
+ qlink_for_each_tlv(tlv, join_info->ies, payload_len) {
tlv_type = le16_to_cpu(tlv->type);
tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
-
- if (payload_len < tlv_full_len) {
- pr_warn("invalid %u TLV\n", tlv_type);
- status = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto done;
- }
if (tlv_type == QTN_TLV_ID_IE_SET) {
const struct qlink_tlv_ie_set *ie_set;
unsigned int ie_len;
- if (payload_len < sizeof(*ie_set)) {
+ if (tlv_value_len <
+ (sizeof(*ie_set) - sizeof(ie_set->hdr))) {
pr_warn("invalid IE_SET TLV\n");
status = WLAN_STATUS_UNSPECIFIED_FAILURE;
goto done;
@@ -275,15 +259,10 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif,
break;
}
}
-
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (payload_len)
- pr_warn("VIF%u.%u: unexpected remaining payload: %zu\n",
- vif->mac->macid, vif->vifid, payload_len);
-
+ if (!qlink_tlv_parsing_ok(tlv, join_info->ies, payload_len))
+ pr_warn("Malformed TLV buffer\n");
done:
cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, rsp_ies,
rsp_ies_len, status, GFP_KERNEL);
@@ -368,7 +347,6 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
size_t payload_len;
u16 tlv_type;
u16 tlv_value_len;
- size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
const u8 *ies = NULL;
size_t ies_len = 0;
@@ -387,21 +365,17 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
}
payload_len = len - sizeof(*sr);
- tlv = (struct qlink_tlv_hdr *)sr->payload;
- while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
+ qlink_for_each_tlv(tlv, sr->payload, payload_len) {
tlv_type = le16_to_cpu(tlv->type);
tlv_value_len = le16_to_cpu(tlv->len);
- tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
-
- if (tlv_full_len > payload_len)
- return -EINVAL;
if (tlv_type == QTN_TLV_ID_IE_SET) {
const struct qlink_tlv_ie_set *ie_set;
unsigned int ie_len;
- if (payload_len < sizeof(*ie_set))
+ if (tlv_value_len <
+ (sizeof(*ie_set) - sizeof(ie_set->hdr)))
return -EINVAL;
ie_set = (const struct qlink_tlv_ie_set *)tlv;
@@ -424,12 +398,9 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
ies_len = ie_len;
}
}
-
- payload_len -= tlv_full_len;
- tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (payload_len)
+ if (!qlink_tlv_parsing_ok(tlv, sr->payload, payload_len))
return -EINVAL;
bss = cfg80211_inform_bss(wiphy, channel, frame_type,
@@ -607,9 +578,9 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif,
ether_addr_copy(auth.bssid, ev->bssid);
auth.action = ev->action;
- pr_info("%s: external auth bss=%pM action=%u akm=%u\n",
- vif->netdev->name, auth.bssid, auth.action,
- auth.key_mgmt_suite);
+ pr_debug("%s: external SAE processing: bss=%pM action=%u akm=%u\n",
+ vif->netdev->name, auth.bssid, auth.action,
+ auth.key_mgmt_suite);
ret = cfg80211_external_auth_request(vif->netdev, &auth, GFP_KERNEL);
if (ret)
@@ -654,6 +625,50 @@ qtnf_event_handle_mic_failure(struct qtnf_vif *vif,
return 0;
}
+static int
+qtnf_event_handle_update_owe(struct qtnf_vif *vif,
+ const struct qlink_event_update_owe *owe_ev,
+ u16 len)
+{
+ struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+ struct cfg80211_update_owe_info owe_info = {};
+ const u16 ie_len = len - sizeof(*owe_ev);
+ u8 *ie;
+
+ if (len < sizeof(*owe_ev)) {
+ pr_err("VIF%u.%u: payload is too short (%u < %zu)\n",
+ vif->mac->macid, vif->vifid, len,
+ sizeof(struct qlink_event_update_owe));
+ return -EINVAL;
+ }
+
+ if (!wiphy->registered || !vif->netdev)
+ return 0;
+
+ if (vif->wdev.iftype != NL80211_IFTYPE_AP) {
+ pr_err("VIF%u.%u: UPDATE_OWE event when not in AP mode\n",
+ vif->mac->macid, vif->vifid);
+ return -EPROTO;
+ }
+
+ ie = kzalloc(ie_len, GFP_KERNEL);
+ if (!ie)
+ return -ENOMEM;
+
+ memcpy(owe_info.peer, owe_ev->peer, ETH_ALEN);
+ memcpy(ie, owe_ev->ies, ie_len);
+ owe_info.ie_len = ie_len;
+ owe_info.ie = ie;
+
+ pr_info("%s: external OWE processing: peer=%pM\n",
+ vif->netdev->name, owe_ev->peer);
+
+ cfg80211_update_owe_info_event(vif->netdev, &owe_info, GFP_KERNEL);
+ kfree(ie);
+
+ return 0;
+}
+
static int qtnf_event_parse(struct qtnf_wmac *mac,
const struct sk_buff *event_skb)
{
@@ -662,18 +677,20 @@ static int qtnf_event_parse(struct qtnf_wmac *mac,
int ret = -1;
u16 event_id;
u16 event_len;
+ u8 vifid;
event = (const struct qlink_event *)event_skb->data;
event_id = le16_to_cpu(event->event_id);
event_len = le16_to_cpu(event->mhdr.len);
- if (likely(event->vifid < QTNF_MAX_INTF)) {
- vif = &mac->iflist[event->vifid];
- } else {
+ if (event->vifid >= QTNF_MAX_INTF) {
pr_err("invalid vif(%u)\n", event->vifid);
return -EINVAL;
}
+ vifid = array_index_nospec(event->vifid, QTNF_MAX_INTF);
+ vif = &mac->iflist[vifid];
+
switch (event_id) {
case QLINK_EVENT_STA_ASSOCIATED:
ret = qtnf_event_handle_sta_assoc(mac, vif, (const void *)event,
@@ -720,6 +737,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac,
ret = qtnf_event_handle_mic_failure(vif, (const void *)event,
event_len);
break;
+ case QLINK_EVENT_UPDATE_OWE:
+ ret = qtnf_event_handle_update_owe(vif, (const void *)event,
+ event_len);
+ break;
default:
pr_warn("unknown event type: %x\n", event_id);
break;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 8e0d8018208a..dbb241106d8a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -593,7 +593,7 @@ static int qtnf_pcie_skb_send(struct qtnf_bus *bus, struct sk_buff *skb)
priv->tx_bd_w_index = i;
tx_done:
- if (ret && skb) {
+ if (ret) {
pr_err_ratelimited("drop skb\n");
if (skb->dev)
skb->dev->stats.tx_dropped++;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index b2edb03819d1..4d22a54c034f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -6,7 +6,20 @@
#include <linux/ieee80211.h>
-#define QLINK_PROTO_VER 16
+#define QLINK_PROTO_VER_MAJOR_M 0xFFFF
+#define QLINK_PROTO_VER_MAJOR_S 16
+#define QLINK_PROTO_VER_MINOR_M 0xFFFF
+#define QLINK_VER_MINOR(_ver) ((_ver) & QLINK_PROTO_VER_MINOR_M)
+#define QLINK_VER_MAJOR(_ver) \
+ (((_ver) >> QLINK_PROTO_VER_MAJOR_S) & QLINK_PROTO_VER_MAJOR_M)
+#define QLINK_VER(_maj, _min) (((_maj) << QLINK_PROTO_VER_MAJOR_S) | (_min))
+
+#define QLINK_PROTO_VER_MAJOR 18
+#define QLINK_PROTO_VER_MINOR 1
+#define QLINK_PROTO_VER \
+ QLINK_VER(QLINK_PROTO_VER_MAJOR, QLINK_PROTO_VER_MINOR)
+
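+/*
+ * e.g. QLINK_VER(18, 1) == (18 << 16) | 1 == 0x00120001, so
+ * QLINK_VER_MAJOR(0x00120001) == 18 and QLINK_VER_MINOR(0x00120001) == 1.
+ * Only the major number is used for the compatibility check in
+ * qtnf_core_attach().
+ */
+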
+#define QLINK_ALIGN 4
#define QLINK_MACID_RSVD 0xFF
#define QLINK_VIFID_RSVD 0xFF
@@ -62,15 +75,24 @@ struct qlink_msg_header {
* @QLINK_HW_CAPAB_HW_BRIDGE: device has hardware switch capabilities.
*/
enum qlink_hw_capab {
- QLINK_HW_CAPAB_REG_UPDATE = BIT(0),
- QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1),
- QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2),
- QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3),
- QLINK_HW_CAPAB_PWR_MGMT = BIT(4),
- QLINK_HW_CAPAB_OBSS_SCAN = BIT(5),
- QLINK_HW_CAPAB_SCAN_DWELL = BIT(6),
- QLINK_HW_CAPAB_SAE = BIT(8),
- QLINK_HW_CAPAB_HW_BRIDGE = BIT(9),
+ QLINK_HW_CAPAB_REG_UPDATE = 0,
+ QLINK_HW_CAPAB_STA_INACT_TIMEOUT,
+ QLINK_HW_CAPAB_DFS_OFFLOAD,
+ QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR,
+ QLINK_HW_CAPAB_PWR_MGMT,
+ QLINK_HW_CAPAB_OBSS_SCAN,
+ QLINK_HW_CAPAB_SCAN_DWELL,
+ QLINK_HW_CAPAB_SAE,
+ QLINK_HW_CAPAB_HW_BRIDGE,
+ QLINK_HW_CAPAB_NUM
+};
+
+/**
+ * enum qlink_driver_capab - host driver capabilities.
+ */
+enum qlink_driver_capab {
+ QLINK_DRV_CAPAB_NUM = 0
};
enum qlink_iface_type {
@@ -164,7 +186,7 @@ struct qlink_chandef {
__le16 center_freq1;
__le16 center_freq2;
u8 width;
- u8 rsvd;
+ u8 rsvd[3];
} __packed;
#define QLINK_MAX_NR_CIPHER_SUITES 5
@@ -269,7 +291,6 @@ enum qlink_cmd_type {
QLINK_CMD_REGISTER_MGMT = 0x0003,
QLINK_CMD_SEND_FRAME = 0x0004,
QLINK_CMD_MGMT_SET_APPIE = 0x0005,
- QLINK_CMD_PHY_PARAMS_GET = 0x0011,
QLINK_CMD_PHY_PARAMS_SET = 0x0012,
QLINK_CMD_GET_HW_INFO = 0x0013,
QLINK_CMD_MAC_INFO = 0x0014,
@@ -301,6 +322,7 @@ enum qlink_cmd_type {
QLINK_CMD_WOWLAN_SET = 0x0063,
QLINK_CMD_EXTERNAL_AUTH = 0x0066,
QLINK_CMD_TXPWR = 0x0067,
+ QLINK_CMD_UPDATE_OWE = 0x0068,
};
/**
@@ -321,9 +343,26 @@ struct qlink_cmd {
struct qlink_msg_header mhdr;
__le16 cmd_id;
__le16 seq_num;
- u8 rsvd[2];
u8 macid;
u8 vifid;
+ u8 rsvd[2];
+} __packed;
+
+/**
+ * struct qlink_cmd_init_fw - data for QLINK_CMD_FW_INIT
+ *
+ * Initialize firmware based on the specified host configuration. This is the
+ * first command sent to the wifi card, and its fixed part must never be
+ * changed; any additions must be done by appending TLVs.
+ * If the wifi card cannot operate with the specified parameters, it returns
+ * an error.
+ *
+ * @qlink_proto_ver: QLINK protocol version used by host driver.
+ */
+struct qlink_cmd_init_fw {
+ struct qlink_cmd chdr;
+ __le32 qlink_proto_ver;
+ u8 var_info[0];
} __packed;
/**
@@ -368,6 +407,7 @@ struct qlink_cmd_mgmt_frame_register {
struct qlink_cmd chdr;
__le16 frame_type;
u8 do_register;
+ u8 rsvd[1];
} __packed;
/**
@@ -405,6 +445,7 @@ struct qlink_cmd_frame_tx {
struct qlink_cmd_get_sta_info {
struct qlink_cmd chdr;
u8 sta_addr[ETH_ALEN];
+ u8 rsvd[2];
} __packed;
/**
@@ -424,6 +465,7 @@ struct qlink_cmd_add_key {
u8 addr[ETH_ALEN];
__le32 cipher;
__le16 vlanid;
+ u8 rsvd[2];
u8 key_data[0];
} __packed;
@@ -453,6 +495,7 @@ struct qlink_cmd_set_def_key {
u8 key_index;
u8 unicast;
u8 multicast;
+ u8 rsvd[1];
} __packed;
/**
@@ -463,6 +506,7 @@ struct qlink_cmd_set_def_key {
struct qlink_cmd_set_def_mgmt_key {
struct qlink_cmd chdr;
u8 key_index;
+ u8 rsvd[3];
} __packed;
/**
@@ -479,6 +523,7 @@ struct qlink_cmd_change_sta {
__le16 if_type;
__le16 vlanid;
u8 sta_addr[ETH_ALEN];
+ u8 rsvd[2];
} __packed;
/**
@@ -489,8 +534,9 @@ struct qlink_cmd_change_sta {
struct qlink_cmd_del_sta {
struct qlink_cmd chdr;
__le16 reason_code;
- u8 subtype;
u8 sta_addr[ETH_ALEN];
+ u8 subtype;
+ u8 rsvd[3];
} __packed;
enum qlink_sta_connect_flags {
@@ -544,7 +590,7 @@ struct qlink_cmd_connect {
*/
struct qlink_cmd_external_auth {
struct qlink_cmd chdr;
- u8 bssid[ETH_ALEN];
+ u8 peer[ETH_ALEN];
__le16 status;
u8 payload[0];
} __packed;
@@ -557,6 +603,7 @@ struct qlink_cmd_external_auth {
struct qlink_cmd_disconnect {
struct qlink_cmd chdr;
__le16 reason;
+ u8 rsvd[2];
} __packed;
/**
@@ -568,6 +615,7 @@ struct qlink_cmd_disconnect {
struct qlink_cmd_updown {
struct qlink_cmd chdr;
u8 if_up;
+ u8 rsvd[3];
} __packed;
/**
@@ -591,16 +639,17 @@ enum qlink_band {
struct qlink_cmd_band_info_get {
struct qlink_cmd chdr;
u8 band;
+ u8 rsvd[3];
} __packed;
/**
* struct qlink_cmd_get_chan_stats - data for QLINK_CMD_CHAN_STATS command
*
- * @channel: channel number according to 802.11 17.3.8.3.2 and Annex J
+ * @channel_freq: channel center frequency
*/
struct qlink_cmd_get_chan_stats {
struct qlink_cmd chdr;
- __le16 channel;
+ __le32 channel_freq;
} __packed;
/**
@@ -653,19 +702,33 @@ struct qlink_cmd_reg_notify {
} __packed;
/**
+ * enum qlink_chan_sw_flags - channel switch control flags
+ *
+ * @QLINK_CHAN_SW_RADAR_REQUIRED: whether radar detection is required on a new
+ * channel.
+ * @QLINK_CHAN_SW_BLOCK_TX: whether transmissions should be blocked while
+ * changing a channel.
+ */
+enum qlink_chan_sw_flags {
+ QLINK_CHAN_SW_RADAR_REQUIRED = BIT(0),
+ QLINK_CHAN_SW_BLOCK_TX = BIT(1),
+};
+
+/**
* struct qlink_cmd_chan_switch - data for QLINK_CMD_CHAN_SWITCH command
*
- * @channel: channel number according to 802.11 17.3.8.3.2 and Annex J
- * @radar_required: whether radar detection is required on the new channel
- * @block_tx: whether transmissions should be blocked while changing
+ * @channel: channel to switch to.
+ * @flags: flags to control channel switch, bitmap of &enum qlink_chan_sw_flags.
+ * @n_counter_offsets_beacon: number of CSA counter offsets in beacon frames.
+ * @n_counter_offsets_presp: number of CSA counter offsets in probe responses.
* @beacon_count: number of beacons until switch
*/
struct qlink_cmd_chan_switch {
struct qlink_cmd chdr;
- __le16 channel;
- u8 radar_required;
- u8 block_tx;
+ struct qlink_chandef channel;
+ __le64 flags;
+ __le32 n_counter_offsets_beacon;
+ __le32 n_counter_offsets_presp;
u8 beacon_count;
+ u8 rsvd[3];
} __packed;
/**
@@ -769,6 +832,7 @@ struct qlink_cmd_pm_set {
struct qlink_cmd chdr;
__le32 pm_standby_timer;
u8 pm_mode;
+ u8 rsvd[3];
} __packed;
/**
@@ -857,6 +921,60 @@ struct qlink_cmd_ndev_changeupper {
u8 rsvd[1];
} __packed;
+/**
+ * enum qlink_scan_flags - scan request control flags
+ *
+ * Scan flags are used to control QLINK_CMD_SCAN behavior.
+ *
+ * @QLINK_SCAN_FLAG_FLUSH: flush cache before scanning.
+ * @QLINK_SCAN_FLAG_DURATION_MANDATORY: the duration specified in the scan
+ *	request must be honored rather than treated as a hint.
+ */
+enum qlink_scan_flags {
+ QLINK_SCAN_FLAG_FLUSH = BIT(0),
+ QLINK_SCAN_FLAG_DURATION_MANDATORY = BIT(1),
+};
+
+/**
+ * struct qlink_cmd_scan - data for QLINK_CMD_SCAN command
+ *
+ * @flags: scan flags, a bitmap of &enum qlink_scan_flags.
+ * @n_ssids: number of WLAN_EID_SSID TLVs expected in variable portion of the
+ * command.
+ * @n_channels: number of QTN_TLV_ID_CHANNEL TLVs expected in variable payload.
+ * @active_dwell: time spent on a single channel for an active scan.
+ * @passive_dwell: time spent on a single channel for a passive scan.
+ * @sample_duration: total duration of sampling a single channel during a scan
+ * including off-channel dwell time and operating channel time.
+ * @bssid: specific BSSID to scan for or a broadcast BSSID.
+ * @scan_width: channel width to use, one of &enum qlink_channel_width.
+ */
+struct qlink_cmd_scan {
+ struct qlink_cmd chdr;
+ __le64 flags;
+ __le16 n_ssids;
+ __le16 n_channels;
+ __le16 active_dwell;
+ __le16 passive_dwell;
+ __le16 sample_duration;
+ u8 bssid[ETH_ALEN];
+ u8 scan_width;
+ u8 rsvd[3];
+ u8 var_info[0];
+} __packed;
+
+/**
+ * struct qlink_cmd_update_owe - data for QLINK_CMD_UPDATE_OWE_INFO command
+ *
+ * @peer: MAC of the peer device for which OWE processing has been completed
+ * @status: OWE external processing status code
+ * @ies: IEs for the peer constructed by the user space
+ */
+struct qlink_cmd_update_owe {
+ struct qlink_cmd chdr;
+ u8 peer[ETH_ALEN];
+ __le16 status;
+ u8 ies[0];
+} __packed;
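+
+/*
+ * OWE offload round trip: firmware reports a connecting peer's IEs via
+ * QLINK_EVENT_UPDATE_OWE, the host forwards them to user space with
+ * cfg80211_update_owe_info_event(), and the DH IEs built by user space
+ * come back down in this command.
+ */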
+
/* QLINK Command Responses messages related definitions
*/
@@ -896,6 +1014,16 @@ struct qlink_resp {
} __packed;
/**
+ * struct qlink_resp_init_fw - response for QLINK_CMD_FW_INIT
+ *
+ * @qlink_proto_ver: QLINK protocol version used by wifi card firmware.
+ */
+struct qlink_resp_init_fw {
+ struct qlink_resp rhdr;
+ __le32 qlink_proto_ver;
+} __packed;
+
+/**
* enum qlink_dfs_regions - regulatory DFS regions
*
* Corresponds to &enum nl80211_dfs_regions.
@@ -919,6 +1047,7 @@ enum qlink_dfs_regions {
* @num_rx_chain: Number of receive chains used by WMAC.
* @vht_cap_mod_mask: mask specifying which VHT capabilities can be altered.
* @ht_cap_mod_mask: mask specifying which HT capabilities can be altered.
+ * @max_scan_ssids: maximum number of SSIDs the device can scan for in any scan.
* @bands_cap: wireless bands WMAC can operate in, bitmap of &enum qlink_band.
* @max_ap_assoc_sta: Maximum number of associations supported by WMAC.
* @radar_detect_widths: bitmask of channels BW for which WMAC can detect radar.
@@ -935,14 +1064,48 @@ struct qlink_resp_get_mac_info {
u8 num_rx_chain;
struct ieee80211_vht_cap vht_cap_mod_mask;
struct ieee80211_ht_cap ht_cap_mod_mask;
+
__le16 max_ap_assoc_sta;
+ __le32 hw_version;
+ __le32 probe_resp_offload;
+ __le32 bss_select_support;
+ __le16 n_addresses;
__le16 radar_detect_widths;
- __le32 max_acl_mac_addrs;
+ __le16 max_remain_on_channel_duration;
+ __le16 max_acl_mac_addrs;
+
+ __le32 frag_threshold;
+ __le32 rts_threshold;
+ u8 retry_short;
+ u8 retry_long;
+ u8 coverage_class;
+
+ u8 max_scan_ssids;
+ u8 max_sched_scan_reqs;
+ u8 max_sched_scan_ssids;
+ u8 max_match_sets;
+ u8 max_adj_channel_rssi_comp;
+
+ __le16 max_scan_ie_len;
+ __le16 max_sched_scan_ie_len;
+ __le32 max_sched_scan_plans;
+ __le32 max_sched_scan_plan_interval;
+ __le32 max_sched_scan_plan_iterations;
+
+ u8 n_cipher_suites;
+ u8 n_akm_suites;
+ u8 max_num_pmkids;
+ u8 num_iftype_ext_capab;
+ u8 extended_capabilities_len;
+ u8 max_data_retry_count;
+ u8 n_iface_combinations;
+ u8 max_num_csa_counters;
+
u8 bands_cap;
u8 alpha2[2];
u8 n_reg_rules;
u8 dfs_region;
- u8 rsvd[1];
+ u8 rsvd[3];
u8 var_info[0];
} __packed;
@@ -952,8 +1115,6 @@ struct qlink_resp_get_mac_info {
* Description of wireless hardware capabilities and features.
*
* @fw_ver: wireless hardware firmware version.
- * @hw_capab: Bitmap of capabilities supported by firmware.
- * @ql_proto_ver: Version of QLINK protocol used by firmware.
* @num_mac: Number of separate physical radio devices provided by hardware.
* @mac_bitmap: Bitmap of MAC IDs that are active and can be used in firmware.
* @total_tx_chains: total number of transmit chains used by device.
@@ -963,11 +1124,9 @@ struct qlink_resp_get_mac_info {
struct qlink_resp_get_hw_info {
struct qlink_resp rhdr;
__le32 fw_ver;
- __le32 hw_capab;
__le32 bld_tmstamp;
__le32 plat_id;
__le32 hw_ver;
- __le16 ql_proto_ver;
u8 num_mac;
u8 mac_bitmap;
u8 total_tx_chain;
@@ -1001,8 +1160,6 @@ enum qlink_sta_info_rate_flags {
*
* Response data containing statistics for specified STA.
*
- * @filled: a bitmask of &enum qlink_sta_info, specifies which info in response
- * is valid.
* @sta_addr: MAC address of STA the response carries statistic for.
* @info: variable statistics for specified STA.
*/
@@ -1031,22 +1188,14 @@ struct qlink_resp_band_info_get {
} __packed;
/**
- * struct qlink_resp_phy_params - response for QLINK_CMD_PHY_PARAMS_GET command
- *
- * @info: variable-length array of PHY params.
- */
-struct qlink_resp_phy_params {
- struct qlink_resp rhdr;
- u8 info[0];
-} __packed;
-
-/**
* struct qlink_resp_get_chan_stats - response for QLINK_CMD_CHAN_STATS cmd
*
+ * @chan_freq: center frequency of the channel the report is sent for.
* @info: variable-length channel info.
*/
struct qlink_resp_get_chan_stats {
- struct qlink_cmd rhdr;
+ struct qlink_resp rhdr;
+ __le32 chan_freq;
u8 info[0];
} __packed;
@@ -1088,6 +1237,7 @@ enum qlink_event_type {
QLINK_EVENT_RADAR = 0x0029,
QLINK_EVENT_EXTERNAL_AUTH = 0x0030,
QLINK_EVENT_MIC_FAILURE = 0x0031,
+ QLINK_EVENT_UPDATE_OWE = 0x0032,
};
/**
@@ -1158,6 +1308,7 @@ struct qlink_event_bss_join {
struct qlink_event_bss_leave {
struct qlink_event ehdr;
__le16 reason;
+ u8 rsvd[2];
} __packed;
/**
@@ -1274,10 +1425,10 @@ struct qlink_event_radar {
*/
struct qlink_event_external_auth {
struct qlink_event ehdr;
+ __le32 akm_suite;
u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 ssid_len;
u8 bssid[ETH_ALEN];
- __le32 akm_suite;
+ u8 ssid_len;
u8 action;
} __packed;
@@ -1295,26 +1446,36 @@ struct qlink_event_mic_failure {
u8 pairwise;
} __packed;
+/**
+ * struct qlink_event_update_owe - data for QLINK_EVENT_UPDATE_OWE event
+ *
+ * @peer: MAC addr of the peer device for which OWE processing needs to be done
+ * @ies: IEs from the peer
+ */
+struct qlink_event_update_owe {
+ struct qlink_event ehdr;
+ u8 peer[ETH_ALEN];
+ u8 rsvd[2];
+ u8 ies[0];
+} __packed;
+
/* QLINK TLVs (Type-Length Values) definitions
*/
/**
* enum qlink_tlv_id - list of TLVs that Qlink messages can carry
*
- * @QTN_TLV_ID_STA_STATS_MAP: a bitmap of &enum qlink_sta_info, used to
- * indicate which statistic carried in QTN_TLV_ID_STA_STATS is valid.
+ * @QTN_TLV_ID_BITMAP: data representing a bitmap that is used together with
+ * other TLVs:
+ * &enum qlink_sta_info used to indicate which statistic carried in
+ * QTN_TLV_ID_STA_STATS is valid.
+ * &enum qlink_hw_capab listing wireless card capabilities.
+ * &enum qlink_driver_capab listing driver/host system capabilities.
+ * &enum qlink_chan_stat used to indicate which statistic carried in
+ * QTN_TLV_ID_CHANNEL_STATS is valid.
* @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by
* &struct qlink_sta_stats. Valid values are marked as such in a bitmap
- * carried by QTN_TLV_ID_STA_STATS_MAP.
- * @QTN_TLV_ID_MAX_SCAN_SSIDS: maximum number of SSIDs the device can scan
- * for in any given scan.
- * @QTN_TLV_ID_SCAN_DWELL_ACTIVE: time spent on a single channel for an active
- * scan.
- * @QTN_TLV_ID_SCAN_DWELL_PASSIVE: time spent on a single channel for a passive
- * scan.
- * @QTN_TLV_ID_SCAN_SAMPLE_DURATION: total duration of sampling a single channel
- * during a scan including off-channel dwell time and operating channel
- * time.
+ * carried by QTN_TLV_ID_BITMAP.
* @QTN_TLV_ID_IFTYPE_DATA: supported band data.
*/
enum qlink_tlv_id {
@@ -1325,11 +1486,10 @@ enum qlink_tlv_id {
QTN_TLV_ID_REG_RULE = 0x0207,
QTN_TLV_ID_CHANNEL = 0x020F,
QTN_TLV_ID_CHANDEF = 0x0210,
- QTN_TLV_ID_STA_STATS_MAP = 0x0211,
+ QTN_TLV_ID_BITMAP = 0x0211,
QTN_TLV_ID_STA_STATS = 0x0212,
QTN_TLV_ID_COVERAGE_CLASS = 0x0213,
QTN_TLV_ID_IFACE_LIMIT = 0x0214,
- QTN_TLV_ID_NUM_IFACE_COMB = 0x0215,
QTN_TLV_ID_CHANNEL_STATS = 0x0216,
QTN_TLV_ID_KEY = 0x0302,
QTN_TLV_ID_SEQ = 0x0303,
@@ -1344,13 +1504,8 @@ enum qlink_tlv_id {
QTN_TLV_ID_CALIBRATION_VER = 0x0406,
QTN_TLV_ID_UBOOT_VER = 0x0407,
QTN_TLV_ID_RANDOM_MAC_ADDR = 0x0408,
- QTN_TLV_ID_MAX_SCAN_SSIDS = 0x0409,
QTN_TLV_ID_WOWLAN_CAPAB = 0x0410,
QTN_TLV_ID_WOWLAN_PATTERN = 0x0411,
- QTN_TLV_ID_SCAN_FLUSH = 0x0412,
- QTN_TLV_ID_SCAN_DWELL_ACTIVE = 0x0413,
- QTN_TLV_ID_SCAN_DWELL_PASSIVE = 0x0416,
- QTN_TLV_ID_SCAN_SAMPLE_DURATION = 0x0417,
QTN_TLV_ID_IFTYPE_DATA = 0x0418,
};
@@ -1360,10 +1515,6 @@ struct qlink_tlv_hdr {
u8 val[0];
} __packed;
-struct qlink_iface_comb_num {
- __le32 iface_comb_num;
-} __packed;
-
struct qlink_iface_limit {
__le16 max_num;
__le16 type;
@@ -1378,21 +1529,6 @@ struct qlink_iface_limit_record {
#define QLINK_RSSI_OFFSET 120
-struct qlink_tlv_frag_rts_thr {
- struct qlink_tlv_hdr hdr;
- __le32 thr;
-} __packed;
-
-struct qlink_tlv_rlimit {
- struct qlink_tlv_hdr hdr;
- u8 rlimit;
-} __packed;
-
-struct qlink_tlv_cclass {
- struct qlink_tlv_hdr hdr;
- u8 cclass;
-} __packed;
-
/**
* enum qlink_reg_rule_flags - regulatory rule flags
*
@@ -1510,6 +1646,7 @@ struct qlink_tlv_ie_set {
struct qlink_tlv_hdr hdr;
u8 type;
u8 flags;
+ u8 rsvd[2];
u8 ie_data[0];
} __packed;
@@ -1522,6 +1659,7 @@ struct qlink_tlv_ie_set {
struct qlink_tlv_ext_ie {
struct qlink_tlv_hdr hdr;
u8 eid_ext;
+ u8 rsvd[3];
u8 ie_data[0];
} __packed;
@@ -1546,13 +1684,57 @@ struct qlink_tlv_iftype_data {
struct qlink_sband_iftype_data iftype_data[0];
} __packed;
+/**
+ * enum qlink_chan_stat - channel statistics bitmap
+ *
+ * Used to indicate which statistics values in &struct qlink_chan_stats
+ * are valid. Individual values are used to fill a bitmap carried in a
+ * payload of QTN_TLV_ID_BITMAP.
+ *
+ * @QLINK_CHAN_STAT_TIME_ON: time_on value is valid.
+ * @QLINK_CHAN_STAT_TIME_TX: time_tx value is valid.
+ * @QLINK_CHAN_STAT_TIME_RX: time_rx value is valid.
+ * @QLINK_CHAN_STAT_CCA_BUSY: cca_busy value is valid.
+ * @QLINK_CHAN_STAT_CCA_BUSY_EXT: cca_busy_ext value is valid.
+ * @QLINK_CHAN_STAT_TIME_SCAN: time_scan value is valid.
+ * @QLINK_CHAN_STAT_CHAN_NOISE: chan_noise value is valid.
+ */
+enum qlink_chan_stat {
+ QLINK_CHAN_STAT_TIME_ON,
+ QLINK_CHAN_STAT_TIME_TX,
+ QLINK_CHAN_STAT_TIME_RX,
+ QLINK_CHAN_STAT_CCA_BUSY,
+ QLINK_CHAN_STAT_CCA_BUSY_EXT,
+ QLINK_CHAN_STAT_TIME_SCAN,
+ QLINK_CHAN_STAT_CHAN_NOISE,
+ QLINK_CHAN_STAT_NUM,
+};
+
+/**
+ * struct qlink_chan_stats - data for QTN_TLV_ID_CHANNEL_STATS
+ *
+ * Carries per-channel statistics. Not all fields may be filled with
+ * valid values. Valid fields should be indicated as such using a bitmap of
+ * &enum qlink_chan_stat. Bitmap is carried separately in a payload of
+ * QTN_TLV_ID_BITMAP.
+ *
+ * @time_on: amount of time radio operated on that channel.
+ * @time_tx: amount of time radio spent transmitting on the channel.
+ * @time_rx: amount of time radio spent receiving on the channel.
+ * @cca_busy: amount of time the primary channel was busy.
+ * @cca_busy_ext: amount of time the secondary channel was busy.
+ * @time_scan: amount of time radio spent scanning on the channel.
+ * @chan_noise: channel noise.
+ */
struct qlink_chan_stats {
- __le32 chan_num;
- __le32 cca_tx;
- __le32 cca_rx;
- __le32 cca_busy;
- __le32 cca_try;
+ __le64 time_on;
+ __le64 time_tx;
+ __le64 time_rx;
+ __le64 cca_busy;
+ __le64 cca_busy_ext;
+ __le64 time_scan;
s8 chan_noise;
+ u8 rsvd[3];
} __packed;
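+
+/*
+ * e.g. if the companion QTN_TLV_ID_BITMAP carries only
+ * BIT(QLINK_CHAN_STAT_TIME_ON) | BIT(QLINK_CHAN_STAT_CHAN_NOISE), then
+ * only time_on and chan_noise above hold valid values.
+ */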
/**
@@ -1560,7 +1742,7 @@ struct qlink_chan_stats {
*
* Used to indicate which statistics values in &struct qlink_sta_stats
* are valid. Individual values are used to fill a bitmap carried in a
- * payload of QTN_TLV_ID_STA_STATS_MAP.
+ * payload of QTN_TLV_ID_BITMAP.
*
* @QLINK_STA_INFO_CONNECTED_TIME: connected_time value is valid.
* @QLINK_STA_INFO_INACTIVE_TIME: inactive_time value is valid.
@@ -1624,7 +1806,7 @@ struct qlink_sta_info_rate {
* Carries statistics of a STA. Not all fields may be filled with
* valid values. Valid fields should be indicated as such using a bitmap of
* &enum qlink_sta_info. Bitmap is carried separately in a payload of
- * QTN_TLV_ID_STA_STATS_MAP.
+ * QTN_TLV_ID_BITMAP.
*/
struct qlink_sta_stats {
__le64 rx_bytes;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
index 1a972bce7b8b..30b60d6ae546 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
@@ -124,6 +124,8 @@ void qlink_chandef_q2cfg(struct wiphy *wiphy,
chdef->center_freq1 = le16_to_cpu(qch->center_freq1);
chdef->center_freq2 = le16_to_cpu(qch->center_freq2);
chdef->width = qlink_chanwidth_to_nl(qch->width);
+ chdef->edmg.bw_config = 0;
+ chdef->edmg.channels = 0;
}
void qlink_chandef_cfg2q(const struct cfg80211_chan_def *chdef,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
index f873beed2ae7..230a10a41c7a 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -20,42 +20,14 @@ static inline void qtnf_cmd_skb_put_tlv_arr(struct sk_buff *skb,
u16 tlv_id, const u8 arr[],
size_t arr_len)
{
- struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + arr_len);
+ struct qlink_tlv_hdr *hdr;
+ hdr = skb_put(skb, sizeof(*hdr) + round_up(arr_len, QLINK_ALIGN));
hdr->type = cpu_to_le16(tlv_id);
hdr->len = cpu_to_le16(arr_len);
memcpy(hdr->val, arr, arr_len);
}
-static inline void qtnf_cmd_skb_put_tlv_tag(struct sk_buff *skb, u16 tlv_id)
-{
- struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr));
-
- hdr->type = cpu_to_le16(tlv_id);
- hdr->len = cpu_to_le16(0);
-}
-
-static inline void qtnf_cmd_skb_put_tlv_u8(struct sk_buff *skb, u16 tlv_id,
- u8 value)
-{
- struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value));
-
- hdr->type = cpu_to_le16(tlv_id);
- hdr->len = cpu_to_le16(sizeof(value));
- *hdr->val = value;
-}
-
-static inline void qtnf_cmd_skb_put_tlv_u16(struct sk_buff *skb,
- u16 tlv_id, u16 value)
-{
- struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value));
- __le16 tmp = cpu_to_le16(value);
-
- hdr->type = cpu_to_le16(tlv_id);
- hdr->len = cpu_to_le16(sizeof(value));
- memcpy(hdr->val, &tmp, sizeof(tmp));
-}
-
static inline void qtnf_cmd_skb_put_tlv_u32(struct sk_buff *skb,
u16 tlv_id, u32 value)
{
@@ -85,4 +57,17 @@ u32 qlink_utils_chflags_cfg2q(u32 cfgflags);
void qlink_utils_regrule_q2nl(struct ieee80211_reg_rule *rule,
const struct qlink_tlv_reg_rule *tlv_rule);
+#define qlink_for_each_tlv(_tlv, _start, _datalen) \
+ for (_tlv = (const struct qlink_tlv_hdr *)(_start); \
+ (const u8 *)(_start) + (_datalen) - (const u8 *)_tlv >= \
+ (int)sizeof(*_tlv) && \
+ (const u8 *)(_start) + (_datalen) - (const u8 *)_tlv >= \
+ (int)sizeof(*_tlv) + le16_to_cpu(_tlv->len); \
+ _tlv = (const struct qlink_tlv_hdr *)(_tlv->val + \
+ round_up(le16_to_cpu(_tlv->len), QLINK_ALIGN)))
+
+#define qlink_tlv_parsing_ok(_tlv_last, _start, _datalen) \
+ ((const u8 *)(_tlv_last) == \
+ (const u8 *)(_start) + round_up(_datalen, QLINK_ALIGN))
+
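+/*
+ * Typical walk using the two helpers above (see event.c for real users):
+ *
+ *   qlink_for_each_tlv(tlv, buf, len) {
+ *           ... handle le16_to_cpu(tlv->type) ...
+ *   }
+ *   if (!qlink_tlv_parsing_ok(tlv, buf, len))
+ *           return -EINVAL;
+ *
+ * The iterator steps by each TLV's length rounded up to QLINK_ALIGN, so the
+ * final check passes only when the buffer was consumed exactly (up to
+ * QLINK_ALIGN padding of the total length).
+ */
+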
#endif /* _QTN_FMAC_QLINK_UTIL_H_ */
diff --git a/drivers/net/wireless/rayctl.h b/drivers/net/wireless/rayctl.h
index 668444f6bf07..2b0f332043d7 100644
--- a/drivers/net/wireless/rayctl.h
+++ b/drivers/net/wireless/rayctl.h
@@ -570,7 +570,7 @@ struct phy_header {
};
struct ray_rx_msg {
struct mac_header mac;
- UCHAR var[0];
+ UCHAR var[];
};
struct tx_msg {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 6598c8d786ea..d6d1be4169e5 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -627,7 +627,7 @@ struct rtl8xxxu_firmware_header {
u32 reserved4;
u32 reserved5;
- u8 data[0];
+ u8 data[];
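+/*
+ * hw_capab[] is sized to hold QLINK_HW_CAPAB_NUM bits (two bytes for the
+ * nine capabilities currently defined), so, assuming the usual byte-wise
+ * bit layout in qtnf_utils_is_bit_set(), QLINK_HW_CAPAB_HW_BRIDGE (bit 8)
+ * is bit 0 of hw_capab[1].
+ */
+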
};
/*
@@ -1133,6 +1133,15 @@ enum bt_mp_oper_opcode_8723b {
BT_MP_OP_ENABLE_CFO_TRACKING = 0x24,
};
+enum rtl8xxxu_bw_mode {
+ RTL8XXXU_CHANNEL_WIDTH_20 = 0,
+ RTL8XXXU_CHANNEL_WIDTH_40 = 1,
+ RTL8XXXU_CHANNEL_WIDTH_80 = 2,
+ RTL8XXXU_CHANNEL_WIDTH_160 = 3,
+ RTL8XXXU_CHANNEL_WIDTH_80_80 = 4,
+ RTL8XXXU_CHANNEL_WIDTH_MAX = 5,
+};
+
struct rtl8723bu_c2h {
u8 id;
u8 seq;
@@ -1174,13 +1183,16 @@ struct rtl8723bu_c2h {
} __packed bt_info;
struct {
u8 rate:7;
- u8 dummy0_0:1;
+ u8 sgi:1;
u8 macid;
u8 ldpc:1;
u8 txbf:1;
u8 noisy_state:1;
u8 dummy2_0:5;
u8 dummy3_0;
+ u8 dummy4_0;
+ u8 dummy5_0;
+ u8 bw;
} __packed ra_report;
};
};
@@ -1260,6 +1272,12 @@ struct rtl8xxxu_btcoex {
#define RTL8XXXU_SNR_THRESH_HIGH 50
#define RTL8XXXU_SNR_THRESH_LOW 20
+struct rtl8xxxu_ra_report {
+ struct rate_info txrate;
+ u32 bit_rate;
+ u8 desc_rate;
+};
+
struct rtl8xxxu_priv {
struct ieee80211_hw *hw;
struct usb_device *udev;
@@ -1375,6 +1393,7 @@ struct rtl8xxxu_priv {
struct sk_buff_head c2hcmd_queue;
spinlock_t c2hcmd_lock;
struct rtl8xxxu_btcoex bt_coex;
+ struct rtl8xxxu_ra_report ra_report;
};
struct rtl8xxxu_rx_urb {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 54a1a4ea107b..19efae462a24 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -4328,7 +4328,7 @@ void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
u32 ramask, u8 rateid, int sgi)
{
struct h2c_cmd h2c;
- u8 bw = 0;
+ u8 bw = RTL8XXXU_CHANNEL_WIDTH_20;
memset(&h2c, 0, sizeof(struct h2c_cmd));
@@ -4816,8 +4816,8 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
rate = tx_rate->hw_value;
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
- dev_info(dev, "%s: TX rate: %d, pkt size %d\n",
- __func__, rate, cpu_to_le16(tx_desc->pkt_size));
+ dev_info(dev, "%s: TX rate: %d, pkt size %u\n",
+ __func__, rate, le16_to_cpu(tx_desc->pkt_size));
seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
@@ -4889,8 +4889,8 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
rate = tx_rate->hw_value;
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
- dev_info(dev, "%s: TX rate: %d, pkt size %d\n",
- __func__, rate, cpu_to_le16(tx_desc40->pkt_size));
+ dev_info(dev, "%s: TX rate: %d, pkt size %u\n",
+ __func__, rate, le16_to_cpu(tx_desc40->pkt_size));
seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
@@ -5389,6 +5389,35 @@ void rtl8723bu_handle_bt_info(struct rtl8xxxu_priv *priv)
}
}
+static struct ieee80211_rate rtl8xxxu_legacy_ratetable[] = {
+ {.bitrate = 10, .hw_value = 0x00,},
+ {.bitrate = 20, .hw_value = 0x01,},
+ {.bitrate = 55, .hw_value = 0x02,},
+ {.bitrate = 110, .hw_value = 0x03,},
+ {.bitrate = 60, .hw_value = 0x04,},
+ {.bitrate = 90, .hw_value = 0x05,},
+ {.bitrate = 120, .hw_value = 0x06,},
+ {.bitrate = 180, .hw_value = 0x07,},
+ {.bitrate = 240, .hw_value = 0x08,},
+ {.bitrate = 360, .hw_value = 0x09,},
+ {.bitrate = 480, .hw_value = 0x0a,},
+ {.bitrate = 540, .hw_value = 0x0b,},
+};
+
+static void rtl8xxxu_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
+{
+ if (rate <= DESC_RATE_54M)
+ return;
+
+ if (rate >= DESC_RATE_MCS0 && rate <= DESC_RATE_MCS15) {
+ if (rate < DESC_RATE_MCS8)
+ *nss = 1;
+ else
+ *nss = 2;
+ *mcs = rate - DESC_RATE_MCS0;
+ }
+}
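+
+/*
+ * e.g. DESC_RATE_MCS11 maps to *nss = 2 and *mcs = 11, while legacy rates
+ * at or below DESC_RATE_54M leave *mcs and *nss untouched, so callers must
+ * pre-initialize both.
+ */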
+
static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
{
struct rtl8xxxu_priv *priv;
@@ -5397,9 +5426,14 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
unsigned long flags;
u8 bt_info = 0;
struct rtl8xxxu_btcoex *btcoex;
+ struct rtl8xxxu_ra_report *rarpt;
+ u8 rate, sgi, bw;
+ u32 bit_rate;
+ u8 mcs = 0, nss = 0;
priv = container_of(work, struct rtl8xxxu_priv, c2hcmd_work);
btcoex = &priv->bt_coex;
+ rarpt = &priv->ra_report;
if (priv->rf_paths > 1)
goto out;
@@ -5422,6 +5456,34 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
}
rtl8723bu_handle_bt_info(priv);
break;
+ case C2H_8723B_RA_REPORT:
+ rarpt->txrate.flags = 0;
+ rate = c2h->ra_report.rate;
+ sgi = c2h->ra_report.sgi;
+ bw = c2h->ra_report.bw;
+
+ if (rate < DESC_RATE_MCS0) {
+ rarpt->txrate.legacy =
+ rtl8xxxu_legacy_ratetable[rate].bitrate;
+ } else {
+ rtl8xxxu_desc_to_mcsrate(rate, &mcs, &nss);
+ rarpt->txrate.flags |= RATE_INFO_FLAGS_MCS;
+
+ rarpt->txrate.mcs = mcs;
+ rarpt->txrate.nss = nss;
+
+ if (sgi) {
+ rarpt->txrate.flags |=
+ RATE_INFO_FLAGS_SHORT_GI;
+ }
+
+ if (bw == RATE_INFO_BW_20)
+ rarpt->txrate.bw |= RATE_INFO_BW_20;
+ }
+ bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate);
+ rarpt->bit_rate = bit_rate;
+ rarpt->desc_rate = rate;
+ break;
default:
break;
}
@@ -5465,7 +5527,7 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
case C2H_8723B_RA_REPORT:
dev_dbg(dev,
"C2H RA RPT: rate %02x, unk %i, macid %02x, noise %i\n",
- c2h->ra_report.rate, c2h->ra_report.dummy0_0,
+ c2h->ra_report.rate, c2h->ra_report.sgi,
c2h->ra_report.macid, c2h->ra_report.noisy_state);
break;
default:
@@ -6069,6 +6131,16 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return 0;
}
+static void
+rtl8xxxu_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct station_info *sinfo)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+
+ sinfo->txrate = priv->ra_report.txrate;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+}
+
static u8 rtl8xxxu_signal_to_snr(int signal)
{
if (signal < RTL8XXXU_NOISE_FLOOR_MIN)
@@ -6371,6 +6443,7 @@ static const struct ieee80211_ops rtl8xxxu_ops = {
.sw_scan_complete = rtl8xxxu_sw_scan_complete,
.set_key = rtl8xxxu_set_key,
.ampdu_action = rtl8xxxu_ampdu_action,
+ .sta_statistics = rtl8xxxu_sta_statistics,
};
static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv,
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 1cff9f07c9e9..13421cf2d201 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1051,13 +1051,13 @@ struct rtl_hdr_3addr {
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
__le16 seq_ctl;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct rtl_info_element {
u8 id;
u8 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct rtl_probe_rsp {
@@ -1068,7 +1068,7 @@ struct rtl_probe_rsp {
/*SSID, supported rates, FH params, DS params,
* CF params, IBSS params, TIM (if beacon), RSN
*/
- struct rtl_info_element info_element[0];
+ struct rtl_info_element info_element[];
} __packed;
/*LED related.*/
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
index fda771d23f71..b6d1d71f4d30 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.c
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -41,7 +41,6 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta_vht_cap *ic_vht_cap;
const u8 *bssid = bss_conf->bssid;
u32 sound_dim;
- u8 bfee_role = RTW_BFEE_NONE;
u8 i;
if (!(chip->band & RTW_BAND_5G))
@@ -67,7 +66,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
}
ether_addr_copy(bfee->mac_addr, bssid);
- bfee_role = RTW_BFEE_MU;
+ bfee->role = RTW_BFEE_MU;
bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7);
bfee->aid = bss_conf->aid;
bfinfo->bfer_mu_cnt++;
@@ -85,7 +84,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
ether_addr_copy(bfee->mac_addr, bssid);
- bfee_role = RTW_BFEE_SU;
+ bfee->role = RTW_BFEE_SU;
bfee->sound_dim = (u8)sound_dim;
bfee->g_id = 0;
bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7);
@@ -102,7 +101,6 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
}
out_unlock:
- bfee->role = bfee_role;
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 4dfb2ec395ee..567372fb4e12 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -1904,6 +1904,9 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
lockdep_assert_held(&rtwdev->mutex);
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
+ return;
+
coex_dm->reason = reason;
/* update wifi_link_info_ext variable */
@@ -2500,3 +2503,495 @@ void rtw_coex_defreeze_work(struct work_struct *work)
rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS);
mutex_unlock(&rtwdev->mutex);
}
+
+#ifdef CONFIG_RTW88_DEBUGFS
+#define INFO_SIZE 80
+
+#define case_BTINFO(src) \
+ case COEX_BTINFO_SRC_##src: return #src
+
+static const char *rtw_coex_get_bt_info_src_string(u8 bt_info_src)
+{
+ switch (bt_info_src) {
+ case_BTINFO(WL_FW);
+ case_BTINFO(BT_RSP);
+ case_BTINFO(BT_ACT);
+ default:
+ return "Unknown";
+ }
+}
+
+#define case_RSN(src) \
+ case COEX_RSN_##src: return #src
+
+static const char *rtw_coex_get_reason_string(u8 reason)
+{
+ switch (reason) {
+ case_RSN(2GSCANSTART);
+ case_RSN(5GSCANSTART);
+ case_RSN(SCANFINISH);
+ case_RSN(2GSWITCHBAND);
+ case_RSN(5GSWITCHBAND);
+ case_RSN(2GCONSTART);
+ case_RSN(5GCONSTART);
+ case_RSN(2GCONFINISH);
+ case_RSN(5GCONFINISH);
+ case_RSN(2GMEDIA);
+ case_RSN(5GMEDIA);
+ case_RSN(MEDIADISCON);
+ case_RSN(BTINFO);
+ case_RSN(LPS);
+ case_RSN(WLSTATUS);
+ default:
+ return "Unknown";
+ }
+}
+
+static int rtw_coex_addr_info(struct rtw_dev *rtwdev,
+ const struct rtw_reg_domain *reg,
+ char addr_info[], int n)
+{
+ const char *rf_prefix = "";
+ const char *sep = n == 0 ? "" : "/ ";
+ int ffs, fls;
+ int max_fls;
+
+ if (INFO_SIZE - n <= 0)
+ return 0;
+
+ switch (reg->domain) {
+ case RTW_REG_DOMAIN_MAC32:
+ max_fls = 31;
+ break;
+ case RTW_REG_DOMAIN_MAC16:
+ max_fls = 15;
+ break;
+ case RTW_REG_DOMAIN_MAC8:
+ max_fls = 7;
+ break;
+ case RTW_REG_DOMAIN_RF_A:
+ case RTW_REG_DOMAIN_RF_B:
+ rf_prefix = "RF_";
+ max_fls = 19;
+ break;
+ default:
+ return 0;
+ }
+
+ ffs = __ffs(reg->mask);
+ fls = __fls(reg->mask);
+
+ if (ffs == 0 && fls == max_fls)
+ return scnprintf(addr_info + n, INFO_SIZE - n, "%s%s%x",
+ sep, rf_prefix, reg->addr);
+ else if (ffs == fls)
+ return scnprintf(addr_info + n, INFO_SIZE - n, "%s%s%x[%d]",
+ sep, rf_prefix, reg->addr, ffs);
+ else
+ return scnprintf(addr_info + n, INFO_SIZE - n, "%s%s%x[%d:%d]",
+ sep, rf_prefix, reg->addr, fls, ffs);
+}
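+
+/*
+ * e.g. a RTW_REG_DOMAIN_MAC32 register 0x6c0 with mask GENMASK(15, 8) is
+ * rendered as "6c0[15:8]", a single-bit mask as "6c0[8]", and a full-width
+ * mask collapses to just "6c0".
+ */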
+
+static int rtw_coex_val_info(struct rtw_dev *rtwdev,
+ const struct rtw_reg_domain *reg,
+ char val_info[], int n)
+{
+ const char *sep = n == 0 ? "" : "/ ";
+ u8 rf_path;
+
+ if (INFO_SIZE - n <= 0)
+ return 0;
+
+ switch (reg->domain) {
+ case RTW_REG_DOMAIN_MAC32:
+ return scnprintf(val_info + n, INFO_SIZE - n, "%s0x%x", sep,
+ rtw_read32_mask(rtwdev, reg->addr, reg->mask));
+ case RTW_REG_DOMAIN_MAC16:
+ return scnprintf(val_info + n, INFO_SIZE - n, "%s0x%x", sep,
+ rtw_read16_mask(rtwdev, reg->addr, reg->mask));
+ case RTW_REG_DOMAIN_MAC8:
+ return scnprintf(val_info + n, INFO_SIZE - n, "%s0x%x", sep,
+ rtw_read8_mask(rtwdev, reg->addr, reg->mask));
+ case RTW_REG_DOMAIN_RF_A:
+ rf_path = RF_PATH_A;
+ break;
+ case RTW_REG_DOMAIN_RF_B:
+ rf_path = RF_PATH_B;
+ break;
+ default:
+ return 0;
+ }
+
+ /* only RF go through here */
+ return scnprintf(val_info + n, INFO_SIZE - n, "%s0x%x", sep,
+ rtw_read_rf(rtwdev, rf_path, reg->addr, reg->mask));
+}
+
+static void rtw_coex_set_coexinfo_hw(struct rtw_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_reg_domain *reg;
+ char addr_info[INFO_SIZE];
+ int n_addr = 0;
+ char val_info[INFO_SIZE];
+ int n_val = 0;
+ int i;
+
+ for (i = 0; i < chip->coex_info_hw_regs_num; i++) {
+ reg = &chip->coex_info_hw_regs[i];
+
+ n_addr += rtw_coex_addr_info(rtwdev, reg, addr_info, n_addr);
+ n_val += rtw_coex_val_info(rtwdev, reg, val_info, n_val);
+
+ if (reg->domain == RTW_REG_DOMAIN_NL) {
+ seq_printf(m, "%-40s = %s\n", addr_info, val_info);
+ n_addr = 0;
+ n_val = 0;
+ }
+ }
+
+ if (n_addr != 0 && n_val != 0)
+ seq_printf(m, "%-40s = %s\n", addr_info, val_info);
+}
+
+static bool rtw_coex_get_bt_reg(struct rtw_dev *rtwdev,
+ u8 type, u16 addr, u16 *val)
+{
+ struct rtw_coex_info_req req = {0};
+ struct sk_buff *skb;
+ __le16 le_addr;
+ u8 *payload;
+
+ le_addr = cpu_to_le16(addr);
+ req.op_code = BT_MP_INFO_OP_READ_REG;
+ req.para1 = type;
+ req.para2 = le16_get_bits(le_addr, GENMASK(7, 0));
+ req.para3 = le16_get_bits(le_addr, GENMASK(15, 8));
+ skb = rtw_coex_info_request(rtwdev, &req);
+ if (!skb) {
+ *val = 0xeaea;
+ return false;
+ }
+
+ payload = get_payload_from_coex_resp(skb);
+ *val = GET_COEX_RESP_BT_REG_VAL(payload);
+
+ return true;
+}
+
+static bool rtw_coex_get_bt_patch_version(struct rtw_dev *rtwdev,
+ u32 *patch_version)
+{
+ struct rtw_coex_info_req req = {0};
+ struct sk_buff *skb;
+ u8 *payload;
+ bool ret = false;
+
+ req.op_code = BT_MP_INFO_OP_PATCH_VER;
+ skb = rtw_coex_info_request(rtwdev, &req);
+ if (!skb)
+ goto out;
+
+ payload = get_payload_from_coex_resp(skb);
+ *patch_version = GET_COEX_RESP_BT_PATCH_VER(payload);
+ ret = true;
+
+out:
+ return ret;
+}
+
+static bool rtw_coex_get_bt_supported_version(struct rtw_dev *rtwdev,
+ u32 *supported_version)
+{
+ struct rtw_coex_info_req req = {0};
+ struct sk_buff *skb;
+ u8 *payload;
+ bool ret = false;
+
+ req.op_code = BT_MP_INFO_OP_SUPP_VER;
+ skb = rtw_coex_info_request(rtwdev, &req);
+ if (!skb)
+ goto out;
+
+ payload = get_payload_from_coex_resp(skb);
+ *supported_version = GET_COEX_RESP_BT_SUPP_VER(payload);
+ ret = true;
+
+out:
+ return ret;
+}
+
+static bool rtw_coex_get_bt_supported_feature(struct rtw_dev *rtwdev,
+ u32 *supported_feature)
+{
+ struct rtw_coex_info_req req = {0};
+ struct sk_buff *skb;
+ u8 *payload;
+ bool ret = false;
+
+ req.op_code = BT_MP_INFO_OP_SUPP_FEAT;
+ skb = rtw_coex_info_request(rtwdev, &req);
+ if (!skb)
+ goto out;
+
+ payload = get_payload_from_coex_resp(skb);
+ *supported_feature = GET_COEX_RESP_BT_SUPP_FEAT(payload);
+ ret = true;
+
+out:
+ return ret;
+}
+
+struct rtw_coex_sta_stat_iter_data {
+ struct rtw_vif *rtwvif;
+ struct seq_file *file;
+};
+
+static void rtw_coex_sta_stat_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_coex_sta_stat_iter_data *sta_iter_data = data;
+ struct rtw_vif *rtwvif = sta_iter_data->rtwvif;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ struct seq_file *m = sta_iter_data->file;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ u8 rssi;
+
+ if (si->vif != vif)
+ return;
+
+ rssi = ewma_rssi_read(&si->avg_rssi);
+ seq_printf(m, "\tPeer %3d\n", si->mac_id);
+ seq_printf(m, "\t\t%-24s = %d\n", "RSSI", rssi);
+ seq_printf(m, "\t\t%-24s = %d\n", "BW mode", si->bw_mode);
+}
+
+struct rtw_coex_vif_stat_iter_data {
+ struct rtw_dev *rtwdev;
+ struct seq_file *file;
+};
+
+static void rtw_coex_vif_stat_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_coex_vif_stat_iter_data *vif_iter_data = data;
+ struct rtw_coex_sta_stat_iter_data sta_iter_data;
+ struct rtw_dev *rtwdev = vif_iter_data->rtwdev;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct seq_file *m = vif_iter_data->file;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ seq_printf(m, "Iface on Port (%d)\n", rtwvif->port);
+ seq_printf(m, "\t%-32s = %d\n",
+ "Beacon interval", bss_conf->beacon_int);
+ seq_printf(m, "\t%-32s = %d\n",
+ "Network Type", rtwvif->net_type);
+
+ sta_iter_data.rtwvif = rtwvif;
+ sta_iter_data.file = m;
+ rtw_iterate_stas_atomic(rtwdev, rtw_coex_sta_stat_iter,
+ &sta_iter_data);
+}
+
+void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_fw_state *fw = &rtwdev->fw;
+ struct rtw_coex_vif_stat_iter_data vif_iter_data;
+ u8 reason = coex_dm->reason;
+ u8 sys_lte;
+ u16 score_board_WB, score_board_BW;
+ u32 wl_reg_6c0, wl_reg_6c4, wl_reg_6c8, wl_reg_778, wl_reg_6cc;
+ u32 lte_coex, bt_coex;
+ u32 bt_hi_pri, bt_lo_pri;
+ int i;
+
+ score_board_BW = rtw_coex_read_scbd(rtwdev);
+ score_board_WB = coex_stat->score_board;
+ wl_reg_6c0 = rtw_read32(rtwdev, 0x6c0);
+ wl_reg_6c4 = rtw_read32(rtwdev, 0x6c4);
+ wl_reg_6c8 = rtw_read32(rtwdev, 0x6c8);
+ wl_reg_6cc = rtw_read32(rtwdev, 0x6cc);
+ wl_reg_778 = rtw_read32(rtwdev, 0x778);
+ bt_hi_pri = rtw_read32(rtwdev, 0x770);
+ bt_lo_pri = rtw_read32(rtwdev, 0x774);
+ rtw_write8(rtwdev, 0x76e, 0xc);
+ sys_lte = rtw_read8(rtwdev, 0x73);
+ lte_coex = rtw_coex_read_indirect_reg(rtwdev, 0x38);
+ bt_coex = rtw_coex_read_indirect_reg(rtwdev, 0x54);
+
+ if (!coex_stat->bt_disabled && !coex_stat->bt_mailbox_reply) {
+ rtw_coex_get_bt_supported_version(rtwdev,
+ &coex_stat->bt_supported_version);
+ rtw_coex_get_bt_patch_version(rtwdev, &coex_stat->patch_ver);
+ rtw_coex_get_bt_supported_feature(rtwdev,
+ &coex_stat->bt_supported_feature);
+ rtw_coex_get_bt_reg(rtwdev, 3, 0xae, &coex_stat->bt_reg_vendor_ae);
+ rtw_coex_get_bt_reg(rtwdev, 3, 0xac, &coex_stat->bt_reg_vendor_ac);
+
+ if (coex_stat->patch_ver != 0)
+ coex_stat->bt_mailbox_reply = true;
+ }
+
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "\t\tBT Coexist info %x\n", chip->id);
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "%-40s = %s/ %d\n",
+ "Mech/ RFE",
+ efuse->share_ant ? "Shared" : "Non-Shared",
+ efuse->rfe_option);
+ seq_printf(m, "%-40s = %08x/ 0x%02x/ 0x%08x %s\n",
+ "Coex Ver/ BT Dez/ BT Rpt",
+ chip->coex_para_ver, chip->bt_desired_ver,
+ coex_stat->bt_supported_version,
+ coex_stat->bt_disabled ? "(BT disabled)" :
+ coex_stat->bt_supported_version >= chip->bt_desired_ver ?
+ "(Match)" : "(Mismatch)");
+ seq_printf(m, "%-40s = %s/ %u/ %d\n",
+ "Role/ RoleSwCnt/ IgnWL/ Feature",
+ coex_stat->bt_slave ? "Slave" : "Master",
+ coex_stat->cnt_bt[COEX_CNT_BT_ROLESWITCH],
+ coex_dm->ignore_wl_act);
+ seq_printf(m, "%-40s = %u.%u/ 0x%x/ %c\n",
+ "WL FW/ BT FW/ KT",
+ fw->version, fw->sub_version,
+ coex_stat->patch_ver, coex_stat->kt_ver + 65);
+ seq_printf(m, "%-40s = %u/ %u/ %u/ ch-(%u)\n",
+ "AFH Map",
+ coex_dm->wl_ch_info[0], coex_dm->wl_ch_info[1],
+ coex_dm->wl_ch_info[2], hal->current_channel);
+
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "\t\tBT Status\n");
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "%-40s = %s/ %ddBm/ %u/ %u\n",
+ "BT status/ rssi/ retry/ pop",
+ coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE ? "non-conn" :
+ coex_dm->bt_status == COEX_BTSTATUS_CON_IDLE ? "conn-idle" : "busy",
+ coex_stat->bt_rssi - 100,
+ coex_stat->cnt_bt[COEX_CNT_BT_RETRY],
+ coex_stat->cnt_bt[COEX_CNT_BT_POPEVENT]);
+ seq_printf(m, "%-40s = %s%s%s%s%s (multi-link %d)\n",
+ "Profiles",
+ coex_stat->bt_a2dp_exist ? (coex_stat->bt_a2dp_sink ?
+ "A2DP sink," : "A2DP,") : "",
+ coex_stat->bt_hfp_exist ? "HFP," : "",
+ coex_stat->bt_hid_exist ?
+ (coex_stat->bt_ble_exist ? "HID(RCU)," :
+ coex_stat->bt_hid_slot >= 2 ? "HID(4/18)" :
+ "HID(2/18),") : "",
+ coex_stat->bt_pan_exist ? coex_stat->bt_opp_exist ?
+ "OPP," : "PAN," : "",
+ coex_stat->bt_ble_voice ? "Voice," : "",
+ coex_stat->bt_multi_link);
+ seq_printf(m, "%-40s = %u/ %u/ %u/ 0x%08x\n",
+ "Reinit/ Relink/ IgnWl/ Feature",
+ coex_stat->cnt_bt[COEX_CNT_BT_REINIT],
+ coex_stat->cnt_bt[COEX_CNT_BT_SETUPLINK],
+ coex_stat->cnt_bt[COEX_CNT_BT_IGNWLANACT],
+ coex_stat->bt_supported_feature);
+ seq_printf(m, "%-40s = %u/ %u/ %u/ %u\n",
+ "Page/ Inq/ iqk/ iqk fail",
+ coex_stat->cnt_bt[COEX_CNT_BT_PAGE],
+ coex_stat->cnt_bt[COEX_CNT_BT_INQ],
+ coex_stat->cnt_bt[COEX_CNT_BT_IQK],
+ coex_stat->cnt_bt[COEX_CNT_BT_IQKFAIL]);
+ seq_printf(m, "%-40s = 0x%04x/ 0x%04x/ 0x%04x/ 0x%04x\n",
+ "0xae/ 0xac/ score board (W->B)/ (B->W)",
+ coex_stat->bt_reg_vendor_ae,
+ coex_stat->bt_reg_vendor_ac,
+ score_board_WB, score_board_BW);
+ seq_printf(m, "%-40s = %u/%u, %u/%u\n",
+ "Hi-Pri TX/RX, Lo-Pri TX/RX",
+ bt_hi_pri & 0xffff, bt_hi_pri >> 16,
+ bt_lo_pri & 0xffff, bt_lo_pri >> 16);
+ for (i = 0; i < COEX_BTINFO_SRC_BT_IQK; i++)
+ seq_printf(m, "%-40s = %7ph\n",
+ rtw_coex_get_bt_info_src_string(i),
+ coex_stat->bt_info_c2h[i]);
+
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "\t\tWiFi Status\n");
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "%-40s = %d\n",
+ "Scanning", test_bit(RTW_FLAG_SCANNING, rtwdev->flags));
+ seq_printf(m, "%-40s = %u/ TX %d Mbps/ RX %d Mbps\n",
+ "G_busy/ TX/ RX",
+ coex_stat->wl_gl_busy,
+ rtwdev->stats.tx_throughput, rtwdev->stats.rx_throughput);
+ seq_printf(m, "%-40s = %u/ %u/ %u\n",
+ "IPS/ Low Power/ PS mode",
+ test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags),
+ test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags),
+ rtwdev->lps_conf.mode);
+
+ vif_iter_data.rtwdev = rtwdev;
+ vif_iter_data.file = m;
+ rtw_iterate_vifs_atomic(rtwdev, rtw_coex_vif_stat_iter, &vif_iter_data);
+
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "\t\tMechanism\n");
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "%-40s = %5ph (case-%d)\n",
+ "TDMA",
+ coex_dm->ps_tdma_para, coex_dm->cur_ps_tdma);
+ seq_printf(m, "%-40s = %d\n",
+ "Timer base", coex_stat->tdma_timer_base);
+ seq_printf(m, "%-40s = %d/ 0x%08x/ 0x%08x/ 0x%08x\n",
+ "Table/ 0x6c0/ 0x6c4/ 0x6c8",
+ coex_dm->cur_table, wl_reg_6c0, wl_reg_6c4, wl_reg_6c8);
+ seq_printf(m, "%-40s = 0x%08x/ 0x%08x/ reason (%s)\n",
+ "0x778/ 0x6cc/ Reason",
+ wl_reg_778, wl_reg_6cc, rtw_coex_get_reason_string(reason));
+ seq_printf(m, "%-40s = %u/ %u/ %u/ %u/ %u\n",
+ "Null All/ Retry/ Ack/ BT Empty/ BT Late",
+ coex_stat->wl_fw_dbg_info[1], coex_stat->wl_fw_dbg_info[2],
+ coex_stat->wl_fw_dbg_info[3], coex_stat->wl_fw_dbg_info[4],
+ coex_stat->wl_fw_dbg_info[5]);
+ seq_printf(m, "%-40s = %u/ %u/ %s/ %u\n",
+ "Cnt TDMA Toggle/ Lk 5ms/ Lk 5ms on/ FW",
+ coex_stat->wl_fw_dbg_info[6],
+ coex_stat->wl_fw_dbg_info[7],
+ coex_stat->wl_slot_extend ? "Yes" : "No",
+ coex_stat->cnt_wl[COEX_CNT_WL_FW_NOTIFY]);
+
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "\t\tHW setting\n");
+ seq_printf(m, "**********************************************\n");
+ seq_printf(m, "%-40s = %s/ %s\n",
+ "LTE Coex/ Path Owner",
+ lte_coex & BIT(7) ? "ON" : "OFF",
+ sys_lte & BIT(2) ? "WL" : "BT");
+ seq_printf(m, "%-40s = RF:%s_BB:%s/ RF:%s_BB:%s/ %s\n",
+ "GNT_WL_CTRL/ GNT_BT_CTRL/ Dbg",
+ lte_coex & BIT(12) ? "SW" : "HW",
+ lte_coex & BIT(8) ? "SW" : "HW",
+ lte_coex & BIT(14) ? "SW" : "HW",
+ lte_coex & BIT(10) ? "SW" : "HW",
+ sys_lte & BIT(3) ? "On" : "Off");
+ seq_printf(m, "%-40s = %lu/ %lu\n",
+ "GNT_WL/ GNT_BT",
+ (bt_coex & BIT(2)) >> 2, (bt_coex & BIT(3)) >> 3);
+ seq_printf(m, "%-40s = %u/ %u/ %u/ %u\n",
+ "CRC OK CCK/ OFDM/ HT/ VHT",
+ dm_info->cck_ok_cnt, dm_info->ofdm_ok_cnt,
+ dm_info->ht_ok_cnt, dm_info->vht_ok_cnt);
+ seq_printf(m, "%-40s = %u/ %u/ %u/ %u\n",
+ "CRC ERR CCK/ OFDM/ HT/ VHT",
+ dm_info->cck_err_cnt, dm_info->ofdm_err_cnt,
+ dm_info->ht_err_cnt, dm_info->vht_err_cnt);
+ seq_printf(m, "%-40s = %s/ %s/ %s/ %u\n",
+ "HiPr/ Locking/ Locked/ Noisy",
+ coex_stat->wl_hi_pri_task1 ? "Y" : "N",
+ coex_stat->wl_cck_lock ? "Y" : "N",
+ coex_stat->wl_cck_lock_ever ? "Y" : "N",
+ coex_stat->wl_noisy_level);
+
+ rtw_coex_set_coexinfo_hw(rtwdev, m);
+}
+#endif /* CONFIG_RTW88_DEBUGFS */
diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h
index 008d1af5996b..4c3a01968f5e 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.h
+++ b/drivers/net/wireless/realtek/rtw88/coex.h
@@ -46,6 +46,14 @@
(__rssi__ == COEX_RSSI_STATE_LOW || \
__rssi__ == COEX_RSSI_STATE_STAY_LOW ? true : false); })
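+/* Each accessor below decodes one field of the 8-byte little-endian
+ * C2H coex response; SUPP_FEAT and REG_VAL share bits [39:24], as the
+ * firmware reply layout differs per query.
+ */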
+#define GET_COEX_RESP_BT_SUPP_VER(payload) \
+ le64_get_bits(*((__le64 *)(payload)), GENMASK_ULL(39, 32))
+#define GET_COEX_RESP_BT_SUPP_FEAT(payload) \
+ le64_get_bits(*((__le64 *)(payload)), GENMASK_ULL(39, 24))
+#define GET_COEX_RESP_BT_PATCH_VER(payload) \
+ le64_get_bits(*((__le64 *)(payload)), GENMASK_ULL(55, 24))
+#define GET_COEX_RESP_BT_REG_VAL(payload) \
+ le64_get_bits(*((__le64 *)(payload)), GENMASK_ULL(39, 24))
#define GET_COEX_RESP_BT_SCAN_TYPE(payload) \
le64_get_bits(*((__le64 *)(payload)), GENMASK(31, 24))
@@ -367,4 +375,6 @@ void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type);
void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev);
+void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m);
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 5a181e01ebef..b4964306de61 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -5,6 +5,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "main.h"
+#include "coex.h"
#include "sec.h"
#include "fw.h"
#include "debug.h"
@@ -691,6 +692,56 @@ static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v)
dm_info->ht_ok_cnt, dm_info->ht_err_cnt);
seq_printf(m, " * VHT cnt (ok, err) = (%u, %u)\n",
dm_info->vht_ok_cnt, dm_info->vht_err_cnt);
+
+ return 0;
+}
+
+static int rtw_debugfs_get_coex_info(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+
+ rtw_coex_display_coex_info(rtwdev, m);
+
+ return 0;
+}
+
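+/* Write handler for the coex_enable entry: accepts kstrtobool syntax
+ * ("0"/"1"/"y"/"n"), so e.g. "echo n > coex_enable" freezes the coex
+ * state machine (stop_dm) while registers are inspected via coex_info.
+ */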
+static ssize_t rtw_debugfs_set_coex_enable(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_coex *coex = &rtwdev->coex;
+ char tmp[32 + 1];
+ bool enable;
+ int ret;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+
+ ret = kstrtobool(tmp, &enable);
+ if (ret) {
+ rtw_warn(rtwdev, "invalid arguments\n");
+ return ret;
+ }
+
+ mutex_lock(&rtwdev->mutex);
+ coex->stop_dm = enable == 0;
+ mutex_unlock(&rtwdev->mutex);
+
+ return count;
+}
+
+static int rtw_debugfs_get_coex_enable(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_coex *coex = &rtwdev->coex;
+
+ seq_printf(m, "coex mechanism %s\n",
+ coex->stop_dm ? "disabled" : "enabled");
+
return 0;
}
@@ -784,6 +835,15 @@ static struct rtw_debugfs_priv rtw_debug_priv_phy_info = {
.cb_read = rtw_debugfs_get_phy_info,
};
+static struct rtw_debugfs_priv rtw_debug_priv_coex_enable = {
+ .cb_write = rtw_debugfs_set_coex_enable,
+ .cb_read = rtw_debugfs_get_coex_enable,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_coex_info = {
+ .cb_read = rtw_debugfs_get_coex_info,
+};
+
#define rtw_debugfs_add_core(name, mode, fopname, parent) \
do { \
rtw_debug_priv_ ##name.rtwdev = rtwdev; \
@@ -814,6 +874,8 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_rw(dump_cam);
rtw_debugfs_add_rw(rsvd_page);
rtw_debugfs_add_r(phy_info);
+ rtw_debugfs_add_r(coex_info);
+ rtw_debugfs_add_rw(coex_enable);
rtw_debugfs_add_r(mac_0);
rtw_debugfs_add_r(mac_1);
rtw_debugfs_add_r(mac_2);
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 243441453ead..05c430b3489c 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -136,6 +136,9 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
mutex_lock(&rtwdev->mutex);
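+ /* C2H events can race with rtw_core_stop(); drop anything that
+ * arrives once RTW_FLAG_RUNNING has been cleared.
+ */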
+ if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
+ goto unlock;
+
switch (c2h->id) {
case C2H_BT_INFO:
rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
@@ -153,6 +156,7 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
break;
}
+unlock:
mutex_unlock(&rtwdev->mutex);
}
@@ -579,7 +583,7 @@ static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
struct rtw_rsvd_page *rsvd_pkt;
u8 location = 0;
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
if (type == rsvd_pkt->type)
location = rsvd_pkt->page;
}
@@ -632,7 +636,7 @@ u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
struct rtw_rsvd_page *rsvd_pkt;
u8 location = 0;
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
if (rsvd_pkt->type != RSVD_PROBE_REQ)
continue;
if ((!ssid && !rsvd_pkt->ssid) ||
@@ -649,7 +653,7 @@ u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
struct rtw_rsvd_page *rsvd_pkt;
u16 size = 0;
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
if (rsvd_pkt->type != RSVD_PROBE_REQ)
continue;
if ((!ssid && !rsvd_pkt->ssid) ||
@@ -686,25 +690,6 @@ void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
-static struct sk_buff *
-rtw_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct sk_buff *skb_new;
-
- if (vif->type != NL80211_IFTYPE_AP &&
- vif->type != NL80211_IFTYPE_ADHOC &&
- !ieee80211_vif_is_mesh(vif)) {
- skb_new = alloc_skb(1, GFP_KERNEL);
- if (!skb_new)
- return NULL;
- skb_put(skb_new, 1);
- } else {
- skb_new = ieee80211_beacon_get(hw, vif);
- }
-
- return skb_new;
-}
-
static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
@@ -745,7 +730,7 @@ static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
if (!loc) {
rtw_err(rtwdev, "failed to get probe req rsvd loc\n");
- kfree(skb);
+ kfree_skb(skb);
return NULL;
}
nlo_hdr->location[i] = loc;
@@ -819,8 +804,7 @@ static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
return skb;
}
-static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_chip_info *chip = rtwdev->chip;
@@ -850,15 +834,31 @@ static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw,
}
static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
struct rtw_rsvd_page *rsvd_pkt)
{
+ struct ieee80211_vif *vif;
+ struct rtw_vif *rtwvif;
struct sk_buff *skb_new;
struct cfg80211_ssid *ssid;
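+ /* A dummy page is a 1-byte placeholder that only fills the beacon
+ * slot at the head of the rsvd page block when no beacon-capable
+ * vif exists, so it carries no vif context.
+ */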
+ if (rsvd_pkt->type == RSVD_DUMMY) {
+ skb_new = alloc_skb(1, GFP_KERNEL);
+ if (!skb_new)
+ return NULL;
+
+ skb_put(skb_new, 1);
+ return skb_new;
+ }
+
+ rtwvif = rsvd_pkt->rtwvif;
+ if (!rtwvif)
+ return NULL;
+
+ vif = rtwvif_to_vif(rtwvif);
+
switch (rsvd_pkt->type) {
case RSVD_BEACON:
- skb_new = rtw_beacon_get(hw, vif);
+ skb_new = ieee80211_beacon_get(hw, vif);
break;
case RSVD_PS_POLL:
skb_new = ieee80211_pspoll_get(hw, vif);
@@ -876,7 +876,7 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
skb_new = rtw_lps_pg_dpk_get(hw);
break;
case RSVD_LPS_PG_INFO:
- skb_new = rtw_lps_pg_info_get(hw, vif);
+ skb_new = rtw_lps_pg_info_get(hw);
break;
case RSVD_PROBE_REQ:
ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid;
@@ -945,6 +945,8 @@ static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
if (!rsvd_pkt)
return NULL;
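+ /* vif_list chains the page on its owning vif; build_list chains it
+ * on the device-global list while the rsvd page block is built.
+ */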
+ INIT_LIST_HEAD(&rsvd_pkt->vif_list);
+ INIT_LIST_HEAD(&rsvd_pkt->build_list);
rsvd_pkt->type = type;
rsvd_pkt->add_txdesc = txdesc;
@@ -952,51 +954,124 @@ static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
}
static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif,
struct rtw_rsvd_page *rsvd_pkt)
{
lockdep_assert_held(&rtwdev->mutex);
- list_add_tail(&rsvd_pkt->list, &rtwdev->rsvd_page_list);
+
+ list_add_tail(&rsvd_pkt->vif_list, &rtwvif->rsvd_page_list);
}
-void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
- bool txdesc)
+static void rtw_add_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif,
+ enum rtw_rsvd_packet_type type,
+ bool txdesc)
{
struct rtw_rsvd_page *rsvd_pkt;
rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc);
- if (!rsvd_pkt)
+ if (!rsvd_pkt) {
+ rtw_err(rtwdev, "failed to alloc rsvd page %d\n", type);
return;
+ }
- rtw_insert_rsvd_page(rtwdev, rsvd_pkt);
+ rsvd_pkt->rtwvif = rtwvif;
+ rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
}
-void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
- struct cfg80211_ssid *ssid)
+static void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif,
+ struct cfg80211_ssid *ssid)
{
struct rtw_rsvd_page *rsvd_pkt;
rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true);
- if (!rsvd_pkt)
+ if (!rsvd_pkt) {
+ rtw_err(rtwdev, "failed to alloc probe req rsvd page\n");
return;
+ }
+ rsvd_pkt->rtwvif = rtwvif;
rsvd_pkt->ssid = ssid;
- rtw_insert_rsvd_page(rtwdev, rsvd_pkt);
+ rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
}
-void rtw_reset_rsvd_page(struct rtw_dev *rtwdev)
+void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
{
struct rtw_rsvd_page *rsvd_pkt, *tmp;
lockdep_assert_held(&rtwdev->mutex);
- list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, list) {
- if (rsvd_pkt->type == RSVD_BEACON)
- continue;
- list_del(&rsvd_pkt->list);
+ /* remove all of the rsvd pages for vif */
+ list_for_each_entry_safe(rsvd_pkt, tmp, &rtwvif->rsvd_page_list,
+ vif_list) {
+ list_del(&rsvd_pkt->vif_list);
+ if (!list_empty(&rsvd_pkt->build_list))
+ list_del(&rsvd_pkt->build_list);
kfree(rsvd_pkt);
}
}
+void rtw_add_rsvd_page_bcn(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC &&
+ vif->type != NL80211_IFTYPE_MESH_POINT) {
+ rtw_warn(rtwdev, "Cannot add beacon rsvd page for %d\n",
+ vif->type);
+ return;
+ }
+
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_BEACON, false);
+}
+
+void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req;
+ struct cfg80211_ssid *ssid;
+ int i;
+
+ if (vif->type != NL80211_IFTYPE_STATION) {
+ rtw_warn(rtwdev, "Cannot add PNO rsvd page for %d\n",
+ vif->type);
+ return;
+ }
+
+ for (i = 0 ; i < rtw_pno_req->match_set_cnt; i++) {
+ ssid = &rtw_pno_req->match_sets[i].ssid;
+ rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, ssid);
+ }
+
+ rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, NULL);
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NLO_INFO, false);
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_CH_INFO, true);
+}
+
+void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+
+ if (vif->type != NL80211_IFTYPE_STATION) {
+ rtw_warn(rtwdev, "Cannot add sta rsvd page for %d\n",
+ vif->type);
+ return;
+ }
+
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_PS_POLL, true);
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_QOS_NULL, true);
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NULL, true);
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_DPK, true);
+ rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_INFO, true);
+}
+
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 *buf, u32 size)
{
@@ -1060,8 +1135,72 @@ static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
}
-static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
- struct ieee80211_vif *vif, u32 *size)
+static void __rtw_build_rsvd_page_reset(struct rtw_dev *rtwdev)
+{
+ struct rtw_rsvd_page *rsvd_pkt, *tmp;
+
+ list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
+ build_list) {
+ list_del_init(&rsvd_pkt->build_list);
+
+ /* Free only the dummy rsvd page here; the others are
+ * freed when their vif is removed
+ */
+ if (rsvd_pkt->type == RSVD_DUMMY)
+ kfree(rsvd_pkt);
+ }
+}
+
+static void rtw_build_rsvd_page_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = data;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ struct rtw_rsvd_page *rsvd_pkt;
+
+ list_for_each_entry(rsvd_pkt, &rtwvif->rsvd_page_list, vif_list) {
+ if (rsvd_pkt->type == RSVD_BEACON)
+ list_add(&rsvd_pkt->build_list,
+ &rtwdev->rsvd_page_list);
+ else
+ list_add_tail(&rsvd_pkt->build_list,
+ &rtwdev->rsvd_page_list);
+ }
+}
+
+static int __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+
+ __rtw_build_rsvd_page_reset(rtwdev);
+
+ /* gather rsvd page from vifs */
+ rtw_iterate_vifs_atomic(rtwdev, rtw_build_rsvd_page_iter, rtwdev);
+
+ rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
+ struct rtw_rsvd_page, build_list);
+ if (!rsvd_pkt) {
+ WARN(1, "Should not have an empty reserved page\n");
+ return -EINVAL;
+ }
+
+ /* the first rsvd should be beacon, otherwise add a dummy one */
+ if (rsvd_pkt->type != RSVD_BEACON) {
+ struct rtw_rsvd_page *dummy_pkt;
+
+ dummy_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_DUMMY, false);
+ if (!dummy_pkt) {
+ rtw_err(rtwdev, "failed to alloc dummy rsvd page\n");
+ return -ENOMEM;
+ }
+
+ list_add(&dummy_pkt->build_list, &rtwdev->rsvd_page_list);
+ }
+
+ return 0;
+}
+
+static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size)
{
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw_chip_info *chip = rtwdev->chip;
@@ -1071,13 +1210,21 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
u8 total_page = 0;
u8 page_size, page_margin, tx_desc_sz;
u8 *buf;
+ int ret;
page_size = chip->page_size;
tx_desc_sz = chip->tx_pkt_desc_sz;
page_margin = page_size - tx_desc_sz;
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
- iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt);
+ ret = __rtw_build_rsvd_page_from_vifs(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev,
+ "failed to build rsvd page from vifs, ret %d\n", ret);
+ return NULL;
+ }
+
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
+ iter = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
if (!iter) {
rtw_err(rtwdev, "failed to build rsvd packet\n");
goto release_skb;
@@ -1101,7 +1248,8 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
* is smaller than the actual size of the whole rsvd_page
*/
if (total_page == 0) {
- if (rsvd_pkt->type != RSVD_BEACON) {
+ if (rsvd_pkt->type != RSVD_BEACON &&
+ rsvd_pkt->type != RSVD_DUMMY) {
rtw_err(rtwdev, "first page should be a beacon\n");
goto release_skb;
}
@@ -1129,7 +1277,7 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
* And that rsvd_pkt does not require tx_desc because when it goes
* through TX path, the TX path will generate one for it.
*/
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
page, buf, rsvd_pkt);
if (page == 0)
@@ -1145,7 +1293,7 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
return buf;
release_skb:
- list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
kfree_skb(rsvd_pkt->skb);
rsvd_pkt->skb = NULL;
}
@@ -1153,18 +1301,31 @@ release_skb:
return NULL;
}
-static int
-rtw_download_beacon(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
+static int rtw_download_beacon(struct rtw_dev *rtwdev)
{
struct ieee80211_hw *hw = rtwdev->hw;
+ struct rtw_rsvd_page *rsvd_pkt;
struct sk_buff *skb;
int ret = 0;
- skb = rtw_beacon_get(hw, vif);
+ rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
+ struct rtw_rsvd_page, build_list);
+ if (!rsvd_pkt) {
+ rtw_err(rtwdev, "failed to get rsvd page from build list\n");
+ return -ENOENT;
+ }
+
+ if (rsvd_pkt->type != RSVD_BEACON &&
+ rsvd_pkt->type != RSVD_DUMMY) {
+ rtw_err(rtwdev, "invalid rsvd page type %d, should be beacon or dummy\n",
+ rsvd_pkt->type);
+ return -EINVAL;
+ }
+
+ skb = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
if (!skb) {
rtw_err(rtwdev, "failed to get beacon skb\n");
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
@@ -1173,17 +1334,16 @@ rtw_download_beacon(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
dev_kfree_skb(skb);
-out:
return ret;
}
-int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
+int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev)
{
u8 *buf;
u32 size;
int ret;
- buf = rtw_build_rsvd_page(rtwdev, vif, &size);
+ buf = rtw_build_rsvd_page(rtwdev, &size);
if (!buf) {
rtw_err(rtwdev, "failed to build rsvd page pkt\n");
return -ENOMEM;
@@ -1200,7 +1360,7 @@ int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
* the beacon again to replace the TX desc header, and we will get
* a correct tx_desc for the beacon in the rsvd page.
*/
- ret = rtw_download_beacon(rtwdev, vif);
+ ret = rtw_download_beacon(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to download beacon\n");
goto free;
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index ccd27bd45775..cdd244857048 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -36,11 +36,12 @@ enum rtw_c2h_cmd_id_ext {
struct rtw_c2h_cmd {
u8 id;
u8 seq;
- u8 payload[0];
+ u8 payload[];
} __packed;
enum rtw_rsvd_packet_type {
RSVD_BEACON,
+ RSVD_DUMMY,
RSVD_PS_POLL,
RSVD_PROBE_RESP,
RSVD_NULL,
@@ -98,7 +99,13 @@ struct rtw_lps_pg_info_hdr {
} __packed;
struct rtw_rsvd_page {
- struct list_head list;
+ /* associated with each vif */
+ struct list_head vif_list;
+ struct rtw_vif *rtwvif;
+
+ /* linked on the device list while building the rsvd page block */
+ struct list_head build_list;
+
struct sk_buff *skb;
enum rtw_rsvd_packet_type type;
u8 page;
@@ -502,15 +509,17 @@ void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data);
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
-void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
- bool txdesc);
-void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
- struct cfg80211_ssid *ssid);
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 *buf, u32 size);
-void rtw_reset_rsvd_page(struct rtw_dev *rtwdev);
-int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev,
- struct ieee80211_vif *vif);
+void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif);
+void rtw_add_rsvd_page_bcn(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif);
+void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif);
+void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif);
+int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev);
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev);
int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
u32 offset, u32 size, u32 *buf);
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
index 85a81a578fd5..2cba327e6218 100644
--- a/drivers/net/wireless/realtek/rtw88/hci.h
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -7,9 +7,10 @@
/* ops for PCI, USB and SDIO */
struct rtw_hci_ops {
- int (*tx)(struct rtw_dev *rtwdev,
- struct rtw_tx_pkt_info *pkt_info,
- struct sk_buff *skb);
+ int (*tx_write)(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb);
+ void (*tx_kick_off)(struct rtw_dev *rtwdev);
int (*setup)(struct rtw_dev *rtwdev);
int (*start)(struct rtw_dev *rtwdev);
void (*stop)(struct rtw_dev *rtwdev);
@@ -28,11 +29,16 @@ struct rtw_hci_ops {
void (*write32)(struct rtw_dev *rtwdev, u32 addr, u32 val);
};
-static inline int rtw_hci_tx(struct rtw_dev *rtwdev,
- struct rtw_tx_pkt_info *pkt_info,
- struct sk_buff *skb)
+static inline int rtw_hci_tx_write(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb)
{
- return rtwdev->hci.ops->tx(rtwdev, pkt_info, skb);
+ return rtwdev->hci.ops->tx_write(rtwdev, pkt_info, skb);
+}
+
+static inline void rtw_hci_tx_kick_off(struct rtw_dev *rtwdev)
+{
+ return rtwdev->hci.ops->tx_kick_off(rtwdev);
}
static inline int rtw_hci_setup(struct rtw_dev *rtwdev)
@@ -193,6 +199,32 @@ rtw_read32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
return ret;
}
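+/* Generic field read: __ffs(mask) is the index of the lowest set bit,
+ * so (val & mask) >> __ffs(mask) extracts the field; e.g. a mask of
+ * GENMASK(5, 2) returns bits [5:2] shifted down to bit 0.
+ */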
+static inline u16
+rtw_read16_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
+{
+ u32 shift = __ffs(mask);
+ u32 orig;
+ u32 ret;
+
+ orig = rtw_read16(rtwdev, addr);
+ ret = (orig & mask) >> shift;
+
+ return ret;
+}
+
+static inline u8
+rtw_read8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
+{
+ u32 shift = __ffs(mask);
+ u32 orig;
+ u32 ret;
+
+ orig = rtw_read8(rtwdev, addr);
+ ret = (orig & mask) >> shift;
+
+ return ret;
+}
+
static inline void
rtw_write32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
{
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index cadf0abbe16b..7b245779ff90 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -17,10 +17,10 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
txsc20 = primary_ch_idx;
if (bw == RTW_CHANNEL_WIDTH_80) {
- if (txsc20 == 1 || txsc20 == 3)
- txsc40 = 9;
+ if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
+ txsc40 = RTW_SC_40_UPPER;
else
- txsc40 = 10;
+ txsc40 = RTW_SC_40_LOWER;
}
rtw_write8(rtwdev, REG_DATA_SC,
BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));
@@ -101,7 +101,7 @@ static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
}
static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
- struct rtw_pwr_seq_cmd *cmd)
+ const struct rtw_pwr_seq_cmd *cmd)
{
u8 value;
u8 flag = 0;
@@ -139,9 +139,10 @@ static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
}
static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
- u8 cut_mask, struct rtw_pwr_seq_cmd *cmd)
+ u8 cut_mask,
+ const struct rtw_pwr_seq_cmd *cmd)
{
- struct rtw_pwr_seq_cmd *cur_cmd;
+ const struct rtw_pwr_seq_cmd *cur_cmd;
u32 offset;
u8 value;
@@ -183,13 +184,13 @@ static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
}
static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
- struct rtw_pwr_seq_cmd **cmd_seq)
+ const struct rtw_pwr_seq_cmd **cmd_seq)
{
u8 cut_mask;
u8 intf_mask;
u8 cut;
u32 idx = 0;
- struct rtw_pwr_seq_cmd *cmd;
+ const struct rtw_pwr_seq_cmd *cmd;
int ret;
cut = rtwdev->hal.cut_version;
@@ -223,7 +224,7 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_pwr_seq_cmd **pwr_seq;
+ const struct rtw_pwr_seq_cmd **pwr_seq;
u8 rpwm;
bool cur_pwr;
@@ -705,7 +706,7 @@ dlfw_fail:
static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
- struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
+ const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
u32 prio_queues = 0;
if (queues & BIT(IEEE80211_AC_VO))
@@ -793,7 +794,7 @@ void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_rqpn *rqpn = NULL;
+ const struct rtw_rqpn *rqpn = NULL;
u16 txdma_pq_map = 0;
switch (rtw_hci_type(rtwdev)) {
@@ -882,7 +883,7 @@ static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_page_table *pg_tbl = NULL;
+ const struct rtw_page_table *pg_tbl = NULL;
u16 pubq_num;
int ret;
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 6fc33e11d08c..d7d02e4c0184 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -75,15 +75,12 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
rtw_leave_lps_deep(rtwdev);
- if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- if (hw->conf.flags & IEEE80211_CONF_IDLE) {
- rtw_enter_ips(rtwdev);
- } else {
- ret = rtw_leave_ips(rtwdev);
- if (ret) {
- rtw_err(rtwdev, "failed to leave idle state\n");
- goto out;
- }
+ if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ !(hw->conf.flags & IEEE80211_CONF_IDLE)) {
+ ret = rtw_leave_ips(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to leave idle state\n");
+ goto out;
}
}
@@ -99,6 +96,10 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
rtw_set_channel(rtwdev);
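+ /* Enter IPS last so the updated configuration is fully applied
+ * before the device powers down.
+ */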
+ if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ (hw->conf.flags & IEEE80211_CONF_IDLE))
+ rtw_enter_ips(rtwdev);
+
out:
mutex_unlock(&rtwdev->mutex);
return ret;
@@ -160,6 +161,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
memset(&rtwvif->bfee, 0, sizeof(struct rtw_bfee));
rtwvif->conf = &rtw_vif_port[port];
rtw_txq_init(rtwdev, vif->txq);
+ INIT_LIST_HEAD(&rtwvif->rsvd_page_list);
mutex_lock(&rtwdev->mutex);
@@ -168,18 +170,24 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
+ rtw_add_rsvd_page_bcn(rtwdev, rtwvif);
net_type = RTW_NET_AP_MODE;
bcn_ctrl = BIT_EN_BCN_FUNCTION | BIT_DIS_TSF_UDT;
break;
case NL80211_IFTYPE_ADHOC:
+ rtw_add_rsvd_page_bcn(rtwdev, rtwvif);
net_type = RTW_NET_AD_HOC;
bcn_ctrl = BIT_EN_BCN_FUNCTION | BIT_DIS_TSF_UDT;
break;
case NL80211_IFTYPE_STATION:
- default:
+ rtw_add_rsvd_page_sta(rtwdev, rtwvif);
net_type = RTW_NET_NO_LINK;
bcn_ctrl = BIT_EN_BCN_FUNCTION;
break;
+ default:
+ WARN_ON(1);
+ mutex_unlock(&rtwdev->mutex);
+ return -EINVAL;
}
ether_addr_copy(rtwvif->mac_addr, vif->addr);
@@ -210,6 +218,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
rtw_leave_lps_deep(rtwdev);
rtw_txq_cleanup(rtwdev, vif->txq);
+ rtw_remove_rsvd_page(rtwdev, rtwvif);
eth_zero_addr(rtwvif->mac_addr);
config |= PORT_SET_MAC_ADDR;
@@ -341,12 +350,7 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
chip->ops->phy_calibration(rtwdev);
rtwvif->aid = conf->aid;
- rtw_add_rsvd_page(rtwdev, RSVD_PS_POLL, true);
- rtw_add_rsvd_page(rtwdev, RSVD_QOS_NULL, true);
- rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
- rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_DPK, true);
- rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_INFO, true);
- rtw_fw_download_rsvd_page(rtwdev, vif);
+ rtw_fw_download_rsvd_page(rtwdev);
rtw_send_rsvd_page_h2c(rtwdev);
rtw_coex_media_status_notify(rtwdev, conf->assoc);
if (rtw_bf_support)
@@ -355,7 +359,6 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_leave_lps(rtwdev);
net_type = RTW_NET_NO_LINK;
rtwvif->aid = 0;
- rtw_reset_rsvd_page(rtwdev);
rtw_bf_disassoc(rtwdev, vif, conf);
}
@@ -370,7 +373,7 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_BEACON)
- rtw_fw_download_rsvd_page(rtwdev, vif);
+ rtw_fw_download_rsvd_page(rtwdev);
if (changed & BSS_CHANGED_MU_GROUPS) {
struct rtw_chip_info *chip = rtwdev->chip;
@@ -514,6 +517,9 @@ static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
/* suppress error messages */
return -EOPNOTSUPP;
default:
@@ -552,7 +558,7 @@ static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
/* download new cam settings for PG to backup */
if (rtw_fw_lps_deep_mode == LPS_DEEP_MODE_PG)
- rtw_fw_download_rsvd_page(rtwdev, vif);
+ rtw_fw_download_rsvd_page(rtwdev);
out:
mutex_unlock(&rtwdev->mutex);
@@ -592,6 +598,20 @@ static int rtw_ops_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
+static bool rtw_ops_can_aggregate_in_amsdu(struct ieee80211_hw *hw,
+ struct sk_buff *head,
+ struct sk_buff *skb)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ /* we don't want to enable TX AMSDU on 2.4G */
+ if (hal->current_band_type == RTW_BAND_2G)
+ return false;
+
+ return true;
+}
+
static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const u8 *mac_addr)
@@ -787,6 +807,7 @@ const struct ieee80211_ops rtw_ops = {
.sta_remove = rtw_ops_sta_remove,
.set_key = rtw_ops_set_key,
.ampdu_action = rtw_ops_ampdu_action,
+ .can_aggregate_in_amsdu = rtw_ops_can_aggregate_in_amsdu,
.sw_scan_start = rtw_ops_sw_scan_start,
.sw_scan_complete = rtw_ops_sw_scan_complete,
.mgd_prepare_tx = rtw_ops_mgd_prepare_tx,
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 2845d2838f7b..7640e97706f5 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -317,15 +317,15 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
bandwidth = RTW_CHANNEL_WIDTH_20;
- primary_chan_idx = 0;
+ primary_chan_idx = RTW_SC_DONT_CARE;
break;
case NL80211_CHAN_WIDTH_40:
bandwidth = RTW_CHANNEL_WIDTH_40;
if (primary_freq > center_freq) {
- primary_chan_idx = 1;
+ primary_chan_idx = RTW_SC_20_UPPER;
center_chan -= 2;
} else {
- primary_chan_idx = 2;
+ primary_chan_idx = RTW_SC_20_LOWER;
center_chan += 2;
}
break;
@@ -333,10 +333,10 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
bandwidth = RTW_CHANNEL_WIDTH_80;
if (primary_freq > center_freq) {
if (primary_freq - center_freq == 10) {
- primary_chan_idx = 1;
+ primary_chan_idx = RTW_SC_20_UPPER;
center_chan -= 2;
} else {
- primary_chan_idx = 3;
+ primary_chan_idx = RTW_SC_20_UPMOST;
center_chan -= 6;
}
/* assign the center channel used
@@ -345,10 +345,10 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan + 4;
} else {
if (center_freq - primary_freq == 10) {
- primary_chan_idx = 2;
+ primary_chan_idx = RTW_SC_20_LOWER;
center_chan += 2;
} else {
- primary_chan_idx = 4;
+ primary_chan_idx = RTW_SC_20_LOWEST;
center_chan += 6;
}
/* assign the center channel used
@@ -909,11 +909,16 @@ void rtw_core_stop(struct rtw_dev *rtwdev)
clear_bit(RTW_FLAG_RUNNING, rtwdev->flags);
clear_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);
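+ /* The works cancelled below take rtwdev->mutex themselves (the C2H
+ * handler does), so release it around cancel_*_sync() to avoid a
+ * deadlock.
+ */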
+ mutex_unlock(&rtwdev->mutex);
+
+ cancel_work_sync(&rtwdev->c2h_work);
cancel_delayed_work_sync(&rtwdev->watch_dog_work);
cancel_delayed_work_sync(&coex->bt_relink_work);
cancel_delayed_work_sync(&coex->bt_reenable_work);
cancel_delayed_work_sync(&coex->defreeze_work);
+ mutex_lock(&rtwdev->mutex);
+
rtw_power_off(rtwdev);
}
@@ -1113,7 +1118,6 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
}
hal->chip_version = rtw_read32(rtwdev, REG_SYS_CFG1);
- hal->fab_version = BIT_GET_VENDOR_ID(hal->chip_version) >> 2;
hal->cut_version = BIT_GET_CHIP_VER(hal->chip_version);
hal->mp_chip = (hal->chip_version & BIT_RTL_ID) ? 0 : 1;
if (hal->chip_version & BIT_RF_TYPE_ID) {
@@ -1128,11 +1132,6 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
hal->antenna_rx = BB_PATH_A;
}
- if (hal->fab_version == 2)
- hal->fab_version = 1;
- else if (hal->fab_version == 1)
- hal->fab_version = 2;
-
efuse->physical_size = chip->phy_efuse_size;
efuse->logical_size = chip->log_efuse_size;
efuse->protect_size = chip->ptct_efuse_size;
@@ -1395,10 +1394,6 @@ int rtw_core_init(struct rtw_dev *rtwdev)
else
rtwdev->lps_conf.deep_mode = rtw_fw_lps_deep_mode;
- mutex_lock(&rtwdev->mutex);
- rtw_add_rsvd_page(rtwdev, RSVD_BEACON, false);
- mutex_unlock(&rtwdev->mutex);
-
rtw_stats_init(rtwdev);
/* default rx filter setting */
@@ -1441,8 +1436,9 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
skb_queue_purge(&rtwdev->tx_report.queue);
spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);
- list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, list) {
- list_del(&rsvd_pkt->list);
+ list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
+ build_list) {
+ list_del(&rsvd_pkt->build_list);
kfree(rsvd_pkt);
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index f334d201bfb5..c6b590fdb573 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -99,6 +99,16 @@ enum rtw_bandwidth {
RTW_CHANNEL_WIDTH_10 = 6,
};
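+/* Secondary-channel index as written to REG_DATA_SC: values 1-4 pick
+ * which 20 MHz subchannel is primary within 40/80 MHz, and 9/10 pick
+ * the upper/lower 40 MHz half of an 80 MHz channel.
+ */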
+enum rtw_sc_offset {
+ RTW_SC_DONT_CARE = 0,
+ RTW_SC_20_UPPER = 1,
+ RTW_SC_20_LOWER = 2,
+ RTW_SC_20_UPMOST = 3,
+ RTW_SC_20_LOWEST = 4,
+ RTW_SC_40_UPPER = 9,
+ RTW_SC_40_LOWER = 10,
+};
+
enum rtw_net_type {
RTW_NET_NO_LINK = 0,
RTW_NET_AD_HOC = 1,
@@ -505,6 +515,18 @@ struct rtw_hw_reg {
u32 mask;
};
+struct rtw_reg_domain {
+ u32 addr;
+ u32 mask;
+#define RTW_REG_DOMAIN_MAC32 0
+#define RTW_REG_DOMAIN_MAC16 1
+#define RTW_REG_DOMAIN_MAC8 2
+#define RTW_REG_DOMAIN_RF_A 3
+#define RTW_REG_DOMAIN_RF_B 4
+#define RTW_REG_DOMAIN_NL 0xFF
+ u8 domain;
+};
+
struct rtw_backup_info {
u8 len;
u32 reg;
@@ -552,6 +574,9 @@ struct rtw_tx_pkt_info {
bool short_gi;
bool report;
bool rts;
+ bool dis_qselseq;
+ bool en_hwseq;
+ u8 hw_ssn_sel;
};
struct rtw_rx_pkt_stat {
@@ -742,6 +767,7 @@ struct rtw_vif {
u8 bssid[ETH_ALEN];
u8 port;
u8 bcn_ctrl;
+ struct list_head rsvd_page_list;
struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
const struct rtw_vif_port *conf;
@@ -948,10 +974,10 @@ struct rtw_wow_param {
};
struct rtw_intf_phy_para_table {
- struct rtw_intf_phy_para *usb2_para;
- struct rtw_intf_phy_para *usb3_para;
- struct rtw_intf_phy_para *gen1_para;
- struct rtw_intf_phy_para *gen2_para;
+ const struct rtw_intf_phy_para *usb2_para;
+ const struct rtw_intf_phy_para *usb3_para;
+ const struct rtw_intf_phy_para *gen1_para;
+ const struct rtw_intf_phy_para *gen2_para;
u8 n_usb2_para;
u8 n_usb3_para;
u8 n_gen1_para;
@@ -1048,13 +1074,13 @@ struct rtw_chip_info {
/* init values */
u8 sys_func_en;
- struct rtw_pwr_seq_cmd **pwr_on_seq;
- struct rtw_pwr_seq_cmd **pwr_off_seq;
- struct rtw_rqpn *rqpn_table;
- struct rtw_page_table *page_table;
- struct rtw_intf_phy_para_table *intf_table;
+ const struct rtw_pwr_seq_cmd **pwr_on_seq;
+ const struct rtw_pwr_seq_cmd **pwr_off_seq;
+ const struct rtw_rqpn *rqpn_table;
+ const struct rtw_page_table *page_table;
+ const struct rtw_intf_phy_para_table *intf_table;
- struct rtw_hw_reg *dig;
+ const struct rtw_hw_reg *dig;
u32 rf_base_addr[2];
u32 rf_sipi_addr[2];
@@ -1096,6 +1122,7 @@ struct rtw_chip_info {
u8 bt_afh_span_bw40;
u8 afh_5g_num;
u8 wl_rf_para_num;
+ u8 coex_info_hw_regs_num;
const u8 *bt_rssi_step;
const u8 *wl_rssi_step;
const struct coex_table_para *table_nsant;
@@ -1105,6 +1132,7 @@ struct rtw_chip_info {
const struct coex_rf_para *wl_rf_para_tx;
const struct coex_rf_para *wl_rf_para_rx;
const struct coex_5g_afh_map *afh_5g;
+ const struct rtw_reg_domain *coex_info_hw_regs;
};
enum rtw_coex_bt_state_cnt {
@@ -1151,6 +1179,7 @@ struct rtw_coex_rfe {
struct rtw_coex_dm {
bool cur_ps_tdma_on;
bool cur_wl_rx_low_gain_en;
+ bool ignore_wl_act;
u8 reason;
u8 bt_rssi_state[4];
@@ -1231,6 +1260,9 @@ struct rtw_coex_stat {
u32 bt_supported_version;
u32 bt_supported_feature;
+ u32 patch_ver;
+ u16 bt_reg_vendor_ae;
+ u16 bt_reg_vendor_ac;
s8 bt_rssi;
u8 kt_ver;
u8 gnt_workaround_state;
@@ -1500,7 +1532,7 @@ struct rtw_fifo_conf {
u16 rsvd_cpu_instr_addr;
u16 rsvd_fw_txbuf_addr;
u16 rsvd_csibuf_addr;
- struct rtw_rqpn *rqpn;
+ const struct rtw_rqpn *rqpn;
};
struct rtw_fw_state {
@@ -1517,7 +1549,6 @@ struct rtw_hal {
u32 rcr;
u32 chip_version;
- u8 fab_version;
u8 cut_version;
u8 mp_chip;
u8 oem_id;
@@ -1631,7 +1662,7 @@ struct rtw_dev {
struct rtw_wow_param wow;
/* hci related data, must be last */
- u8 priv[0] __aligned(sizeof(void *));
+ u8 priv[] __aligned(sizeof(void *));
};
#include "hci.h"
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 1fbc14c149ec..e37c71495c0d 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -186,6 +186,11 @@ static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
dma_addr_t dma;
u8 *head;
+ if (len > TRX_BD_IDX_MASK) {
+ rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
+ return -EINVAL;
+ }
+
head = pci_zalloc_consistent(pdev, ring_sz, &dma);
if (!head) {
rtw_err(rtwdev, "failed to allocate tx ring\n");
@@ -259,6 +264,11 @@ static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
int i, allocated;
int ret = 0;
+ if (len > TRX_BD_IDX_MASK) {
+ rtw_err(rtwdev, "len %d exceeds maximum RX entries\n", len);
+ return -EINVAL;
+ }
+
head = pci_zalloc_consistent(pdev, ring_sz, &dma);
if (!head) {
rtw_err(rtwdev, "failed to allocate rx ring\n");
@@ -382,6 +392,7 @@ static int rtw_pci_init(struct rtw_dev *rtwdev)
rtwpci->irq_mask[3] = IMR_H2CDOK |
0;
spin_lock_init(&rtwpci->irq_lock);
+ spin_lock_init(&rtwpci->hwirq_lock);
ret = rtw_pci_init_trx_ring(rtwdev);
return ret;
@@ -404,56 +415,56 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len);
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len);
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len);
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len);
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len);
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len);
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len);
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);
len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
- rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & 0xfff);
+ rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);
/* reset read/write point */
@@ -472,19 +483,35 @@ static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
struct rtw_pci *rtwpci)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
+
rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
rtwpci->irq_enabled = true;
+
+ spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}
static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
struct rtw_pci *rtwpci)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
+
+ if (!rtwpci->irq_enabled)
+ goto out;
+
rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
rtwpci->irq_enabled = false;
+
+out:
+ spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}
static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
@@ -520,11 +547,10 @@ static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
static int rtw_pci_start(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- unsigned long flags;
- spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ spin_lock_bh(&rtwpci->irq_lock);
rtw_pci_enable_interrupt(rtwdev, rtwpci);
- spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+ spin_unlock_bh(&rtwpci->irq_lock);
return 0;
}
@@ -532,12 +558,11 @@ static int rtw_pci_start(struct rtw_dev *rtwdev)
static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- unsigned long flags;
- spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ spin_lock_bh(&rtwpci->irq_lock);
rtw_pci_disable_interrupt(rtwdev, rtwpci);
rtw_pci_dma_release(rtwdev, rtwpci);
- spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+ spin_unlock_bh(&rtwpci->irq_lock);
}
static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
@@ -590,9 +615,8 @@ static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- unsigned long flags;
- spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ spin_lock_bh(&rtwpci->irq_lock);
if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
rtw_pci_deep_ps_enter(rtwdev);
@@ -600,7 +624,7 @@ static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
rtw_pci_deep_ps_leave(rtwdev);
- spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+ spin_unlock_bh(&rtwpci->irq_lock);
}
static u8 ac_to_hwq[] = {
@@ -667,9 +691,34 @@ static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}
-static int rtw_pci_xmit(struct rtw_dev *rtwdev,
- struct rtw_tx_pkt_info *pkt_info,
- struct sk_buff *skb, u8 queue)
+static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_pci_tx_ring *ring;
+ u32 bd_idx;
+
+ ring = &rtwpci->tx_rings[queue];
+ bd_idx = rtw_pci_tx_queue_idx_addr[queue];
+
+ spin_lock_bh(&rtwpci->irq_lock);
+ rtw_pci_deep_ps_leave(rtwdev);
+ rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
+ spin_unlock_bh(&rtwpci->irq_lock);
+}
+
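+/* tx_write only queues skbs and advances the software write pointer;
+ * the BD index (doorbell) register is written once per pending queue
+ * here, so a burst of frames costs one MMIO write per queue.
+ */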
+static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ u8 queue;
+
+ for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
+ if (test_and_clear_bit(queue, rtwpci->tx_queued))
+ rtw_pci_tx_kick_off_queue(rtwdev, queue);
+}
+
+static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb, u8 queue)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_chip_info *chip = rtwdev->chip;
@@ -682,8 +731,6 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
u32 psb_len;
u8 *pkt_desc;
struct rtw_pci_tx_buffer_desc *buf_desc;
- u32 bd_idx;
- unsigned long flags;
ring = &rtwpci->tx_rings[queue];
@@ -720,25 +767,20 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
tx_data->dma = dma;
tx_data->sn = pkt_info->sn;
- spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ spin_lock_bh(&rtwpci->irq_lock);
- rtw_pci_deep_ps_leave(rtwdev);
skb_queue_tail(&ring->queue, skb);
- /* kick off tx queue */
- if (queue != RTW_TX_QUEUE_BCN) {
- if (++ring->r.wp >= ring->r.len)
- ring->r.wp = 0;
- bd_idx = rtw_pci_tx_queue_idx_addr[queue];
- rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
- } else {
- u32 reg_bcn_work;
-
- reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
- reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
- rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
- }
- spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+ if (queue == RTW_TX_QUEUE_BCN)
+ goto out_unlock;
+
+ /* update write-index, and kick it off later */
+ set_bit(queue, rtwpci->tx_queued);
+ if (++ring->r.wp >= ring->r.len)
+ ring->r.wp = 0;
+
+out_unlock:
+ spin_unlock_bh(&rtwpci->irq_lock);
return 0;
}
@@ -747,56 +789,59 @@ static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
u32 size)
{
struct sk_buff *skb;
- struct rtw_tx_pkt_info pkt_info;
- u32 tx_pkt_desc_sz;
- u32 length;
+ struct rtw_tx_pkt_info pkt_info = {0};
+ u8 reg_bcn_work;
+ int ret;
- tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
- length = size + tx_pkt_desc_sz;
- skb = dev_alloc_skb(length);
+ skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
if (!skb)
return -ENOMEM;
- skb_reserve(skb, tx_pkt_desc_sz);
- memcpy((u8 *)skb_put(skb, size), buf, size);
- memset(&pkt_info, 0, sizeof(pkt_info));
- pkt_info.tx_pkt_size = size;
- pkt_info.offset = tx_pkt_desc_sz;
+ ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
+ if (ret) {
+ rtw_err(rtwdev, "failed to write rsvd page data\n");
+ return ret;
+ }
+
+ /* reserved pages go through beacon queue */
+ reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
+ reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
+ rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
- return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
+ return 0;
}
static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
struct sk_buff *skb;
- struct rtw_tx_pkt_info pkt_info;
- u32 tx_pkt_desc_sz;
- u32 length;
+ struct rtw_tx_pkt_info pkt_info = {0};
+ int ret;
- tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
- length = size + tx_pkt_desc_sz;
- skb = dev_alloc_skb(length);
+ skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
if (!skb)
return -ENOMEM;
- skb_reserve(skb, tx_pkt_desc_sz);
- memcpy((u8 *)skb_put(skb, size), buf, size);
- memset(&pkt_info, 0, sizeof(pkt_info));
- pkt_info.tx_pkt_size = size;
+ ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
+ if (ret) {
+ rtw_err(rtwdev, "failed to write h2c data\n");
+ return ret;
+ }
- return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
+ rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);
+
+ return 0;
}
-static int rtw_pci_tx(struct rtw_dev *rtwdev,
- struct rtw_tx_pkt_info *pkt_info,
- struct sk_buff *skb)
+static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_pci_tx_ring *ring;
u8 queue = rtw_hw_queue_mapping(skb);
int ret;
- ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
+ ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
if (ret)
return ret;
@@ -827,7 +872,7 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
bd_idx = rtw_read32(rtwdev, bd_idx_addr);
cur_rp = bd_idx >> 16;
- cur_rp &= 0xfff;
+ cur_rp &= TRX_BD_IDX_MASK;
if (cur_rp >= ring->r.rp)
count = cur_rp - ring->r.rp;
else
@@ -901,7 +946,7 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
cur_wp = tmp >> 16;
- cur_wp &= 0xfff;
+ cur_wp &= TRX_BD_IDX_MASK;
if (cur_wp >= ring->r.wp)
count = cur_wp - ring->r.wp;
else
@@ -961,6 +1006,10 @@ next_rp:
static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
struct rtw_pci *rtwpci, u32 *irq_status)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
+
irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
@@ -970,6 +1019,8 @@ static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
+
+ spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}
static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
@@ -977,10 +1028,6 @@ static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
struct rtw_dev *rtwdev = dev;
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- spin_lock(&rtwpci->irq_lock);
- if (!rtwpci->irq_enabled)
- goto out;
-
/* disable RTW PCI interrupt to avoid more interrupts before the end of
* thread function
*
@@ -990,8 +1037,6 @@ static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
* a new HISR flag is set.
*/
rtw_pci_disable_interrupt(rtwdev, rtwpci);
-out:
- spin_unlock(&rtwpci->irq_lock);
return IRQ_WAKE_THREAD;
}
@@ -1000,10 +1045,9 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
struct rtw_dev *rtwdev = dev;
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- unsigned long flags;
u32 irq_status[4];
- spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ spin_lock_bh(&rtwpci->irq_lock);
rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
if (irq_status[0] & IMR_MGNTDOK)
@@ -1025,7 +1069,7 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
/* all of the jobs for this interrupt have been done */
rtw_pci_enable_interrupt(rtwdev, rtwpci);
- spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+ spin_unlock_bh(&rtwpci->irq_lock);
return IRQ_HANDLED;
}
@@ -1197,11 +1241,18 @@ static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
+ struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct pci_dev *pdev = rtwpci->pdev;
u16 link_ctrl;
int ret;
+ /* RTL8822CE has enabled REFCLK auto calibration, it does not need
+ * to add clock delay to cover the REFCLK timing gap.
+ */
+ if (chip->id == RTW_CHIP_TYPE_8822C)
+ rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);
+
/* Though there is a standard PCIE configuration space to set the
* link control register, by Realtek's design the driver should
* check if the host supports CLKREQ/ASPM before enabling the HW module.
@@ -1248,7 +1299,7 @@ static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_intf_phy_para *para;
+ const struct rtw_intf_phy_para *para;
u16 cut;
u16 value;
u16 offset;
@@ -1364,7 +1415,8 @@ static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
}
static struct rtw_hci_ops rtw_pci_ops = {
- .tx = rtw_pci_tx,
+ .tx_write = rtw_pci_tx_write,
+ .tx_kick_off = rtw_pci_tx_kick_off,
.setup = rtw_pci_setup,
.start = rtw_pci_start,
.stop = rtw_pci_stop,
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 1580cfc57361..3ac4fb328d31 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -39,6 +39,7 @@
#define RTK_PCIE_LINK_CFG 0x0719
#define BIT_CLKREQ_SW_EN BIT(4)
#define BIT_L1_SW_EN BIT(3)
+#define RTK_PCIE_CLKDLY_CTRL 0x0725
#define BIT_PCI_BCNQ_FLAG BIT(4)
#define RTK_PCI_TXBD_DESA_BCNQ 0x308
@@ -51,6 +52,8 @@
#define RTK_PCI_TXBD_DESA_HI0Q 0x340
#define RTK_PCI_RXBD_DESA_MPDUQ 0x338
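+
+/* The ring read/write pointers live in 12-bit register fields, so ring
+ * lengths and indexes must fit below 4096 entries.
+ */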
+#define TRX_BD_IDX_MASK GENMASK(11, 0)
+
/* BCNQ is specialized for rsvd page, does not need to specify a number */
#define RTK_PCI_TXBD_NUM_H2CQ 0x1328
#define RTK_PCI_TXBD_NUM_MGMTQ 0x380
@@ -197,12 +200,15 @@ struct rtw_pci_rx_ring {
struct rtw_pci {
struct pci_dev *pdev;
- /* used for pci interrupt */
+ /* Used for PCI interrupt. */
+ spinlock_t hwirq_lock;
+ /* Used for PCI TX queueing. */
spinlock_t irq_lock;
u32 irq_mask[4];
bool irq_enabled;
u16 rx_tag;
+ DECLARE_BITMAP(tx_queued, RTK_MAX_TX_QUEUE_NUM);
struct rtw_pci_tx_ring tx_rings[RTK_MAX_TX_QUEUE_NUM];
struct rtw_pci_rx_ring rx_rings[RTK_MAX_RX_QUEUE_NUM];
u16 link_ctrl;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index eea9d888fbf1..8793dd22188f 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -749,20 +749,10 @@ bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
direct_addr = base_addr[rf_path] + (addr << 2);
mask &= RFREG_MASK;
- if (addr == RF_CFGCH) {
- rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
- rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
- }
-
rtw_write32_mask(rtwdev, direct_addr, mask, data);
udelay(1);
- if (addr == RF_CFGCH) {
- rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
- rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);
- }
-
return true;
}
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 4bc14b1a6340..4dd7d4143b04 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -645,7 +645,7 @@ static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
break;
case RTW_CHANNEL_WIDTH_40:
- if (primary_ch_idx == 1)
+ if (primary_ch_idx == RTW_SC_20_UPPER)
rtw_write32_set(rtwdev, REG_RXSB, BIT(4));
else
rtw_write32_clr(rtwdev, REG_RXSB, BIT(4));
@@ -1543,7 +1543,7 @@ static void rtw8822b_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
rtw_warn(rtwdev, "wrong bfee role\n");
}
-static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
+static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -1581,7 +1581,7 @@ static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822b[] = {
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822b[] = {
{0x0012,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
@@ -1714,7 +1714,7 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822b[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822b[] = {
+static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822b[] = {
{0x0003,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -1787,7 +1787,7 @@ static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822b[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822b[] = {
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822b[] = {
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -1905,26 +1905,26 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822b[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd *card_enable_flow_8822b[] = {
+static const struct rtw_pwr_seq_cmd *card_enable_flow_8822b[] = {
trans_carddis_to_cardemu_8822b,
trans_cardemu_to_act_8822b,
NULL
};
-static struct rtw_pwr_seq_cmd *card_disable_flow_8822b[] = {
+static const struct rtw_pwr_seq_cmd *card_disable_flow_8822b[] = {
trans_act_to_cardemu_8822b,
trans_cardemu_to_carddis_8822b,
NULL
};
-static struct rtw_intf_phy_para usb2_param_8822b[] = {
+static const struct rtw_intf_phy_para usb2_param_8822b[] = {
{0xFFFF, 0x00,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para usb3_param_8822b[] = {
+static const struct rtw_intf_phy_para usb3_param_8822b[] = {
{0x0001, 0xA841,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_D,
@@ -1935,7 +1935,7 @@ static struct rtw_intf_phy_para usb3_param_8822b[] = {
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para pcie_gen1_param_8822b[] = {
+static const struct rtw_intf_phy_para pcie_gen1_param_8822b[] = {
{0x0001, 0xA841,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
@@ -1982,7 +1982,7 @@ static struct rtw_intf_phy_para pcie_gen1_param_8822b[] = {
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para pcie_gen2_param_8822b[] = {
+static const struct rtw_intf_phy_para pcie_gen2_param_8822b[] = {
{0x0001, 0xA841,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
@@ -2029,7 +2029,7 @@ static struct rtw_intf_phy_para pcie_gen2_param_8822b[] = {
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para_table phy_para_table_8822b = {
+static const struct rtw_intf_phy_para_table phy_para_table_8822b = {
.usb2_para = usb2_param_8822b,
.usb3_para = usb3_param_8822b,
.gen1_para = pcie_gen1_param_8822b,
@@ -2046,12 +2046,12 @@ static const struct rtw_rfe_def rtw8822b_rfe_defs[] = {
[5] = RTW_DEF_RFE(8822b, 5, 5),
};
-static struct rtw_hw_reg rtw8822b_dig[] = {
+static const struct rtw_hw_reg rtw8822b_dig[] = {
[0] = { .addr = 0xc50, .mask = 0x7f },
[1] = { .addr = 0xe50, .mask = 0x7f },
};
-static struct rtw_page_table page_table_8822b[] = {
+static const struct rtw_page_table page_table_8822b[] = {
{64, 64, 64, 64, 1},
{64, 64, 64, 64, 1},
{64, 64, 0, 0, 1},
@@ -2059,7 +2059,7 @@ static struct rtw_page_table page_table_8822b[] = {
{64, 64, 64, 64, 1},
};
-static struct rtw_rqpn rqpn_table_8822b[] = {
+static const struct rtw_rqpn rqpn_table_8822b[] = {
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
@@ -2371,6 +2371,33 @@ static const struct rtw_pwr_track_tbl rtw8822b_rtw_pwr_track_tbl = {
.pwrtrk_2g_ccka_p = rtw8822b_pwrtrk_2g_cck_a_p,
};
+static const struct rtw_reg_domain coex_info_hw_regs_8822b[] = {
+ {0xcb0, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0xcb4, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0xcba, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0xcbd, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0xc58, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0xcbd, BIT(0), RTW_REG_DOMAIN_MAC8},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x430, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x434, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x42a, MASKLWORD, RTW_REG_DOMAIN_MAC16},
+ {0x426, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0x45e, BIT(3), RTW_REG_DOMAIN_MAC8},
+ {0x454, MASKLWORD, RTW_REG_DOMAIN_MAC16},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x4c, BIT(24) | BIT(23), RTW_REG_DOMAIN_MAC32},
+ {0x64, BIT(0), RTW_REG_DOMAIN_MAC8},
+ {0x4c6, BIT(4), RTW_REG_DOMAIN_MAC8},
+ {0x40, BIT(5), RTW_REG_DOMAIN_MAC8},
+ {0x1, RFREG_MASK, RTW_REG_DOMAIN_RF_B},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x550, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x522, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0x953, BIT(1), RTW_REG_DOMAIN_MAC8},
+ {0xc50, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+};
+
struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
@@ -2439,6 +2466,9 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.bt_afh_span_bw40 = 0x36,
.afh_5g_num = ARRAY_SIZE(afh_5g_8822b),
.afh_5g = afh_5g_8822b,
+
+ .coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8822b),
+ .coex_info_hw_regs = coex_info_hw_regs_8822b,
};
EXPORT_SYMBOL(rtw8822b_hw_spec);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 3865097696d4..dc07e6be38e8 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -1289,6 +1289,17 @@ static int rtw8822c_mac_init(struct rtw_dev *rtwdev)
return 0;
}
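+/*
+ * Hold the BB 3-wire interface in reset (BIT_RSTB_3WIRE cleared) while the
+ * RF registers are reprogrammed; when released, BIT_ANAPAR_UPDATE is set on
+ * both paths so the updated analog parameters take effect.
+ */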
+static void rtw8822c_rstb_3wire(struct rtw_dev *rtwdev, bool enable)
+{
+ if (enable) {
+ rtw_write32_mask(rtwdev, REG_RSTB, BIT_RSTB_3WIRE, 0x1);
+ rtw_write32_mask(rtwdev, REG_ANAPAR_A, BIT_ANAPAR_UPDATE, 0x1);
+ rtw_write32_mask(rtwdev, REG_ANAPAR_B, BIT_ANAPAR_UPDATE, 0x1);
+ } else {
+ rtw_write32_mask(rtwdev, REG_RSTB, BIT_RSTB_3WIRE, 0x0);
+ }
+}
+
static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
{
#define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8))
@@ -1337,6 +1348,8 @@ static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
break;
}
+ rtw8822c_rstb_3wire(rtwdev, false);
+
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, 0x04, 0x01);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, 0x1f, 0x12);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, 0xfffff, rf_rxbb);
@@ -1349,6 +1362,8 @@ static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, rf_reg18);
rtw_write_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK, rf_reg18);
+
+ rtw8822c_rstb_3wire(rtwdev, true);
}
static void rtw8822c_toggle_igi(struct rtw_dev *rtwdev)
@@ -1482,7 +1497,7 @@ static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
break;
case RTW_CHANNEL_WIDTH_40:
rtw_write32_mask(rtwdev, REG_CCKSB, BIT(4),
- (primary_ch_idx == 1 ? 1 : 0));
+ (primary_ch_idx == RTW_SC_20_UPPER ? 1 : 0));
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x5);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xc0, 0x0);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xff00,
@@ -3399,7 +3414,7 @@ static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
dm_info->pwr_trk_triggered = false;
}
-static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
+static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -3442,7 +3457,7 @@ static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
{0x0000,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
@@ -3544,6 +3559,11 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0x1064,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
{0xFFFF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
@@ -3551,7 +3571,7 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822c[] = {
+static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822c[] = {
{0x0093,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
@@ -3614,7 +3634,7 @@ static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822c[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822c[] = {
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822c[] = {
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
@@ -3677,47 +3697,47 @@ static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822c[] = {
RTW_PWR_CMD_END, 0, 0},
};
-static struct rtw_pwr_seq_cmd *card_enable_flow_8822c[] = {
+static const struct rtw_pwr_seq_cmd *card_enable_flow_8822c[] = {
trans_carddis_to_cardemu_8822c,
trans_cardemu_to_act_8822c,
NULL
};
-static struct rtw_pwr_seq_cmd *card_disable_flow_8822c[] = {
+static const struct rtw_pwr_seq_cmd *card_disable_flow_8822c[] = {
trans_act_to_cardemu_8822c,
trans_cardemu_to_carddis_8822c,
NULL
};
-static struct rtw_intf_phy_para usb2_param_8822c[] = {
+static const struct rtw_intf_phy_para usb2_param_8822c[] = {
{0xFFFF, 0x00,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para usb3_param_8822c[] = {
+static const struct rtw_intf_phy_para usb3_param_8822c[] = {
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para pcie_gen1_param_8822c[] = {
+static const struct rtw_intf_phy_para pcie_gen1_param_8822c[] = {
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para pcie_gen2_param_8822c[] = {
+static const struct rtw_intf_phy_para pcie_gen2_param_8822c[] = {
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
-static struct rtw_intf_phy_para_table phy_para_table_8822c = {
+static const struct rtw_intf_phy_para_table phy_para_table_8822c = {
.usb2_para = usb2_param_8822c,
.usb3_para = usb3_param_8822c,
.gen1_para = pcie_gen1_param_8822c,
@@ -3734,12 +3754,12 @@ static const struct rtw_rfe_def rtw8822c_rfe_defs[] = {
[2] = RTW_DEF_RFE(8822c, 0, 0),
};
-static struct rtw_hw_reg rtw8822c_dig[] = {
+static const struct rtw_hw_reg rtw8822c_dig[] = {
[0] = { .addr = 0x1d70, .mask = 0x7f },
[1] = { .addr = 0x1d70, .mask = 0x7f00 },
};
-static struct rtw_page_table page_table_8822c[] = {
+static const struct rtw_page_table page_table_8822c[] = {
{64, 64, 64, 64, 1},
{64, 64, 64, 64, 1},
{64, 64, 0, 0, 1},
@@ -3747,7 +3767,7 @@ static struct rtw_page_table page_table_8822c[] = {
{64, 64, 64, 64, 1},
};
-static struct rtw_rqpn rqpn_table_8822c[] = {
+static const struct rtw_rqpn rqpn_table_8822c[] = {
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
@@ -4072,6 +4092,31 @@ static const struct wiphy_wowlan_support rtw_wowlan_stub_8822c = {
};
#endif
+static const struct rtw_reg_domain coex_info_hw_regs_8822c[] = {
+ {0x1860, BIT(3), RTW_REG_DOMAIN_MAC8},
+ {0x4160, BIT(3), RTW_REG_DOMAIN_MAC8},
+ {0x1c32, BIT(6), RTW_REG_DOMAIN_MAC8},
+ {0x1c38, BIT(28), RTW_REG_DOMAIN_MAC32},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x430, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x434, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x42a, MASKLWORD, RTW_REG_DOMAIN_MAC16},
+ {0x426, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0x45e, BIT(3), RTW_REG_DOMAIN_MAC8},
+ {0x454, MASKLWORD, RTW_REG_DOMAIN_MAC16},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x4c, BIT(24) | BIT(23), RTW_REG_DOMAIN_MAC32},
+ {0x64, BIT(0), RTW_REG_DOMAIN_MAC8},
+ {0x4c6, BIT(4), RTW_REG_DOMAIN_MAC8},
+ {0x40, BIT(5), RTW_REG_DOMAIN_MAC8},
+ {0x1, RFREG_MASK, RTW_REG_DOMAIN_RF_B},
+ {0, 0, RTW_REG_DOMAIN_NL},
+ {0x550, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+ {0x522, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+ {0x953, BIT(1), RTW_REG_DOMAIN_MAC8},
+ {0xc50, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+};
+
struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
@@ -4108,7 +4153,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.agc_tbl = &rtw8822c_agc_tbl,
.bb_tbl = &rtw8822c_bb_tbl,
.rfk_init_tbl = &rtw8822c_array_mp_cal_init_tbl,
- .rf_tbl = {&rtw8822c_rf_a_tbl, &rtw8822c_rf_b_tbl},
+ .rf_tbl = {&rtw8822c_rf_b_tbl, &rtw8822c_rf_a_tbl},
.rfe_defs = rtw8822c_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8822c_rfe_defs),
.en_dis_dpd = true,
@@ -4148,6 +4193,9 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.bt_afh_span_bw40 = 0x36,
.afh_5g_num = ARRAY_SIZE(afh_5g_8822c),
.afh_5g = afh_5g_8822c,
+
+ .coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8822c),
+ .coex_info_hw_regs = coex_info_hw_regs_8822c,
};
EXPORT_SYMBOL(rtw8822c_hw_spec);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index abd9f300bedd..dfd8662a0c0e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -190,6 +190,8 @@ const struct rtw_table name ## _tbl = { \
#define BIT_3WIRE_TX_EN BIT(0)
#define BIT_3WIRE_RX_EN BIT(1)
#define BIT_3WIRE_PI_ON BIT(28)
+#define REG_ANAPAR_A 0x1830
+#define BIT_ANAPAR_UPDATE BIT(29)
#define REG_RXAGCCTL0 0x18ac
#define BITS_RXAGC_CCK GENMASK(15, 12)
#define BITS_RXAGC_OFDM GENMASK(8, 4)
@@ -223,6 +225,8 @@ const struct rtw_table name ## _tbl = { \
#define BIT_CCK_BLK_EN BIT(1)
#define BIT_CCK_OFDM_BLK_EN (BIT(0) | BIT(1))
#define REG_CCAMSK 0x1c80
+#define REG_RSTB 0x1c90
+#define BIT_RSTB_3WIRE BIT(8)
#define REG_RX_BREAK 0x1d2c
#define BIT_COM_RX_GCK_EN BIT(31)
#define REG_RXFNCTL 0x1d30
@@ -243,6 +247,7 @@ const struct rtw_table name ## _tbl = { \
#define REG_OFDM_TXCNT 0x2de0
#define REG_ORITXCODE2 0x4100
#define REG_3WIRE2 0x410c
+#define REG_ANAPAR_B 0x4130
#define REG_RXAGCCTL 0x41ac
#define REG_DCKB_I_0 0x41bc
#define REG_DCKB_I_1 0x41c0
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index 24c39c60c99a..60989987f67b 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -6,6 +6,7 @@
#include "tx.h"
#include "fw.h"
#include "ps.h"
+#include "debug.h"
static
void rtw_tx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
@@ -57,6 +58,9 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
SET_TX_DESC_SPE_RPT(txdesc, pkt_info->report);
SET_TX_DESC_SW_DEFINE(txdesc, pkt_info->sn);
SET_TX_DESC_USE_RTS(txdesc, pkt_info->rts);
+ SET_TX_DESC_DISQSELSEQ(txdesc, pkt_info->dis_qselseq);
+ SET_TX_DESC_EN_HWSEQ(txdesc, pkt_info->en_hwseq);
+ SET_TX_DESC_HW_SSN_SEL(txdesc, pkt_info->hw_ssn_sel);
}
EXPORT_SYMBOL(rtw_tx_fill_tx_desc);
@@ -220,20 +224,22 @@ void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
- struct ieee80211_tx_control *control,
+ struct ieee80211_sta *sta,
struct sk_buff *skb)
{
pkt_info->use_rate = true;
pkt_info->rate_id = 6;
pkt_info->dis_rate_fallback = true;
+ pkt_info->dis_qselseq = true;
+ pkt_info->en_hwseq = true;
+ pkt_info->hw_ssn_sel = 0;
}
static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
- struct ieee80211_tx_control *control,
+ struct ieee80211_sta *sta,
struct sk_buff *skb)
{
- struct ieee80211_sta *sta = control->sta;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct rtw_sta_info *si;
@@ -292,7 +298,7 @@ out:
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
- struct ieee80211_tx_control *control,
+ struct ieee80211_sta *sta,
struct sk_buff *skb)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -304,15 +310,15 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
u8 sec_type = 0;
bool bmc;
- if (control->sta) {
- si = (struct rtw_sta_info *)control->sta->drv_priv;
+ if (sta) {
+ si = (struct rtw_sta_info *)sta->drv_priv;
vif = si->vif;
}
if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
- rtw_tx_mgmt_pkt_info_update(rtwdev, pkt_info, control, skb);
+ rtw_tx_mgmt_pkt_info_update(rtwdev, pkt_info, sta, skb);
else if (ieee80211_is_data(fc))
- rtw_tx_data_pkt_info_update(rtwdev, pkt_info, control, skb);
+ rtw_tx_data_pkt_info_update(rtwdev, pkt_info, sta, skb);
if (info->control.hw_key) {
struct ieee80211_key_conf *key = info->control.hw_key;
@@ -368,15 +374,74 @@ void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
pkt_info->ls = true;
}
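+/*
+ * Allocate an skb for writing reserved-page data to the device: headroom
+ * for the TX descriptor is reserved up front, so the HCI layer can later
+ * push the descriptor in front of the payload.
+ */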
+struct sk_buff *
+rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *buf, u32 size)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct sk_buff *skb;
+ u32 tx_pkt_desc_sz;
+ u32 length;
+
+ tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
+ length = size + tx_pkt_desc_sz;
+ skb = dev_alloc_skb(length);
+ if (!skb) {
+ rtw_err(rtwdev, "failed to alloc write data rsvd page skb\n");
+ return NULL;
+ }
+
+ skb_reserve(skb, tx_pkt_desc_sz);
+ skb_put_data(skb, buf, size);
+ pkt_info->tx_pkt_size = size;
+ pkt_info->offset = tx_pkt_desc_sz;
+
+ return skb;
+}
+EXPORT_SYMBOL(rtw_tx_write_data_rsvd_page_get);
+
+struct sk_buff *
+rtw_tx_write_data_h2c_get(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *buf, u32 size)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct sk_buff *skb;
+ u32 tx_pkt_desc_sz;
+ u32 length;
+
+ tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
+ length = size + tx_pkt_desc_sz;
+ skb = dev_alloc_skb(length);
+ if (!skb) {
+ rtw_err(rtwdev, "failed to alloc write data h2c skb\n");
+ return NULL;
+ }
+
+ skb_reserve(skb, tx_pkt_desc_sz);
+ skb_put_data(skb, buf, size);
+ pkt_info->tx_pkt_size = size;
+
+ return skb;
+}
+EXPORT_SYMBOL(rtw_tx_write_data_h2c_get);
+
void rtw_tx(struct rtw_dev *rtwdev,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct rtw_tx_pkt_info pkt_info = {0};
+ int ret;
- rtw_tx_pkt_info_update(rtwdev, &pkt_info, control, skb);
- if (rtw_hci_tx(rtwdev, &pkt_info, skb))
+ rtw_tx_pkt_info_update(rtwdev, &pkt_info, control->sta, skb);
+ ret = rtw_hci_tx_write(rtwdev, &pkt_info, skb);
+ if (ret) {
+ rtw_err(rtwdev, "failed to write TX skb to HCI\n");
goto out;
+ }
+
+ rtw_hci_tx_kick_off(rtwdev);
return;
@@ -416,38 +481,62 @@ static void rtw_txq_check_agg(struct rtw_dev *rtwdev,
ieee80211_queue_work(rtwdev->hw, &rtwdev->ba_work);
}
-static bool rtw_txq_dequeue(struct rtw_dev *rtwdev,
- struct rtw_txq *rtwtxq)
+static int rtw_txq_push_skb(struct rtw_dev *rtwdev,
+ struct rtw_txq *rtwtxq,
+ struct sk_buff *skb)
{
struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
- struct ieee80211_tx_control control;
- struct sk_buff *skb;
-
- skb = ieee80211_tx_dequeue(rtwdev->hw, txq);
- if (!skb)
- return false;
+ struct rtw_tx_pkt_info pkt_info = {0};
+ int ret;
rtw_txq_check_agg(rtwdev, rtwtxq, skb);
- control.sta = txq->sta;
- rtw_tx(rtwdev, &control, skb);
+ rtw_tx_pkt_info_update(rtwdev, &pkt_info, txq->sta, skb);
+ ret = rtw_hci_tx_write(rtwdev, &pkt_info, skb);
+ if (ret) {
+ rtw_err(rtwdev, "failed to write TX skb to HCI\n");
+ return ret;
+ }
rtwtxq->last_push = jiffies;
- return true;
+ return 0;
+}
+
+static struct sk_buff *rtw_txq_dequeue(struct rtw_dev *rtwdev,
+ struct rtw_txq *rtwtxq)
+{
+ struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
+
+	return ieee80211_tx_dequeue(rtwdev->hw, txq);
}
static void rtw_txq_push(struct rtw_dev *rtwdev,
struct rtw_txq *rtwtxq,
unsigned long frames)
{
+ struct sk_buff *skb;
+ int ret;
int i;
rcu_read_lock();
- for (i = 0; i < frames; i++)
- if (!rtw_txq_dequeue(rtwdev, rtwtxq))
+ for (i = 0; i < frames; i++) {
+ skb = rtw_txq_dequeue(rtwdev, rtwtxq);
+ if (!skb)
break;
+ ret = rtw_txq_push_skb(rtwdev, rtwtxq, skb);
+ if (ret) {
+			rtw_err(rtwdev, "failed to push skb, ret %d\n", ret);
+ break;
+ }
+ }
+
rcu_read_unlock();
}
@@ -469,6 +558,8 @@ void rtw_tx_tasklet(unsigned long data)
list_del_init(&rtwtxq->list);
}
+ rtw_hci_tx_kick_off(rtwdev);
+
spin_unlock_bh(&rtwdev->txq_lock);
}
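
The TX path is now two-phase: frames are queued to the HCI with
rtw_hci_tx_write() and DMA is started once per batch with
rtw_hci_tx_kick_off(), as the tasklet above does. A minimal sketch of the
pattern (for_each_pending_skb() is an illustrative placeholder, not a real
helper):

	struct rtw_tx_pkt_info pkt_info = {0};

	/* queue any number of frames... */
	for_each_pending_skb(skb) {
		rtw_tx_pkt_info_update(rtwdev, &pkt_info, sta, skb);
		if (rtw_hci_tx_write(rtwdev, &pkt_info, skb))
			break;
	}

	/* ...then trigger DMA once for the whole batch */
	rtw_hci_tx_kick_off(rtwdev);
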
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index 9ca4f74a501b..b973de0f4dc0 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -53,6 +53,12 @@
le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, BIT(19))
#define SET_TX_DESC_SW_DEFINE(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x06, value, GENMASK(11, 0))
+#define SET_TX_DESC_DISQSELSEQ(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x00, value, BIT(31))
+#define SET_TX_DESC_EN_HWSEQ(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x08, value, BIT(15))
+#define SET_TX_DESC_HW_SSN_SEL(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(7, 6))
enum rtw_tx_desc_queue_select {
TX_DESC_QSEL_TID0 = 0,
@@ -85,7 +91,7 @@ void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
void rtw_tx_tasklet(unsigned long data);
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
- struct ieee80211_tx_control *control,
+ struct ieee80211_sta *sta,
struct sk_buff *skb);
void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb);
void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn);
@@ -93,5 +99,13 @@ void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct sk_buff *skb);
+struct sk_buff *
+rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *buf, u32 size);
+struct sk_buff *
+rtw_tx_write_data_h2c_get(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *buf, u32 size);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c
index 4820dca958dd..2fcdf70a3a77 100644
--- a/drivers/net/wireless/realtek/rtw88/wow.c
+++ b/drivers/net/wireless/realtek/rtw88/wow.c
@@ -431,50 +431,39 @@ static void rtw_wow_fw_media_status(struct rtw_dev *rtwdev, bool connect)
rtw_iterate_stas_atomic(rtwdev, rtw_wow_fw_media_status_iter, &data);
}
-static void rtw_wow_config_pno_rsvd_page(struct rtw_dev *rtwdev)
+static void rtw_wow_config_pno_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
{
- struct rtw_wow_param *rtw_wow = &rtwdev->wow;
- struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req;
- struct cfg80211_ssid *ssid;
- int i;
-
- for (i = 0 ; i < rtw_pno_req->match_set_cnt; i++) {
- ssid = &rtw_pno_req->match_sets[i].ssid;
- rtw_add_rsvd_page_probe_req(rtwdev, ssid);
- }
- rtw_add_rsvd_page_probe_req(rtwdev, NULL);
- rtw_add_rsvd_page(rtwdev, RSVD_NLO_INFO, false);
- rtw_add_rsvd_page(rtwdev, RSVD_CH_INFO, true);
+ rtw_add_rsvd_page_pno(rtwdev, rtwvif);
}
-static void rtw_wow_config_linked_rsvd_page(struct rtw_dev *rtwdev)
+static void rtw_wow_config_linked_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
{
- rtw_add_rsvd_page(rtwdev, RSVD_PS_POLL, true);
- rtw_add_rsvd_page(rtwdev, RSVD_QOS_NULL, true);
- rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
- rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_DPK, true);
- rtw_add_rsvd_page(rtwdev, RSVD_LPS_PG_INFO, true);
+ rtw_add_rsvd_page_sta(rtwdev, rtwvif);
}
-static void rtw_wow_config_rsvd_page(struct rtw_dev *rtwdev)
+static void rtw_wow_config_rsvd_page(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif)
{
- rtw_reset_rsvd_page(rtwdev);
+ rtw_remove_rsvd_page(rtwdev, rtwvif);
if (rtw_wow_mgd_linked(rtwdev)) {
- rtw_wow_config_linked_rsvd_page(rtwdev);
+ rtw_wow_config_linked_rsvd_page(rtwdev, rtwvif);
} else if (test_bit(RTW_FLAG_WOWLAN, rtwdev->flags) &&
rtw_wow_no_link(rtwdev)) {
- rtw_wow_config_pno_rsvd_page(rtwdev);
+ rtw_wow_config_pno_rsvd_page(rtwdev, rtwvif);
}
}
static int rtw_wow_dl_fw_rsvd_page(struct rtw_dev *rtwdev)
{
struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)wow_vif->drv_priv;
- rtw_wow_config_rsvd_page(rtwdev);
+ rtw_wow_config_rsvd_page(rtwdev, rtwvif);
- return rtw_fw_download_rsvd_page(rtwdev, wow_vif);
+ return rtw_fw_download_rsvd_page(rtwdev);
}
static int rtw_wow_swap_fw(struct rtw_dev *rtwdev, enum rtw_fw_type type)
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 1bebba4e8527..5d6143a55187 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1468,12 +1468,15 @@ static void rsi_shutdown(struct device *dev)
struct rsi_91x_sdiodev *sdev =
(struct rsi_91x_sdiodev *)adapter->rsi_dev;
struct ieee80211_hw *hw = adapter->hw;
- struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
rsi_dbg(ERR_ZONE, "SDIO Bus shutdown =====>\n");
- if (rsi_config_wowlan(adapter, wowlan))
- rsi_dbg(ERR_ZONE, "Failed to configure WoWLAN\n");
+ if (hw) {
+ struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
+
+ if (rsi_config_wowlan(adapter, wowlan))
+ rsi_dbg(ERR_ZONE, "Failed to configure WoWLAN\n");
+ }
if (IS_ENABLED(CONFIG_RSI_COEX) && adapter->priv->coex_mode > 1 &&
adapter->priv->bt_adapter) {
diff --git a/drivers/net/wireless/st/cw1200/wsm.h b/drivers/net/wireless/st/cw1200/wsm.h
index ddea57f8c8ab..1ffa47994bb9 100644
--- a/drivers/net/wireless/st/cw1200/wsm.h
+++ b/drivers/net/wireless/st/cw1200/wsm.h
@@ -1623,7 +1623,7 @@ struct wsm_p2p_device_info {
u8 local_devname[D11_MAX_SSID_LEN];
u8 reserved2[3];
u8 num_secdev_supported;
- struct wsm_p2p_device_type secdevs[0];
+ struct wsm_p2p_device_type secdevs[];
} __packed;
/* 4.36 SetWCDMABand - WO */
diff --git a/drivers/net/wireless/ti/wl1251/cmd.h b/drivers/net/wireless/ti/wl1251/cmd.h
index 1c1a591c6055..e5874186f9d7 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.h
+++ b/drivers/net/wireless/ti/wl1251/cmd.h
@@ -90,7 +90,7 @@ struct wl1251_cmd_header {
u16 id;
u16 status;
/* payload */
- u8 data[0];
+ u8 data[];
} __packed;
struct wl1251_command {
@@ -281,7 +281,7 @@ struct wl1251_cmd_packet_template {
struct wl1251_cmd_header header;
__le16 size;
- u8 data[0];
+ u8 data[];
} __packed;
#define TIM_ELE_ID 5
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 94569cd695c8..c9a4e9a43400 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -15,9 +15,7 @@
#include <linux/wl12xx.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
-#include <linux/gpio.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include "wl1251.h"
@@ -160,15 +158,6 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
int ret;
if (enable) {
- /*
- * Power is controlled by runtime PM, but we still call board
- * callback in case it wants to do any additional setup,
- * for example enabling clock buffer for the module.
- */
- if (gpio_is_valid(wl->power_gpio))
- gpio_set_value(wl->power_gpio, true);
-
-
ret = pm_runtime_get_sync(&func->dev);
if (ret < 0) {
pm_runtime_put_sync(&func->dev);
@@ -186,9 +175,6 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
ret = pm_runtime_put_sync(&func->dev);
if (ret < 0)
goto out;
-
- if (gpio_is_valid(wl->power_gpio))
- gpio_set_value(wl->power_gpio, false);
}
out:
@@ -241,31 +227,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
wl1251_board_data = wl1251_get_platform_data();
if (!IS_ERR(wl1251_board_data)) {
- wl->power_gpio = wl1251_board_data->power_gpio;
wl->irq = wl1251_board_data->irq;
wl->use_eeprom = wl1251_board_data->use_eeprom;
} else if (np) {
- wl->use_eeprom = of_property_read_bool(np,
- "ti,wl1251-has-eeprom");
- wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0);
+ wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom");
wl->irq = of_irq_get(np, 0);
-
- if (wl->power_gpio == -EPROBE_DEFER ||
- wl->irq == -EPROBE_DEFER) {
+ if (wl->irq == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto disable;
}
}
- if (gpio_is_valid(wl->power_gpio)) {
- ret = devm_gpio_request(&func->dev, wl->power_gpio,
- "wl1251 power");
- if (ret) {
- wl1251_error("Failed to request gpio: %d\n", ret);
- goto disable;
- }
- }
-
if (wl->irq) {
irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
diff --git a/drivers/net/wireless/ti/wl1251/wl12xx_80211.h b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
index 7fabe702c4cc..7e28fe435b43 100644
--- a/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
+++ b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
@@ -65,7 +65,7 @@ struct ieee80211_header {
u8 sa[ETH_ALEN];
u8 bssid[ETH_ALEN];
__le16 seq_ctl;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct wl12xx_ie_header {
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index a265fba0cb4c..c725f5855c13 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -938,7 +938,7 @@ struct acx_rx_filter_cfg {
u8 action;
u8 num_fields;
- u8 fields[0];
+ u8 fields[];
} __packed;
struct acx_roaming_stats {
diff --git a/drivers/net/wireless/ti/wlcore/boot.h b/drivers/net/wireless/ti/wlcore/boot.h
index 14b367e98dce..24a2dfcb41ea 100644
--- a/drivers/net/wireless/ti/wlcore/boot.h
+++ b/drivers/net/wireless/ti/wlcore/boot.h
@@ -26,7 +26,7 @@ struct wl1271_static_data {
u8 fw_version[WL1271_FW_VERSION_MAX_LEN];
u32 hw_version;
u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS];
- u8 priv[0];
+ u8 priv[];
};
/* number of times we try to read the INIT interrupt */
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index bfad7b5a1ac6..f2609d5b6bf7 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -209,7 +209,7 @@ struct wl1271_cmd_header {
__le16 id;
__le16 status;
/* payload */
- u8 data[0];
+ u8 data[];
} __packed;
#define WL1271_CMD_MAX_PARAMS 572
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 6116383ee248..31be425f2332 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -1150,7 +1150,7 @@ struct wlcore_conf {
struct wlcore_conf_file {
struct wlcore_conf_header header;
struct wlcore_conf core;
- u8 priv[0];
+ u8 priv[];
} __packed;
#endif
diff --git a/drivers/net/wireless/ti/wlcore/wl12xx_80211.h b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
index 181be725eff8..1dd7ecc11f86 100644
--- a/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
+++ b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
@@ -66,7 +66,7 @@ struct ieee80211_header {
u8 sa[ETH_ALEN];
u8 bssid[ETH_ALEN];
__le16 seq_ctl;
- u8 payload[0];
+ u8 payload[];
} __packed;
struct wl12xx_ie_header {
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 01305ba2d3aa..c878097f0dda 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -436,10 +436,18 @@ static int virt_wifi_net_device_stop(struct net_device *dev)
return 0;
}
+static int virt_wifi_net_device_get_iflink(const struct net_device *dev)
+{
+ struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
+
+ return priv->lowerdev->ifindex;
+}
+
static const struct net_device_ops virt_wifi_ops = {
.ndo_start_xmit = virt_wifi_start_xmit,
- .ndo_open = virt_wifi_net_device_open,
- .ndo_stop = virt_wifi_net_device_stop,
+ .ndo_open = virt_wifi_net_device_open,
+ .ndo_stop = virt_wifi_net_device_stop,
+ .ndo_get_iflink = virt_wifi_net_device_get_iflink,
};
/* Invoked as part of rtnl lock release. */
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index efdce9ae36ea..b446cb369557 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -231,7 +231,7 @@ struct iw_mgmt_info_element {
u8 id; /* one of enum iw_mgmt_info_element_ids,
but sizeof(enum) > sizeof(u8) :-( */
u8 len;
- u8 data[0];
+ u8 data[];
} __packed;
struct iw_mgmt_essid_pset {
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.h b/drivers/net/wireless/zydas/zd1211rw/zd_usb.h
index a52ee323a142..8f03b09a602c 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.h
@@ -69,7 +69,7 @@ enum control_requests {
struct usb_req_read_regs {
__le16 id;
- __le16 addr[0];
+ __le16 addr[];
} __packed;
struct reg_data {
@@ -79,7 +79,7 @@ struct reg_data {
struct usb_req_write_regs {
__le16 id;
- struct reg_data reg_writes[0];
+ struct reg_data reg_writes[];
} __packed;
enum {
@@ -95,7 +95,7 @@ struct usb_req_rfwrite {
/* 2: other (default) */
__le16 bits;
/* RF2595: 24 */
- __le16 bit_values[0];
+ __le16 bit_values[];
/* (ZD_CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */
} __packed;
@@ -118,7 +118,7 @@ struct usb_int_header {
struct usb_int_regs {
struct usb_int_header hdr;
- struct reg_data regs[0];
+ struct reg_data regs[];
} __packed;
struct usb_int_retry_fail {
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index ed2123129e0e..4dc7bd7e02b6 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -76,7 +76,7 @@ static u8 nci_core_get_config_otp_ram_version[5] = {
struct nci_core_get_config_rsp {
u8 status;
u8 count;
- u8 data[0];
+ u8 data[];
};
static int fdp_nci_create_conn(struct nci_dev *ndev)
diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c
index 60acdfd1cb8c..a1d69f9b2d4a 100644
--- a/drivers/nfc/st21nfca/dep.c
+++ b/drivers/nfc/st21nfca/dep.c
@@ -66,7 +66,7 @@ struct st21nfca_atr_req {
u8 bsi;
u8 bri;
u8 ppi;
- u8 gbi[0];
+ u8 gbi[];
} __packed;
struct st21nfca_atr_res {
@@ -79,7 +79,7 @@ struct st21nfca_atr_res {
u8 bri;
u8 to;
u8 ppi;
- u8 gbi[0];
+ u8 gbi[];
} __packed;
struct st21nfca_psl_req {
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index fffa77093c08..4f4f54bc732e 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -50,12 +50,7 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
(PCI_STATUS_CAP_LIST | PCI_STATUS_66MHZ |
PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MASK) << 16),
.rsvd = GENMASK(15, 10) | ((BIT(6) | GENMASK(3, 0)) << 16),
- .w1c = (PCI_STATUS_PARITY |
- PCI_STATUS_SIG_TARGET_ABORT |
- PCI_STATUS_REC_TARGET_ABORT |
- PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_DETECTED_PARITY) << 16,
+ .w1c = PCI_STATUS_ERROR_BITS << 16,
},
[PCI_CLASS_REVISION / 4] = { .ro = ~0 },
@@ -100,12 +95,7 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
PCI_STATUS_DEVSEL_MASK) << 16) |
GENMASK(11, 8) | GENMASK(3, 0)),
- .w1c = (PCI_STATUS_PARITY |
- PCI_STATUS_SIG_TARGET_ABORT |
- PCI_STATUS_REC_TARGET_ABORT |
- PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_DETECTED_PARITY) << 16,
+ .w1c = PCI_STATUS_ERROR_BITS << 16,
.rsvd = ((BIT(6) | GENMASK(4, 0)) << 16),
},
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d828ca835a98..86821313c007 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -173,6 +173,29 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus)
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
+/**
+ * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
+ * @pdev: the PCI device
+ *
+ * Returns error bits set in PCI_STATUS and clears them.
+ */
+int pci_status_get_and_clear_errors(struct pci_dev *pdev)
+{
+ u16 status;
+ int ret;
+
+ ret = pci_read_config_word(pdev, PCI_STATUS, &status);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return -EIO;
+
+ status &= PCI_STATUS_ERROR_BITS;
+ if (status)
+ pci_write_config_word(pdev, PCI_STATUS, status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
+
#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
@@ -557,6 +580,40 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
+/**
+ * pci_get_dsn - Read and return the 8-byte Device Serial Number
+ * @dev: PCI device to query
+ *
+ * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
+ * Number.
+ *
+ * Returns the DSN, or zero if the capability does not exist.
+ */
+u64 pci_get_dsn(struct pci_dev *dev)
+{
+ u32 dword;
+ u64 dsn;
+ int pos;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
+ if (!pos)
+ return 0;
+
+ /*
+ * The Device Serial Number is two dwords offset 4 bytes from the
+ * capability position. The specification says that the first dword is
+ * the lower half, and the second dword is the upper half.
+ */
+ pos += 4;
+ pci_read_config_dword(dev, pos, &dword);
+ dsn = (u64)dword;
+ pci_read_config_dword(dev, pos + 4, &dword);
+ dsn |= ((u64)dword) << 32;
+
+ return dsn;
+}
+EXPORT_SYMBOL_GPL(pci_get_dsn);
+
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
int rc, ttl = PCI_FIND_CAP_TTL;
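
The two helpers above pair naturally at probe time. A minimal sketch of a
consumer (the function name and messages are illustrative, not part of the
patch):

	static void example_probe_checks(struct pci_dev *pdev)
	{
		int errs;
		u64 dsn;

		/* Clear error bits left set by firmware or a previous owner. */
		errs = pci_status_get_and_clear_errors(pdev);
		if (errs > 0)
			pci_info(pdev, "cleared PCI_STATUS errors: %#06x\n", errs);

		/* Report the Device Serial Number, if the capability exists. */
		dsn = pci_get_dsn(pdev);
		if (dsn)
			pci_info(pdev, "device serial number: %016llx\n", dsn);
	}
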
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index 6dbe9d0b9ff3..15a3bcf32308 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -106,11 +106,8 @@ config TWL4030_USB
config PHY_TI_GMII_SEL
tristate
- default y if TI_CPSW=y || TI_CPSW_SWITCHDEV=y
- depends on TI_CPSW || TI_CPSW_SWITCHDEV || COMPILE_TEST
select GENERIC_PHY
select REGMAP
- default m
help
This driver supports configuring of the TI CPSW Port mode depending on
the Ethernet PHY connected to the CPSW Port.
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 475c60dccaa4..86400c708150 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -115,6 +115,18 @@ config PTP_1588_CLOCK_KVM
To compile this driver as a module, choose M here: the module
will be called ptp_kvm.
+config PTP_1588_CLOCK_IDT82P33
+ tristate "IDT 82P33xxx PTP clock"
+ depends on PTP_1588_CLOCK && I2C
+ default n
+ help
+ This driver adds support for using the IDT 82P33xxx as a PTP
+ clock. This clock is only useful if your time stamping MAC
+ is connected to the IDT chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_idt82p33.
+
config PTP_1588_CLOCK_IDTCM
tristate "IDT CLOCKMATRIX as PTP clock"
depends on PTP_1588_CLOCK && I2C
@@ -127,4 +139,16 @@ config PTP_1588_CLOCK_IDTCM
To compile this driver as a module, choose M here: the module
will be called ptp_clockmatrix.
+config PTP_1588_CLOCK_VMW
+ tristate "VMware virtual PTP clock"
+ depends on ACPI && HYPERVISOR_GUEST && X86
+ depends on PTP_1588_CLOCK
+ help
+	  This driver adds support for using the VMware virtual precision
+ clock device as a PTP clock. This is only useful in virtual
+ machines running on VMware virtual infrastructure.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_vmw.
+
endmenu
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 8c830336f178..7aff75f745dc 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
ptp-qoriq-y += ptp_qoriq.o
ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
+obj-$(CONFIG_PTP_1588_CLOCK_IDT82P33) += ptp_idt82p33.o
+obj-$(CONFIG_PTP_1588_CLOCK_VMW) += ptp_vmw.o
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 9d72ab593f13..93d574faf1fe 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -175,7 +175,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
}
req.type = PTP_CLK_REQ_EXTTS;
enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0;
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
err = ops->enable(ops, &req, enable);
+ mutex_unlock(&ptp->pincfg_mux);
break;
case PTP_PEROUT_REQUEST:
@@ -206,7 +209,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
}
req.type = PTP_CLK_REQ_PEROUT;
enable = req.perout.period.sec || req.perout.period.nsec;
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
err = ops->enable(ops, &req, enable);
+ mutex_unlock(&ptp->pincfg_mux);
break;
case PTP_ENABLE_PPS:
@@ -217,7 +223,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
return -EPERM;
req.type = PTP_CLK_REQ_PPS;
enable = arg ? 1 : 0;
+ if (mutex_lock_interruptible(&ptp->pincfg_mux))
+ return -ERESTARTSYS;
err = ops->enable(ops, &req, enable);
+ mutex_unlock(&ptp->pincfg_mux);
break;
case PTP_SYS_OFFSET_PRECISE:
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index ac1f2bf9e888..acabbe72e55e 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -348,7 +348,6 @@ int ptp_find_pin(struct ptp_clock *ptp,
struct ptp_pin_desc *pin = NULL;
int i;
- mutex_lock(&ptp->pincfg_mux);
for (i = 0; i < ptp->info->n_pins; i++) {
if (ptp->info->pin_config[i].func == func &&
ptp->info->pin_config[i].chan == chan) {
@@ -356,12 +355,26 @@ int ptp_find_pin(struct ptp_clock *ptp,
break;
}
}
- mutex_unlock(&ptp->pincfg_mux);
return pin ? i : -1;
}
EXPORT_SYMBOL(ptp_find_pin);
+int ptp_find_pin_unlocked(struct ptp_clock *ptp,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ int result;
+
+ mutex_lock(&ptp->pincfg_mux);
+
+ result = ptp_find_pin(ptp, func, chan);
+
+ mutex_unlock(&ptp->pincfg_mux);
+
+ return result;
+}
+EXPORT_SYMBOL(ptp_find_pin_unlocked);
+
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c
new file mode 100644
index 000000000000..b63ac240308b
--- /dev/null
+++ b/drivers/ptp/ptp_idt82p33.c
@@ -0,0 +1,1008 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 Integrated Device Technology, Inc
+//
+
+#define pr_fmt(fmt) "IDT_82p33xxx: " fmt
+
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/timekeeping.h>
+#include <linux/bitops.h>
+
+#include "ptp_private.h"
+#include "ptp_idt82p33.h"
+
+MODULE_DESCRIPTION("Driver for IDT 82p33xxx clock devices");
+MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+/* Module Parameters */
+u32 sync_tod_timeout = SYNC_TOD_TIMEOUT_SEC;
+module_param(sync_tod_timeout, uint, 0);
+MODULE_PARM_DESC(sync_tod_timeout,
+"duration in second to keep SYNC_TOD on (set to 0 to keep it always on)");
+
+u32 phase_snap_threshold = SNAP_THRESHOLD_NS;
+module_param(phase_snap_threshold, uint, 0);
+MODULE_PARM_DESC(phase_snap_threshold,
+"threshold (150000ns by default) below which adjtime would ignore");
+
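+/*
+ * The hardware TOD registers hold 10 bytes, little-endian: bytes 0-3
+ * carry the nanoseconds field and bytes 4-9 the seconds field.
+ */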
+static void idt82p33_byte_array_to_timespec(struct timespec64 *ts,
+ u8 buf[TOD_BYTE_COUNT])
+{
+ time64_t sec;
+ s32 nsec;
+ u8 i;
+
+ nsec = buf[3];
+ for (i = 0; i < 3; i++) {
+ nsec <<= 8;
+ nsec |= buf[2 - i];
+ }
+
+ sec = buf[9];
+ for (i = 0; i < 5; i++) {
+ sec <<= 8;
+ sec |= buf[8 - i];
+ }
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+static void idt82p33_timespec_to_byte_array(struct timespec64 const *ts,
+ u8 buf[TOD_BYTE_COUNT])
+{
+ time64_t sec;
+ s32 nsec;
+ u8 i;
+
+ nsec = ts->tv_nsec;
+ sec = ts->tv_sec;
+
+ for (i = 0; i < 4; i++) {
+ buf[i] = nsec & 0xff;
+ nsec >>= 8;
+ }
+
+ for (i = 4; i < TOD_BYTE_COUNT; i++) {
+ buf[i] = sec & 0xff;
+ sec >>= 8;
+ }
+}
+
+static int idt82p33_xfer(struct idt82p33 *idt82p33,
+ unsigned char regaddr,
+ unsigned char *buf,
+ unsigned int count,
+ int write)
+{
+ struct i2c_client *client = idt82p33->client;
+ struct i2c_msg msg[2];
+ int cnt;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &regaddr;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = write ? 0 : I2C_M_RD;
+ msg[1].len = count;
+ msg[1].buf = buf;
+
+ cnt = i2c_transfer(client->adapter, msg, 2);
+ if (cnt < 0) {
+ dev_err(&client->dev, "i2c_transfer returned %d\n", cnt);
+ return cnt;
+ } else if (cnt != 2) {
+ dev_err(&client->dev,
+ "i2c_transfer sent only %d of %d messages\n", cnt, 2);
+ return -EIO;
+ }
+ return 0;
+}
+
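+/*
+ * The register map is paged: _PAGE() of an address selects the page via
+ * the PAGE_ADDR register and _OFFSET() addresses within it. The last
+ * page written is cached so repeated accesses to one page skip the
+ * extra transfer.
+ */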
+static int idt82p33_page_offset(struct idt82p33 *idt82p33, unsigned char val)
+{
+ int err;
+
+ if (idt82p33->page_offset == val)
+ return 0;
+
+ err = idt82p33_xfer(idt82p33, PAGE_ADDR, &val, sizeof(val), 1);
+ if (err)
+ dev_err(&idt82p33->client->dev,
+ "failed to set page offset %d\n", val);
+ else
+ idt82p33->page_offset = val;
+
+ return err;
+}
+
+static int idt82p33_rdwr(struct idt82p33 *idt82p33, unsigned int regaddr,
+ unsigned char *buf, unsigned int count, bool write)
+{
+ u8 offset, page;
+ int err;
+
+ page = _PAGE(regaddr);
+ offset = _OFFSET(regaddr);
+
+ err = idt82p33_page_offset(idt82p33, page);
+ if (err)
+ goto out;
+
+ err = idt82p33_xfer(idt82p33, offset, buf, count, write);
+out:
+ return err;
+}
+
+static int idt82p33_read(struct idt82p33 *idt82p33, unsigned int regaddr,
+ unsigned char *buf, unsigned int count)
+{
+ return idt82p33_rdwr(idt82p33, regaddr, buf, count, false);
+}
+
+static int idt82p33_write(struct idt82p33 *idt82p33, unsigned int regaddr,
+ unsigned char *buf, unsigned int count)
+{
+ return idt82p33_rdwr(idt82p33, regaddr, buf, count, true);
+}
+
+static int idt82p33_dpll_set_mode(struct idt82p33_channel *channel,
+ enum pll_mode mode)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 dpll_mode;
+ int err;
+
+ if (channel->pll_mode == mode)
+ return 0;
+
+ err = idt82p33_read(idt82p33, channel->dpll_mode_cnfg,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
+
+ dpll_mode |= (mode << PLL_MODE_SHIFT);
+
+ err = idt82p33_write(idt82p33, channel->dpll_mode_cnfg,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ channel->pll_mode = dpll_mode;
+
+ return 0;
+}
+
+static int _idt82p33_gettime(struct idt82p33_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 buf[TOD_BYTE_COUNT];
+ u8 trigger;
+ int err;
+
+ trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
+
+
+ err = idt82p33_write(idt82p33, channel->dpll_tod_trigger,
+ &trigger, sizeof(trigger));
+
+ if (err)
+ return err;
+
+ if (idt82p33->calculate_overhead_flag)
+ idt82p33->start_time = ktime_get_raw();
+
+ err = idt82p33_read(idt82p33, channel->dpll_tod_sts, buf, sizeof(buf));
+
+ if (err)
+ return err;
+
+ idt82p33_byte_array_to_timespec(ts, buf);
+
+ return 0;
+}
+
+/*
+ * TOD Trigger:
+ * Bits[7:4] Write 0x9, MSB write
+ * Bits[3:0] Read 0x9, LSB read
+ */
+
+static int _idt82p33_settime(struct idt82p33_channel *channel,
+ struct timespec64 const *ts)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ struct timespec64 local_ts = *ts;
+	u8 buf[TOD_BYTE_COUNT];
+ s64 dynamic_overhead_ns;
+ unsigned char trigger;
+ int err;
+ u8 i;
+
+ trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
+
+ err = idt82p33_write(idt82p33, channel->dpll_tod_trigger,
+ &trigger, sizeof(trigger));
+
+ if (err)
+ return err;
+
+ if (idt82p33->calculate_overhead_flag) {
+ dynamic_overhead_ns = ktime_to_ns(ktime_get_raw())
+ - ktime_to_ns(idt82p33->start_time);
+
+ timespec64_add_ns(&local_ts, dynamic_overhead_ns);
+
+ idt82p33->calculate_overhead_flag = 0;
+ }
+
+ idt82p33_timespec_to_byte_array(&local_ts, buf);
+
+ /*
+ * Store the new time value.
+ */
+ for (i = 0; i < TOD_BYTE_COUNT; i++) {
+ err = idt82p33_write(idt82p33, channel->dpll_tod_cnfg + i,
+ &buf[i], sizeof(buf[i]));
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int _idt82p33_adjtime(struct idt82p33_channel *channel, s64 delta_ns)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ struct timespec64 ts;
+ s64 now_ns;
+ int err;
+
+ idt82p33->calculate_overhead_flag = 1;
+
+ err = _idt82p33_gettime(channel, &ts);
+
+ if (err)
+ return err;
+
+ now_ns = timespec64_to_ns(&ts);
+ now_ns += delta_ns + idt82p33->tod_write_overhead_ns;
+
+ ts = ns_to_timespec64(now_ns);
+
+ err = _idt82p33_settime(channel, &ts);
+
+ return err;
+}
+
+static int _idt82p33_adjfine(struct idt82p33_channel *channel, long scaled_ppm)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ unsigned char buf[5] = {0};
+ int neg_adj = 0;
+ int err, i;
+ s64 fcw;
+
+ if (scaled_ppm == channel->current_freq_ppb)
+ return 0;
+
+ /*
+ * Frequency Control Word unit is: 1.68 * 10^-10 ppm
+ *
+ * adjfreq:
+ * ppb * 10^9
+ * FCW = ----------
+ * 168
+ *
+	 * adjfine:
+	 *           scaled_ppm * 5^12
+	 *     FCW = -----------------
+	 *               168 * 2^4
+	 */
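+	/*
+	 * Since 5^12 = 244140625 and 168 * 2^4 = 2688, the adjfine form
+	 * reduces to fcw = scaled_ppm * 244140625 / 2688, as computed
+	 * below.
+	 */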
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ fcw = scaled_ppm * 244140625ULL;
+ fcw = div_u64(fcw, 2688);
+
+ if (neg_adj)
+ fcw = -fcw;
+
+ for (i = 0; i < 5; i++) {
+ buf[i] = fcw & 0xff;
+ fcw >>= 8;
+ }
+
+ err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO);
+
+ if (err)
+ return err;
+
+ err = idt82p33_write(idt82p33, channel->dpll_freq_cnfg,
+ buf, sizeof(buf));
+
+ if (err == 0)
+ channel->current_freq_ppb = scaled_ppm;
+
+ return err;
+}
+
+static int idt82p33_measure_one_byte_write_overhead(
+ struct idt82p33_channel *channel, s64 *overhead_ns)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ ktime_t start, stop;
+ s64 total_ns;
+ u8 trigger;
+ int err;
+ u8 i;
+
+ total_ns = 0;
+ *overhead_ns = 0;
+ trigger = TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS);
+
+ for (i = 0; i < MAX_MEASURMENT_COUNT; i++) {
+
+ start = ktime_get_raw();
+
+ err = idt82p33_write(idt82p33, channel->dpll_tod_trigger,
+ &trigger, sizeof(trigger));
+
+ stop = ktime_get_raw();
+
+ if (err)
+ return err;
+
+ total_ns += ktime_to_ns(stop) - ktime_to_ns(start);
+ }
+
+ *overhead_ns = div_s64(total_ns, MAX_MEASURMENT_COUNT);
+
+ return err;
+}
+
+static int idt82p33_measure_tod_write_9_byte_overhead(
+ struct idt82p33_channel *channel)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+	u8 buf[TOD_BYTE_COUNT] = {0};
+ ktime_t start, stop;
+ s64 total_ns;
+ int err = 0;
+ u8 i, j;
+
+ total_ns = 0;
+ idt82p33->tod_write_overhead_ns = 0;
+
+ for (i = 0; i < MAX_MEASURMENT_COUNT; i++) {
+
+ start = ktime_get_raw();
+
+ /* Need one less byte for applicable overhead */
+ for (j = 0; j < (TOD_BYTE_COUNT - 1); j++) {
+			err = idt82p33_write(idt82p33,
+					     channel->dpll_tod_cnfg + j,
+					     &buf[j], sizeof(buf[j]));
+ if (err)
+ return err;
+ }
+
+ stop = ktime_get_raw();
+
+ total_ns += ktime_to_ns(stop) - ktime_to_ns(start);
+ }
+
+ idt82p33->tod_write_overhead_ns = div_s64(total_ns,
+ MAX_MEASURMENT_COUNT);
+
+ return err;
+}
+
+static int idt82p33_measure_settime_gettime_gap_overhead(
+ struct idt82p33_channel *channel, s64 *overhead_ns)
+{
+ struct timespec64 ts1 = {0, 0};
+ struct timespec64 ts2;
+ int err;
+
+ *overhead_ns = 0;
+
+ err = _idt82p33_settime(channel, &ts1);
+
+ if (err)
+ return err;
+
+ err = _idt82p33_gettime(channel, &ts2);
+
+ if (!err)
+ *overhead_ns = timespec64_to_ns(&ts2) - timespec64_to_ns(&ts1);
+
+ return err;
+}
+
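+/*
+ * Estimate how much bus time a full TOD write consumes so that
+ * _idt82p33_adjtime() can compensate for it when programming a new time.
+ */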
+static int idt82p33_measure_tod_write_overhead(struct idt82p33_channel *channel)
+{
+ s64 trailing_overhead_ns, one_byte_write_ns, gap_ns;
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ idt82p33->tod_write_overhead_ns = 0;
+
+ err = idt82p33_measure_settime_gettime_gap_overhead(channel, &gap_ns);
+
+ if (err)
+ return err;
+
+ err = idt82p33_measure_one_byte_write_overhead(channel,
+ &one_byte_write_ns);
+
+ if (err)
+ return err;
+
+ err = idt82p33_measure_tod_write_9_byte_overhead(channel);
+
+ if (err)
+ return err;
+
+ trailing_overhead_ns = gap_ns - (2 * one_byte_write_ns);
+
+ idt82p33->tod_write_overhead_ns -= trailing_overhead_ns;
+
+ return err;
+}
+
+static int idt82p33_check_and_set_masks(struct idt82p33 *idt82p33,
+ u8 page,
+ u8 offset,
+ u8 val)
+{
+ int err = 0;
+
+ if (page == PLLMASK_ADDR_HI && offset == PLLMASK_ADDR_LO) {
+ if ((val & 0xfc) || !(val & 0x3)) {
+ dev_err(&idt82p33->client->dev,
+ "Invalid PLL mask 0x%hhx\n", val);
+ err = -EINVAL;
+ } else {
+ idt82p33->pll_mask = val;
+ }
+ } else if (page == PLL0_OUTMASK_ADDR_HI &&
+ offset == PLL0_OUTMASK_ADDR_LO) {
+ idt82p33->channel[0].output_mask = val;
+ } else if (page == PLL1_OUTMASK_ADDR_HI &&
+ offset == PLL1_OUTMASK_ADDR_LO) {
+ idt82p33->channel[1].output_mask = val;
+ }
+
+ return err;
+}
+
+static void idt82p33_display_masks(struct idt82p33 *idt82p33)
+{
+ u8 mask, i;
+
+ dev_info(&idt82p33->client->dev,
+ "pllmask = 0x%02x\n", idt82p33->pll_mask);
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ mask = 1 << i;
+
+ if (mask & idt82p33->pll_mask)
+ dev_info(&idt82p33->client->dev,
+ "PLL%d output_mask = 0x%04x\n",
+ i, idt82p33->channel[i].output_mask);
+ }
+}
+
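+/*
+ * Toggle the DPLL's SYNC_TOD input. When enabling with a non-zero
+ * sync_tod_timeout, delayed work is (re)armed to switch SYNC_TOD back
+ * off after the timeout expires.
+ */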
+static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 sync_cnfg;
+ int err;
+
+ if (enable == channel->sync_tod_on) {
+ if (enable && sync_tod_timeout) {
+ mod_delayed_work(system_wq, &channel->sync_tod_work,
+ sync_tod_timeout * HZ);
+ }
+ return 0;
+ }
+
+ err = idt82p33_read(idt82p33, channel->dpll_sync_cnfg,
+ &sync_cnfg, sizeof(sync_cnfg));
+ if (err)
+ return err;
+
+ sync_cnfg &= ~SYNC_TOD;
+
+ if (enable)
+ sync_cnfg |= SYNC_TOD;
+
+ err = idt82p33_write(idt82p33, channel->dpll_sync_cnfg,
+ &sync_cnfg, sizeof(sync_cnfg));
+ if (err)
+ return err;
+
+ channel->sync_tod_on = enable;
+
+ if (enable && sync_tod_timeout) {
+ mod_delayed_work(system_wq, &channel->sync_tod_work,
+ sync_tod_timeout * HZ);
+ }
+
+ return 0;
+}
+
+static void idt82p33_sync_tod_work_handler(struct work_struct *work)
+{
+ struct idt82p33_channel *channel =
+ container_of(work, struct idt82p33_channel, sync_tod_work.work);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+
+ mutex_lock(&idt82p33->reg_lock);
+
+ (void)idt82p33_sync_tod(channel, false);
+
+ mutex_unlock(&idt82p33->reg_lock);
+}
+
+static int idt82p33_pps_enable(struct idt82p33_channel *channel, bool enable)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ u8 mask, outn, val;
+ int err;
+
+ mask = channel->output_mask;
+ outn = 0;
+
+ while (mask) {
+ if (mask & 0x1) {
+ err = idt82p33_read(idt82p33, OUT_MUX_CNFG(outn),
+ &val, sizeof(val));
+ if (err)
+ return err;
+
+ if (enable)
+ val &= ~SQUELCH_ENABLE;
+ else
+ val |= SQUELCH_ENABLE;
+
+ err = idt82p33_write(idt82p33, OUT_MUX_CNFG(outn),
+ &val, sizeof(val));
+
+ if (err)
+ return err;
+ }
+ mask >>= 0x1;
+ outn++;
+ }
+
+ return 0;
+}
+
+static int idt82p33_enable_tod(struct idt82p33_channel *channel)
+{
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ struct timespec64 ts = {0, 0};
+ int err;
+ u8 val;
+
+ val = 0;
+ err = idt82p33_write(idt82p33, channel->dpll_input_mode_cnfg,
+ &val, sizeof(val));
+ if (err)
+ return err;
+
+ err = idt82p33_pps_enable(channel, false);
+
+ if (err)
+ return err;
+
+ err = idt82p33_measure_tod_write_overhead(channel);
+
+ if (err)
+ return err;
+
+ err = _idt82p33_settime(channel, &ts);
+
+ if (err)
+ return err;
+
+ return idt82p33_sync_tod(channel, true);
+}
+
+static void idt82p33_ptp_clock_unregister_all(struct idt82p33 *idt82p33)
+{
+ struct idt82p33_channel *channel;
+ u8 i;
+
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+
+ channel = &idt82p33->channel[i];
+
+ if (channel->ptp_clock) {
+ ptp_clock_unregister(channel->ptp_clock);
+ cancel_delayed_work_sync(&channel->sync_tod_work);
+ }
+ }
+}
+
+static int idt82p33_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ err = -EOPNOTSUPP;
+
+ mutex_lock(&idt82p33->reg_lock);
+
+ if (rq->type == PTP_CLK_REQ_PEROUT) {
+		/* Only accept a 1-PPS output aligned to the second. */
+		if (!on)
+			err = idt82p33_pps_enable(channel, false);
+		else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+			 rq->perout.period.nsec)
+			err = -ERANGE;
+		else
+			err = idt82p33_pps_enable(channel, true);
+ }
+
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ mutex_lock(&idt82p33->reg_lock);
+ err = _idt82p33_adjfine(channel, scaled_ppm);
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_adjtime(struct ptp_clock_info *ptp, s64 delta_ns)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ mutex_lock(&idt82p33->reg_lock);
+
+ if (abs(delta_ns) < phase_snap_threshold) {
+ mutex_unlock(&idt82p33->reg_lock);
+ return 0;
+ }
+
+ err = _idt82p33_adjtime(channel, delta_ns);
+
+ if (err) {
+ mutex_unlock(&idt82p33->reg_lock);
+ return err;
+ }
+
+ err = idt82p33_sync_tod(channel, true);
+
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ mutex_lock(&idt82p33->reg_lock);
+ err = _idt82p33_gettime(channel, ts);
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ int err;
+
+ mutex_lock(&idt82p33->reg_lock);
+ err = _idt82p33_settime(channel, ts);
+ mutex_unlock(&idt82p33->reg_lock);
+
+ return err;
+}
+
+static int idt82p33_channel_init(struct idt82p33_channel *channel, int index)
+{
+ switch (index) {
+ case 0:
+ channel->dpll_tod_cnfg = DPLL1_TOD_CNFG;
+ channel->dpll_tod_trigger = DPLL1_TOD_TRIGGER;
+ channel->dpll_tod_sts = DPLL1_TOD_STS;
+ channel->dpll_mode_cnfg = DPLL1_OPERATING_MODE_CNFG;
+ channel->dpll_freq_cnfg = DPLL1_HOLDOVER_FREQ_CNFG;
+ channel->dpll_phase_cnfg = DPLL1_PHASE_OFFSET_CNFG;
+ channel->dpll_sync_cnfg = DPLL1_SYNC_EDGE_CNFG;
+ channel->dpll_input_mode_cnfg = DPLL1_INPUT_MODE_CNFG;
+ break;
+ case 1:
+ channel->dpll_tod_cnfg = DPLL2_TOD_CNFG;
+ channel->dpll_tod_trigger = DPLL2_TOD_TRIGGER;
+ channel->dpll_tod_sts = DPLL2_TOD_STS;
+ channel->dpll_mode_cnfg = DPLL2_OPERATING_MODE_CNFG;
+ channel->dpll_freq_cnfg = DPLL2_HOLDOVER_FREQ_CNFG;
+ channel->dpll_phase_cnfg = DPLL2_PHASE_OFFSET_CNFG;
+ channel->dpll_sync_cnfg = DPLL2_SYNC_EDGE_CNFG;
+ channel->dpll_input_mode_cnfg = DPLL2_INPUT_MODE_CNFG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ INIT_DELAYED_WORK(&channel->sync_tod_work,
+ idt82p33_sync_tod_work_handler);
+ channel->sync_tod_on = false;
+ channel->current_freq_ppb = 0;
+
+ return 0;
+}
+
+static void idt82p33_caps_init(struct ptp_clock_info *caps)
+{
+ caps->owner = THIS_MODULE;
+ caps->max_adj = 92000;
+ caps->adjfine = idt82p33_adjfine;
+ caps->adjtime = idt82p33_adjtime;
+ caps->gettime64 = idt82p33_gettime;
+ caps->settime64 = idt82p33_settime;
+ caps->enable = idt82p33_enable;
+}
+
+static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
+{
+ struct idt82p33_channel *channel;
+ int err;
+
+ if (index >= MAX_PHC_PLL)
+ return -EINVAL;
+
+ channel = &idt82p33->channel[index];
+
+ err = idt82p33_channel_init(channel, index);
+ if (err)
+ return err;
+
+ channel->idt82p33 = idt82p33;
+
+ idt82p33_caps_init(&channel->caps);
+ snprintf(channel->caps.name, sizeof(channel->caps.name),
+ "IDT 82P33 PLL%u", index);
+ channel->caps.n_per_out = hweight8(channel->output_mask);
+
+ err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO);
+ if (err)
+ return err;
+
+ err = idt82p33_enable_tod(channel);
+ if (err)
+ return err;
+
+ channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
+
+ if (IS_ERR(channel->ptp_clock)) {
+ err = PTR_ERR(channel->ptp_clock);
+ channel->ptp_clock = NULL;
+ return err;
+ }
+
+ if (!channel->ptp_clock)
+ return -ENOTSUPP;
+
+ dev_info(&idt82p33->client->dev, "PLL%u registered as ptp%d\n",
+ index, channel->ptp_clock->index);
+
+ return 0;
+}
+
+static int idt82p33_load_firmware(struct idt82p33 *idt82p33)
+{
+ const struct firmware *fw;
+ struct idt82p33_fwrc *rec;
+ u8 loaddr, page, val;
+ int err;
+ s32 len;
+
+ dev_dbg(&idt82p33->client->dev,
+ "requesting firmware '%s'\n", FW_FILENAME);
+
+ err = request_firmware(&fw, FW_FILENAME, &idt82p33->client->dev);
+
+ if (err)
+ return err;
+
+ dev_dbg(&idt82p33->client->dev, "firmware size %zu bytes\n", fw->size);
+
+ rec = (struct idt82p33_fwrc *) fw->data;
+
+ for (len = fw->size; len > 0; len -= sizeof(*rec)) {
+ if (rec->reserved) {
+ dev_err(&idt82p33->client->dev,
+ "bad firmware, reserved field non-zero\n");
+ err = -EINVAL;
+ } else {
+ val = rec->value;
+ loaddr = rec->loaddr;
+ page = rec->hiaddr;
+
+ rec++;
+
+ err = idt82p33_check_and_set_masks(idt82p33, page,
+ loaddr, val);
+ }
+
+ if (err == 0) {
+ /* maximum 8 pages */
+ if (page >= PAGE_NUM)
+ continue;
+
+ /* Page size 128, last 4 bytes of page skipped */
+ if (((loaddr > 0x7b) && (loaddr <= 0x7f))
+ || ((loaddr > 0xfb) && (loaddr <= 0xff)))
+ continue;
+
+ err = idt82p33_write(idt82p33, _ADDR(page, loaddr),
+ &val, sizeof(val));
+ }
+
+ if (err)
+ goto out;
+ }
+
+ idt82p33_display_masks(idt82p33);
+out:
+ release_firmware(fw);
+ return err;
+}
+
+static int idt82p33_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct idt82p33 *idt82p33;
+ int err;
+ u8 i;
+
+ (void)id;
+
+ idt82p33 = devm_kzalloc(&client->dev,
+ sizeof(struct idt82p33), GFP_KERNEL);
+ if (!idt82p33)
+ return -ENOMEM;
+
+ mutex_init(&idt82p33->reg_lock);
+
+ idt82p33->client = client;
+ idt82p33->page_offset = 0xff;
+ idt82p33->tod_write_overhead_ns = 0;
+ idt82p33->calculate_overhead_flag = 0;
+ idt82p33->pll_mask = DEFAULT_PLL_MASK;
+ idt82p33->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0;
+ idt82p33->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1;
+
+ mutex_lock(&idt82p33->reg_lock);
+
+ err = idt82p33_load_firmware(idt82p33);
+
+ if (err)
+ dev_warn(&idt82p33->client->dev,
+ "loading firmware failed with %d\n", err);
+
+ if (idt82p33->pll_mask) {
+ for (i = 0; i < MAX_PHC_PLL; i++) {
+ if (idt82p33->pll_mask & (1 << i)) {
+ err = idt82p33_enable_channel(idt82p33, i);
+ if (err)
+ break;
+ }
+ }
+ } else {
+ dev_err(&idt82p33->client->dev,
+ "no PLLs flagged as PHCs, nothing to do\n");
+ err = -ENODEV;
+ }
+
+ mutex_unlock(&idt82p33->reg_lock);
+
+ if (err) {
+ idt82p33_ptp_clock_unregister_all(idt82p33);
+ return err;
+ }
+
+ i2c_set_clientdata(client, idt82p33);
+
+ return 0;
+}
+
+static int idt82p33_remove(struct i2c_client *client)
+{
+ struct idt82p33 *idt82p33 = i2c_get_clientdata(client);
+
+ idt82p33_ptp_clock_unregister_all(idt82p33);
+ mutex_destroy(&idt82p33->reg_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id idt82p33_dt_id[] = {
+ { .compatible = "idt,82p33810" },
+ { .compatible = "idt,82p33813" },
+ { .compatible = "idt,82p33814" },
+ { .compatible = "idt,82p33831" },
+ { .compatible = "idt,82p33910" },
+ { .compatible = "idt,82p33913" },
+ { .compatible = "idt,82p33914" },
+ { .compatible = "idt,82p33931" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, idt82p33_dt_id);
+#endif
+
+static const struct i2c_device_id idt82p33_i2c_id[] = {
+ { "idt82p33810", },
+ { "idt82p33813", },
+ { "idt82p33814", },
+ { "idt82p33831", },
+ { "idt82p33910", },
+ { "idt82p33913", },
+ { "idt82p33914", },
+ { "idt82p33931", },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, idt82p33_i2c_id);
+
+static struct i2c_driver idt82p33_driver = {
+ .driver = {
+ .of_match_table = of_match_ptr(idt82p33_dt_id),
+ .name = "idt82p33",
+ },
+ .probe = idt82p33_probe,
+ .remove = idt82p33_remove,
+ .id_table = idt82p33_i2c_id,
+};
+
+module_i2c_driver(idt82p33_driver);
diff --git a/drivers/ptp/ptp_idt82p33.h b/drivers/ptp/ptp_idt82p33.h
new file mode 100644
index 000000000000..9d46966d25f1
--- /dev/null
+++ b/drivers/ptp/ptp_idt82p33.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PTP hardware clock driver for the IDT 82P33XXX family of clocks.
+ *
+ * Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef PTP_IDT82P33_H
+#define PTP_IDT82P33_H
+
+#include <linux/ktime.h>
+#include <linux/workqueue.h>
+
+/* Register Map - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf */
+#define PAGE_NUM (8)
+#define _ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f))
+#define _PAGE(addr) (((addr) >> 0x7) & 0x7)
+#define _OFFSET(addr) ((addr) & 0x7f)
+
+#define DPLL1_TOD_CNFG 0x134
+#define DPLL2_TOD_CNFG 0x1B4
+
+#define DPLL1_TOD_STS 0x10B
+#define DPLL2_TOD_STS 0x18B
+
+#define DPLL1_TOD_TRIGGER 0x115
+#define DPLL2_TOD_TRIGGER 0x195
+
+#define DPLL1_OPERATING_MODE_CNFG 0x120
+#define DPLL2_OPERATING_MODE_CNFG 0x1A0
+
+#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C
+#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC
+
+#define DPLL1_PHASE_OFFSET_CNFG 0x143
+#define DPLL2_PHASE_OFFSET_CNFG 0x1C3
+
+#define DPLL1_SYNC_EDGE_CNFG 0x140
+#define DPLL2_SYNC_EDGE_CNFG 0x1C0
+
+#define DPLL1_INPUT_MODE_CNFG 0x116
+#define DPLL2_INPUT_MODE_CNFG 0x196
+
+#define OUT_MUX_CNFG(outn) _ADDR(0x6, (0xC * (outn)))
+
+#define PAGE_ADDR 0x7F
+/* Register Map end */
+
+/* Register definitions - AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf */
+#define TOD_TRIGGER(wr_trig, rd_trig) ((((wr_trig) & 0xf) << 4) | ((rd_trig) & 0xf))
+#define SYNC_TOD BIT(1)
+#define PH_OFFSET_EN BIT(7)
+#define SQUELCH_ENABLE BIT(5)
+
+/* Bit definitions for the DPLL_MODE register */
+#define PLL_MODE_SHIFT (0)
+#define PLL_MODE_MASK (0x1F)
+
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_AUTOMATIC = PLL_MODE_MIN,
+ PLL_MODE_FORCE_FREERUN = 1,
+ PLL_MODE_FORCE_HOLDOVER = 2,
+ PLL_MODE_FORCE_LOCKED = 4,
+ PLL_MODE_FORCE_PRE_LOCKED2 = 5,
+ PLL_MODE_FORCE_PRE_LOCKED = 6,
+ PLL_MODE_FORCE_LOST_PHASE = 7,
+ PLL_MODE_DCO = 10,
+ PLL_MODE_WPH = 18,
+ PLL_MODE_MAX = PLL_MODE_WPH,
+};
+
+enum hw_tod_trig_sel {
+ HW_TOD_TRIG_SEL_MIN = 0,
+ HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_SYNC_SEL = 1,
+ HW_TOD_TRIG_SEL_IN12 = 2,
+ HW_TOD_TRIG_SEL_IN13 = 3,
+ HW_TOD_TRIG_SEL_IN14 = 4,
+ HW_TOD_TRIG_SEL_TOD_PPS = 5,
+ HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6,
+ HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7,
+ HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8,
+ HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+};
+
+/* Register bit definitions end */
+#define FW_FILENAME "idt82p33xxx.bin"
+#define MAX_PHC_PLL (2)
+#define TOD_BYTE_COUNT (10)
+#define MAX_MEASURMENT_COUNT (5)
+#define SNAP_THRESHOLD_NS (150000)
+#define SYNC_TOD_TIMEOUT_SEC (5)
+
+#define PLLMASK_ADDR_HI 0xFF
+#define PLLMASK_ADDR_LO 0xA5
+
+#define PLL0_OUTMASK_ADDR_HI 0xFF
+#define PLL0_OUTMASK_ADDR_LO 0xB0
+
+#define PLL1_OUTMASK_ADDR_HI 0xFF
+#define PLL1_OUTMASK_ADDR_LO 0xB2
+
+#define PLL2_OUTMASK_ADDR_HI 0xFF
+#define PLL2_OUTMASK_ADDR_LO 0xB4
+
+#define PLL3_OUTMASK_ADDR_HI 0xFF
+#define PLL3_OUTMASK_ADDR_LO 0xB6
+
+#define DEFAULT_PLL_MASK (0x01)
+#define DEFAULT_OUTPUT_MASK_PLL0 (0xc0)
+#define DEFAULT_OUTPUT_MASK_PLL1 DEFAULT_OUTPUT_MASK_PLL0
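+
+/* 0xc0 flags outputs 6 and 7; idt82p33_enable_channel() derives the
+ * number of programmable periodic outputs (caps.n_per_out) as the
+ * hweight8() of this mask.
+ */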
+
+/* PTP Hardware Clock interface */
+struct idt82p33_channel {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ struct idt82p33 *idt82p33;
+ enum pll_mode pll_mode;
+ /* task to turn off SYNC_TOD bit after pps sync */
+ struct delayed_work sync_tod_work;
+ bool sync_tod_on;
+ s32 current_freq_ppb;
+ u8 output_mask;
+ u16 dpll_tod_cnfg;
+ u16 dpll_tod_trigger;
+ u16 dpll_tod_sts;
+ u16 dpll_mode_cnfg;
+ u16 dpll_freq_cnfg;
+ u16 dpll_phase_cnfg;
+ u16 dpll_sync_cnfg;
+ u16 dpll_input_mode_cnfg;
+};
+
+struct idt82p33 {
+ struct idt82p33_channel channel[MAX_PHC_PLL];
+ struct i2c_client *client;
+ u8 page_offset;
+ u8 pll_mask;
+ ktime_t start_time;
+ int calculate_overhead_flag;
+ s64 tod_write_overhead_ns;
+ /* Protects I2C read/modify/write registers from concurrent access */
+ struct mutex reg_lock;
+};
+
+/* firmware interface */
+struct idt82p33_fwrc {
+ u8 hiaddr;
+ u8 loaddr;
+ u8 value;
+ u8 reserved;
+} __packed;
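+
+/* Worked example (record layout as consumed by idt82p33_load_firmware()):
+ * the firmware blob is a flat array of these 4-byte records; a record
+ * { .hiaddr = 0x01, .loaddr = 0x20, .value = 0x0a } writes 0x0a to page 1,
+ * offset 0x20, i.e. absolute address _ADDR(1, 0x20) == 0xa0.
+ */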
+
+/**
+ * @brief Maximum absolute value for write phase offset in femtoseconds
+ */
+#define WRITE_PHASE_OFFSET_LIMIT (20000052084ll)
+
+/** @brief Phase offset resolution
+ *
+ * DPLL phase offset = 10^15 fs / (System Clock * 2^13)
+ *                   = 10^15 fs / (1638400000 * 2^13)
+ *                   = 74.5058059692382 fs
+ */
+#define IDT_T0DPLL_PHASE_RESOL 74506
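+
+/* Worked example (assuming the constant above is in units of 0.001 fs,
+ * i.e. attoseconds, to keep integer math): a requested offset of 1 us is
+ * 10^12 as, which is roughly 10^12 / 74506 ~= 13.4 million phase steps.
+ */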
+
+#endif /* PTP_IDT82P33_H */
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index b27c46ebfc8f..c09c16be0edf 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -131,8 +131,7 @@ irqreturn_t ptp_qoriq_isr(int irq, void *priv)
struct ptp_qoriq *ptp_qoriq = priv;
struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
struct ptp_clock_event event;
- u64 ns;
- u32 ack = 0, lo, hi, mask, val, irqs;
+ u32 ack = 0, mask, val, irqs;
spin_lock(&ptp_qoriq->lock);
@@ -153,32 +152,6 @@ irqreturn_t ptp_qoriq_isr(int irq, void *priv)
extts_clean_up(ptp_qoriq, 1, true);
}
- if (irqs & ALM2) {
- ack |= ALM2;
- if (ptp_qoriq->alarm_value) {
- event.type = PTP_CLOCK_ALARM;
- event.index = 0;
- event.timestamp = ptp_qoriq->alarm_value;
- ptp_clock_event(ptp_qoriq->clock, &event);
- }
- if (ptp_qoriq->alarm_interval) {
- ns = ptp_qoriq->alarm_value + ptp_qoriq->alarm_interval;
- hi = ns >> 32;
- lo = ns & 0xffffffff;
- ptp_qoriq->write(&regs->alarm_regs->tmr_alarm2_l, lo);
- ptp_qoriq->write(&regs->alarm_regs->tmr_alarm2_h, hi);
- ptp_qoriq->alarm_value = ns;
- } else {
- spin_lock(&ptp_qoriq->lock);
- mask = ptp_qoriq->read(&regs->ctrl_regs->tmr_temask);
- mask &= ~ALM2EN;
- ptp_qoriq->write(&regs->ctrl_regs->tmr_temask, mask);
- spin_unlock(&ptp_qoriq->lock);
- ptp_qoriq->alarm_value = 0;
- ptp_qoriq->alarm_interval = 0;
- }
- }
-
if (irqs & PP1) {
ack |= PP1;
event.type = PTP_CLOCK_PPS;
diff --git a/drivers/ptp/ptp_vmw.c b/drivers/ptp/ptp_vmw.c
new file mode 100644
index 000000000000..5dca26e14bdc
--- /dev/null
+++ b/drivers/ptp/ptp_vmw.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Copyright (C) 2020 VMware, Inc., Palo Alto, CA., USA
+ *
+ * PTP clock driver for VMware precision clock virtual device.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <asm/hypervisor.h>
+#include <asm/vmware.h>
+
+#define VMWARE_MAGIC 0x564D5868
+#define VMWARE_CMD_PCLK(nr) (((nr) << 16) | 97)
+#define VMWARE_CMD_PCLK_GETTIME VMWARE_CMD_PCLK(0)
+
+static struct acpi_device *ptp_vmw_acpi_device;
+static struct ptp_clock *ptp_vmw_clock;
+
+static int ptp_vmw_pclk_read(u64 *ns)
+{
+ u32 ret, nsec_hi, nsec_lo, unused1, unused2, unused3;
+
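+ /* Hypercall register protocol as encoded by the asm constraints below:
+ * in: %eax = magic, %ecx = command; out: %eax = status (0 on success),
+ * %ebx:%ecx = nanoseconds hi:lo.
+ */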
+ asm volatile (VMWARE_HYPERCALL :
+ "=a"(ret), "=b"(nsec_hi), "=c"(nsec_lo), "=d"(unused1),
+ "=S"(unused2), "=D"(unused3) :
+ "a"(VMWARE_MAGIC), "b"(0),
+ "c"(VMWARE_CMD_PCLK_GETTIME), "d"(0) :
+ "memory");
+
+ if (ret == 0)
+ *ns = ((u64)nsec_hi << 32) | nsec_lo;
+ return ret;
+}
+
+/*
+ * PTP clock ops.
+ */
+
+static int ptp_vmw_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_vmw_adjfreq(struct ptp_clock_info *info, s32 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_vmw_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+ u64 ns;
+
+ if (ptp_vmw_pclk_read(&ns) != 0)
+ return -EIO;
+ *ts = ns_to_timespec64(ns);
+ return 0;
+}
+
+static int ptp_vmw_settime(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ptp_vmw_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ptp_vmw_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "ptp_vmw",
+ .max_adj = 0,
+ .adjtime = ptp_vmw_adjtime,
+ .adjfreq = ptp_vmw_adjfreq,
+ .gettime64 = ptp_vmw_gettime,
+ .settime64 = ptp_vmw_settime,
+ .enable = ptp_vmw_enable,
+};
+
+/*
+ * ACPI driver ops for VMware "precision clock" virtual device.
+ */
+
+static int ptp_vmw_acpi_add(struct acpi_device *device)
+{
+ ptp_vmw_clock = ptp_clock_register(&ptp_vmw_clock_info, NULL);
+ if (IS_ERR(ptp_vmw_clock)) {
+ pr_err("failed to register ptp clock\n");
+ return PTR_ERR(ptp_vmw_clock);
+ }
+
+ ptp_vmw_acpi_device = device;
+ return 0;
+}
+
+static int ptp_vmw_acpi_remove(struct acpi_device *device)
+{
+ ptp_clock_unregister(ptp_vmw_clock);
+ return 0;
+}
+
+static const struct acpi_device_id ptp_vmw_acpi_device_ids[] = {
+ { "VMW0005", 0 },
+ { "", 0 },
+};
+
+MODULE_DEVICE_TABLE(acpi, ptp_vmw_acpi_device_ids);
+
+static struct acpi_driver ptp_vmw_acpi_driver = {
+ .name = "ptp_vmw",
+ .ids = ptp_vmw_acpi_device_ids,
+ .ops = {
+ .add = ptp_vmw_acpi_add,
+ .remove = ptp_vmw_acpi_remove
+ },
+ .owner = THIS_MODULE
+};
+
+static int __init ptp_vmw_init(void)
+{
+ if (x86_hyper_type != X86_HYPER_VMWARE)
+ return -ENODEV;
+ return acpi_bus_register_driver(&ptp_vmw_acpi_driver);
+}
+
+static void __exit ptp_vmw_exit(void)
+{
+ acpi_bus_unregister_driver(&ptp_vmw_acpi_driver);
+}
+
+module_init(ptp_vmw_init);
+module_exit(ptp_vmw_exit);
+
+MODULE_DESCRIPTION("VMware virtual PTP clock driver");
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index de3862c15fcc..ffdb5bc25d6d 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -128,6 +128,7 @@ config QCOM_Q6V5_MSS
select MFD_SYSCON
select QCOM_MDT_LOADER
select QCOM_Q6V5_COMMON
+ select QCOM_Q6V5_IPA_NOTIFY
select QCOM_RPROC_COMMON
select QCOM_SCM
help
@@ -167,6 +168,9 @@ config QCOM_Q6V5_WCSS
Say y here to support the Qualcomm Peripheral Image Loader for the
Hexagon V5 based WCSS remote processors.
+config QCOM_Q6V5_IPA_NOTIFY
+ tristate
+
config QCOM_SYSMON
tristate "Qualcomm sysmon driver"
depends on RPMSG
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index e30a1b15fbac..0effd3825035 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_QCOM_Q6V5_ADSP) += qcom_q6v5_adsp.o
obj-$(CONFIG_QCOM_Q6V5_MSS) += qcom_q6v5_mss.o
obj-$(CONFIG_QCOM_Q6V5_PAS) += qcom_q6v5_pas.o
obj-$(CONFIG_QCOM_Q6V5_WCSS) += qcom_q6v5_wcss.o
+obj-$(CONFIG_QCOM_Q6V5_IPA_NOTIFY) += qcom_q6v5_ipa_notify.o
obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o
obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
qcom_wcnss_pil-y += qcom_wcnss.o
diff --git a/drivers/remoteproc/qcom_q6v5_ipa_notify.c b/drivers/remoteproc/qcom_q6v5_ipa_notify.c
new file mode 100644
index 000000000000..e1c10a128bfd
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5_ipa_notify.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Qualcomm IPA notification subdev support
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/qcom_q6v5_ipa_notify.h>
+
+static void
+ipa_notify_common(struct rproc_subdev *subdev, enum qcom_rproc_event event)
+{
+ struct qcom_rproc_ipa_notify *ipa_notify;
+ qcom_ipa_notify_t notify;
+
+ ipa_notify = container_of(subdev, struct qcom_rproc_ipa_notify, subdev);
+ notify = ipa_notify->notify;
+ if (notify)
+ notify(ipa_notify->data, event);
+}
+
+static int ipa_notify_prepare(struct rproc_subdev *subdev)
+{
+ ipa_notify_common(subdev, MODEM_STARTING);
+
+ return 0;
+}
+
+static int ipa_notify_start(struct rproc_subdev *subdev)
+{
+ ipa_notify_common(subdev, MODEM_RUNNING);
+
+ return 0;
+}
+
+static void ipa_notify_stop(struct rproc_subdev *subdev, bool crashed)
+{
+ ipa_notify_common(subdev, crashed ? MODEM_CRASHED : MODEM_STOPPING);
+}
+
+static void ipa_notify_unprepare(struct rproc_subdev *subdev)
+{
+ ipa_notify_common(subdev, MODEM_OFFLINE);
+}
+
+static void ipa_notify_removing(struct rproc_subdev *subdev)
+{
+ ipa_notify_common(subdev, MODEM_REMOVING);
+}
+
+/* Register the IPA notification subdevice with the Q6V5 MSS remoteproc */
+void qcom_add_ipa_notify_subdev(struct rproc *rproc,
+ struct qcom_rproc_ipa_notify *ipa_notify)
+{
+ ipa_notify->notify = NULL;
+ ipa_notify->data = NULL;
+ ipa_notify->subdev.prepare = ipa_notify_prepare;
+ ipa_notify->subdev.start = ipa_notify_start;
+ ipa_notify->subdev.stop = ipa_notify_stop;
+ ipa_notify->subdev.unprepare = ipa_notify_unprepare;
+
+ rproc_add_subdev(rproc, &ipa_notify->subdev);
+}
+EXPORT_SYMBOL_GPL(qcom_add_ipa_notify_subdev);
+
+/* Remove the IPA notification subdevice */
+void qcom_remove_ipa_notify_subdev(struct rproc *rproc,
+ struct qcom_rproc_ipa_notify *ipa_notify)
+{
+ struct rproc_subdev *subdev = &ipa_notify->subdev;
+
+ ipa_notify_removing(subdev);
+
+ rproc_remove_subdev(rproc, subdev);
+ ipa_notify->notify = NULL; /* Make any stale use obvious */
+}
+EXPORT_SYMBOL_GPL(qcom_remove_ipa_notify_subdev);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm IPA notification remoteproc subdev");
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index a1cc9cbe038f..f9ccce76e44b 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -22,6 +22,7 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
+#include "linux/remoteproc/qcom_q6v5_ipa_notify.h"
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/iopoll.h>
@@ -201,6 +202,7 @@ struct q6v5 {
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_subdev smd_subdev;
struct qcom_rproc_ssr ssr_subdev;
+ struct qcom_rproc_ipa_notify ipa_notify_subdev;
struct qcom_sysmon *sysmon;
bool need_mem_protection;
bool has_alt_reset;
@@ -1540,6 +1542,39 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
return 0;
}
+#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
+
+/* Register IPA notification function */
+int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
+ void *data)
+{
+ struct qcom_rproc_ipa_notify *ipa_notify;
+ struct q6v5 *qproc = rproc->priv;
+
+ if (!notify)
+ return -EINVAL;
+
+ ipa_notify = &qproc->ipa_notify_subdev;
+ if (ipa_notify->notify)
+ return -EBUSY;
+
+ ipa_notify->notify = notify;
+ ipa_notify->data = data;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_register_ipa_notify);
+
+/* Deregister IPA notification function */
+void qcom_deregister_ipa_notify(struct rproc *rproc)
+{
+ struct q6v5 *qproc = rproc->priv;
+
+ qproc->ipa_notify_subdev.notify = NULL;
+}
+EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify);
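+
+/* A minimal client sketch (hypothetical caller, not part of this patch):
+ * a driver supplies a callback matching qcom_ipa_notify_t and has it
+ * invoked with MODEM_STARTING/MODEM_RUNNING/... transitions:
+ *
+ *   static void ipa_modem_notify(void *data, enum qcom_rproc_event event);
+ *   ...
+ *   err = qcom_register_ipa_notify(rproc, ipa_modem_notify, ipa);
+ */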
+#endif /* IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
+
static int q6v5_probe(struct platform_device *pdev)
{
const struct rproc_hexagon_res *desc;
@@ -1664,6 +1699,7 @@ static int q6v5_probe(struct platform_device *pdev)
qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
+ qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
if (IS_ERR(qproc->sysmon)) {
ret = PTR_ERR(qproc->sysmon);
@@ -1677,6 +1713,7 @@ static int q6v5_probe(struct platform_device *pdev)
return 0;
detach_proxy_pds:
+ qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
detach_active_pds:
q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
@@ -1693,6 +1730,7 @@ static int q6v5_remove(struct platform_device *pdev)
rproc_del(qproc->rproc);
qcom_remove_sysmon_subdev(qproc->sysmon);
+ qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index ff74eb5fce50..f72f961cc78f 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -177,8 +177,8 @@ struct qdio_queue_perf_stat {
unsigned int nr_sbal_total;
};
-enum qdio_queue_irq_states {
- QDIO_QUEUE_IRQS_DISABLED,
+enum qdio_irq_poll_states {
+ QDIO_IRQ_DISABLED,
};
struct qdio_input_q {
@@ -188,10 +188,6 @@ struct qdio_input_q {
int ack_count;
/* last time of noticing incoming data */
u64 timestamp;
- /* upper-layer polling flag */
- unsigned long queue_irq_state;
- /* callback to start upper-layer polling */
- void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
};
struct qdio_output_q {
@@ -299,6 +295,9 @@ struct qdio_irq {
struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
+ void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
+ unsigned long poll_state;
+
debug_info_t *debug_area;
struct mutex setup_mutex;
struct qdio_dev_perf_stat perf_stat;
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 9c0370b27426..00244607c8c0 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -128,8 +128,8 @@ static int qstat_show(struct seq_file *m, void *v)
q->u.in.ack_start, q->u.in.ack_count);
seq_printf(m, "DSCI: %x IRQs disabled: %u\n",
*(u8 *)q->irq_ptr->dsci,
- test_bit(QDIO_QUEUE_IRQS_DISABLED,
- &q->u.in.queue_irq_state));
+ test_bit(QDIO_IRQ_DISABLED,
+ &q->irq_ptr->poll_state));
}
seq_printf(m, "SBAL states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 3475317c42e5..02ced5949287 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -950,19 +950,14 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
- for_each_input_queue(irq_ptr, q, i) {
- if (q->u.in.queue_start_poll) {
- /* skip if polling is enabled or already in work */
- if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
- &q->u.in.queue_irq_state)) {
- QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
- continue;
- }
- q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
- q->irq_ptr->int_parm);
- } else {
+ if (irq_ptr->irq_poll) {
+ if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
+ irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm);
+ else
+ QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
+ } else {
+ for_each_input_queue(irq_ptr, q, i)
tasklet_schedule(&q->tasklet);
- }
}
if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
@@ -1610,24 +1605,26 @@ EXPORT_SYMBOL_GPL(do_QDIO);
/**
* qdio_start_irq - process input buffers
* @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
*
* Return codes
* 0 - success
* 1 - irqs not started since new data is available
*/
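+/* Sketch of the intended caller flow (mirroring the qeth NAPI rework
+ * later in this patch): the irq_poll callback schedules NAPI; the poll
+ * routine drains buffers with qdio_get_next_buffers(); when done it calls
+ * napi_complete_done() followed by qdio_start_irq(), and reschedules NAPI
+ * if that returns 1 (new data arrived in between).
+ */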
-int qdio_start_irq(struct ccw_device *cdev, int nr)
+int qdio_start_irq(struct ccw_device *cdev)
{
struct qdio_q *q;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ unsigned int i;
if (!irq_ptr)
return -ENODEV;
- q = irq_ptr->input_qs[nr];
clear_nonshared_ind(irq_ptr);
- qdio_stop_polling(q);
- clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+
+ for_each_input_queue(irq_ptr, q, i)
+ qdio_stop_polling(q);
+
+ clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
/*
* We need to check again to not lose initiative after
@@ -1635,13 +1632,16 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
*/
if (test_nonshared_ind(irq_ptr))
goto rescan;
- if (!qdio_inbound_q_done(q, q->first_to_check))
- goto rescan;
+
+ for_each_input_queue(irq_ptr, q, i) {
+ if (!qdio_inbound_q_done(q, q->first_to_check))
+ goto rescan;
+ }
+
return 0;
rescan:
- if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
- &q->u.in.queue_irq_state))
+ if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
return 0;
else
return 1;
@@ -1729,23 +1729,19 @@ EXPORT_SYMBOL(qdio_get_next_buffers);
/**
* qdio_stop_irq - disable interrupt processing for the device
* @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
*
* Return codes
* 0 - interrupts were already disabled
* 1 - interrupts successfully disabled
*/
-int qdio_stop_irq(struct ccw_device *cdev, int nr)
+int qdio_stop_irq(struct ccw_device *cdev)
{
- struct qdio_q *q;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
- q = irq_ptr->input_qs[nr];
- if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
- &q->u.in.queue_irq_state))
+ if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
return 0;
else
return 1;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index e115623b86b2..7b831bb4e229 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -224,8 +224,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1;
- q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
- qdio_init->queue_start_poll_array[i] : NULL;
setup_storage_lists(q, irq_ptr, input_sbal_array, i);
input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
@@ -476,6 +474,13 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
setup_queues(irq_ptr, init_data);
+ if (init_data->irq_poll) {
+ irq_ptr->irq_poll = init_data->irq_poll;
+ set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
+ } else {
+ irq_ptr->irq_poll = NULL;
+ }
+
setup_qib(irq_ptr, init_data);
qdio_setup_thinint(irq_ptr);
set_impl_params(irq_ptr, init_data->qib_param_field_format,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 7c4e4ec08a12..8f315c53de23 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -135,28 +135,24 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
has_multiple_inq_on_dsci(irq))
xchg(irq->dsci, 0);
+ if (irq->irq_poll) {
+ if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state))
+ irq->irq_poll(irq->cdev, irq->int_parm);
+ else
+ QDIO_PERF_STAT_INC(irq, int_discarded);
+
+ return;
+ }
+
for_each_input_queue(irq, q, i) {
- if (q->u.in.queue_start_poll) {
- /* skip if polling is enabled or already in work */
- if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
- &q->u.in.queue_irq_state)) {
- QDIO_PERF_STAT_INC(irq, int_discarded);
- continue;
- }
-
- /* avoid dsci clear here, done after processing */
- q->u.in.queue_start_poll(irq->cdev, q->nr,
- irq->int_parm);
- } else {
- if (!shared_ind(irq))
- xchg(irq->dsci, 0);
-
- /*
- * Call inbound processing but not directly
- * since that could starve other thinint queues.
- */
- tasklet_schedule(&q->tasklet);
- }
+ if (!shared_ind(irq))
+ xchg(irq->dsci, 0);
+
+ /*
+ * Call inbound processing but not directly
+ * since that could starve other thinint queues.
+ */
+ tasklet_schedule(&q->tasklet);
}
}
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index ced896d1534a..3850a0f5f0bc 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -91,6 +91,23 @@ config QETH_L3
To compile as a module choose M. The module name is qeth_l3.
If unsure, choose Y.
+config QETH_OSN
+ def_bool !HAVE_MARCH_Z14_FEATURES
+ prompt "qeth OSN device support"
+ depends on QETH
+ help
+ This enables the qeth driver to support devices in OSN mode.
+ This feature will be removed in 2021.
+ If unsure, choose N.
+
+config QETH_OSX
+ def_bool !HAVE_MARCH_Z15_FEATURES
+ prompt "qeth OSX device support"
+ depends on QETH
+ help
+ This enables the qeth driver to support devices in OSX mode.
+ If unsure, choose N.
+
config CCWGROUP
tristate
default (LCS || CTCM || QETH)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 468cada49e72..acda230323d5 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -178,10 +178,6 @@ struct qeth_vnicc_info {
#define QETH_RECLAIM_WORK_TIME HZ
#define QETH_MAX_PORTNO 15
-/*IPv6 address autoconfiguration stuff*/
-#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
-#define UNIQUE_ID_NOT_BY_CARD 0x10000
-
/*****************************************************************************/
/* QDIO queue and buffer handling */
/*****************************************************************************/
@@ -189,6 +185,8 @@ struct qeth_vnicc_info {
#define QETH_IQD_MIN_TXQ 2 /* One for ucast, one for mcast. */
#define QETH_IQD_MCAST_TXQ 0
#define QETH_IQD_MIN_UCAST_TXQ 1
+
+#define QETH_RX_COPYBREAK (PAGE_SIZE >> 1)
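+/* RX frames larger than QETH_RX_COPYBREAK are built as SG skbs that
+ * reference the buffer pages (see use_rx_sg in qeth_extract_skb());
+ * smaller frames are copied into a fresh linear skb.
+ */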
#define QETH_IN_BUF_SIZE_DEFAULT 65536
#define QETH_IN_BUF_COUNT_DEFAULT 64
#define QETH_IN_BUF_COUNT_HSDEFAULT 128
@@ -213,15 +211,13 @@ struct qeth_vnicc_info {
#define QETH_PRIO_Q_ING_TOS 2
#define QETH_PRIO_Q_ING_SKB 3
#define QETH_PRIO_Q_ING_VLAN 4
+#define QETH_PRIO_Q_ING_FIXED 5
/* Packing */
#define QETH_LOW_WATERMARK_PACK 2
#define QETH_HIGH_WATERMARK_PACK 5
#define QETH_WATERMARK_PACK_FUZZ 1
-/* large receive scatter gather copy break */
-#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
-
struct qeth_hdr_layer3 {
__u8 id;
__u8 flags;
@@ -407,6 +403,7 @@ struct qeth_qdio_out_buffer {
struct qdio_buffer *buffer;
atomic_t state;
int next_element_to_fill;
+ unsigned int frames;
unsigned int bytes;
struct sk_buff_head skb_list;
int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
@@ -462,6 +459,8 @@ struct qeth_out_q_stats {
u64 tso_bytes;
u64 packing_mode_switch;
u64 stopped;
+ u64 doorbell;
+ u64 coal_frames;
u64 completion_yield;
u64 completion_timer;
@@ -472,6 +471,8 @@ struct qeth_out_q_stats {
u64 tx_dropped;
};
+#define QETH_TX_MAX_COALESCED_FRAMES 1
+#define QETH_TX_COALESCE_USECS 25
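+/* Default TX completion coalescing for IQD: fake the completion IRQ (via
+ * napi_schedule()) once a single frame is pending, else arm a 25 usec
+ * fallback timer; see the IS_IQD() branch in qeth_flush_buffers().
+ */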
#define QETH_TX_TIMER_USECS 500
struct qeth_qdio_out_q {
@@ -495,9 +496,13 @@ struct qeth_qdio_out_q {
struct napi_struct napi;
struct timer_list timer;
struct qeth_hdr *prev_hdr;
+ unsigned int coalesced_frames;
u8 bulk_start;
u8 bulk_count;
u8 bulk_max;
+
+ unsigned int coalesce_usecs;
+ unsigned int max_coalesced_frames;
};
#define qeth_for_each_output_queue(card, q, i) \
@@ -506,12 +511,10 @@ struct qeth_qdio_out_q {
#define qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi)
-static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue)
+static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue,
+ unsigned long usecs)
{
- if (timer_pending(&queue->timer))
- return;
- mod_timer(&queue->timer, usecs_to_jiffies(QETH_TX_TIMER_USECS) +
- jiffies);
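+ /* timer_reduce() only ever moves the expiry closer, so concurrent
+ * callers converge on the earliest requested deadline:
+ */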
+ timer_reduce(&queue->timer, usecs_to_jiffies(usecs) + jiffies);
}
static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
@@ -673,22 +676,20 @@ struct qeth_card_blkt {
#define QETH_BROADCAST_WITH_ECHO 0x01
#define QETH_BROADCAST_WITHOUT_ECHO 0x02
-#define QETH_LAYER2_MAC_REGISTERED 0x02
struct qeth_card_info {
unsigned short unit_addr2;
unsigned short cula;
u8 chpid;
__u16 func_level;
char mcl_level[QETH_MCL_LENGTH + 1];
+ u8 dev_addr_is_registered:1;
u8 open_when_online:1;
u8 promisc_mode:1;
u8 use_v1_blkt:1;
u8 is_vm_nic:1;
- int mac_bits;
enum qeth_card_types type;
enum qeth_link_types link_type;
int broadcast_capable;
- int unique_id;
bool layer_enforced;
struct qeth_card_blkt blkt;
__u32 diagass_support;
@@ -709,9 +710,7 @@ struct qeth_card_options {
struct qeth_ipa_caps adp; /* Adapter parameters */
struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
struct qeth_vnicc_info vnicc; /* VNICC options */
- int fake_broadcast;
enum qeth_discipline_id layer;
- int rx_sg_cb;
enum qeth_ipa_isolation_modes isolation;
enum qeth_ipa_isolation_modes prev_isolation;
int sniffer;
@@ -754,7 +753,7 @@ enum qeth_addr_disposition {
struct qeth_rx {
int b_count;
int b_index;
- struct qdio_buffer_element *b_element;
+ u8 buf_element;
int e_offset;
int qdio_err;
};
@@ -770,6 +769,10 @@ struct qeth_switch_info {
__u32 settings;
};
+struct qeth_priv {
+ unsigned int rx_copybreak;
+};
+
#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
struct qeth_card {
@@ -845,11 +848,6 @@ struct qeth_trap_id {
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
-static inline bool qeth_netdev_is_registered(struct net_device *dev)
-{
- return dev->netdev_ops != NULL;
-}
-
static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
{
if (txq == QETH_IQD_MCAST_TXQ)
@@ -1051,6 +1049,7 @@ int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
void qeth_trace_features(struct qeth_card *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
+int qeth_setup_netdev(struct qeth_card *card);
int qeth_set_features(struct net_device *, netdev_features_t);
void qeth_enable_hw_features(struct net_device *dev);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
@@ -1058,6 +1057,7 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
+int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count);
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
u8 cast_type, struct net_device *sb_dev);
int qeth_open(struct net_device *dev);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 6d3f2f14b414..24fd17b347fe 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -244,7 +244,7 @@ static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
return NULL;
for (i = 0; i < pages; i++) {
- entry->elements[i] = alloc_page(GFP_KERNEL);
+ entry->elements[i] = __dev_alloc_page(GFP_KERNEL);
if (!entry->elements[i]) {
qeth_free_pool_entry(entry);
@@ -538,23 +538,16 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
for (i = 0;
i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
i++) {
- if (aob->sba[i] && buffer->is_header[i])
- kmem_cache_free(qeth_core_header_cache,
- (void *) aob->sba[i]);
+ void *data = phys_to_virt(aob->sba[i]);
+
+ if (data && buffer->is_header[i])
+ kmem_cache_free(qeth_core_header_cache, data);
}
atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
qdio_release_aob(aob);
}
-static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
-{
- return card->options.cq == QETH_CQ_ENABLED &&
- card->qdio.c_q != NULL &&
- queue != 0 &&
- queue == card->qdio.no_in_queues - 1;
-}
-
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
void *data)
{
@@ -813,7 +806,7 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
/* fall through */
default:
qeth_clear_ipacmd_list(card);
- goto out;
+ goto err_idx;
}
cmd = __ipa_reply(iob);
@@ -866,8 +859,9 @@ out:
memcpy(&card->seqno.pdu_hdr_ack,
QETH_PDU_HEADER_SEQ_NO(iob->data),
QETH_SEQ_NO_LENGTH);
- qeth_put_cmd(iob);
__qeth_issue_next_read(card);
+err_idx:
+ qeth_put_cmd(iob);
}
static int qeth_set_thread_start_bit(struct qeth_card *card,
@@ -1161,17 +1155,20 @@ static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
QETH_TXQ_STAT_INC(queue, bufs);
QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
+ if (error) {
+ QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
+ } else {
+ QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
+ QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
+ }
+
while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
unsigned int bytes = qdisc_pkt_len(skb);
bool is_tso = skb_is_gso(skb);
unsigned int packets;
packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
- if (error) {
- QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
- } else {
- QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
- QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
+ if (!error) {
if (skb->ip_summed == CHECKSUM_PARTIAL)
QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
if (skb_is_nonlinear(skb))
@@ -1208,6 +1205,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
buf->next_element_to_fill = 0;
+ buf->frames = 0;
buf->bytes = 0;
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
@@ -1243,9 +1241,12 @@ EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
- unsigned int count = single ? 1 : card->dev->num_tx_queues;
+ unsigned int max = single ? 1 : card->dev->num_tx_queues;
+ unsigned int count;
int rc;
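+ /* Keep VM NICs at their currently active queue count rather than
+ * growing back to the maximum (qeth_setup_netdev() starts them with a
+ * single TX queue):
+ */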
+ count = IS_VM_NIC(card) ? min(max, card->dev->real_num_tx_queues) : max;
+
rtnl_lock();
rc = netif_set_real_num_tx_queues(card->dev, count);
rtnl_unlock();
@@ -1253,16 +1254,16 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
if (rc)
return rc;
- if (card->qdio.no_out_queues == count)
+ if (card->qdio.no_out_queues == max)
return 0;
if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
qeth_free_qdio_queues(card);
- if (count == 1)
+ if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
- card->qdio.no_out_queues = count;
+ card->qdio.no_out_queues = max;
return 0;
}
@@ -1314,7 +1315,6 @@ static void qeth_set_initial_options(struct qeth_card *card)
{
card->options.route4.type = NO_ROUTER;
card->options.route6.type = NO_ROUTER;
- card->options.rx_sg_cb = QETH_RX_SG_CB;
card->options.isolation = ISOLATION_MODE_NONE;
card->options.cq = QETH_CQ_DISABLED;
card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
@@ -1682,17 +1682,16 @@ static void qeth_set_blkt_defaults(struct qeth_card *card)
}
}
-static void qeth_init_tokens(struct qeth_card *card)
+static void qeth_idx_init(struct qeth_card *card)
{
+ memset(&card->seqno, 0, sizeof(card->seqno));
+
card->token.issuer_rm_w = 0x00010103UL;
card->token.cm_filter_w = 0x00010108UL;
card->token.cm_connection_w = 0x0001010aUL;
card->token.ulp_filter_w = 0x0001010bUL;
card->token.ulp_connection_w = 0x0001010dUL;
-}
-static void qeth_init_func_level(struct qeth_card *card)
-{
switch (card->info.type) {
case QETH_CARD_TYPE_IQD:
card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
@@ -2405,6 +2404,8 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
queue->card = card;
queue->queue_no = i;
timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
+ queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
+ queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
/* give outbound qeth_qdio_buffers their qdio_buffers */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
@@ -2628,15 +2629,13 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card)
static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
struct qeth_card *card)
{
- struct list_head *plh;
struct qeth_buffer_pool_entry *entry;
int i, free;
if (list_empty(&card->qdio.in_buf_pool.entry_list))
return NULL;
- list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
- entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
+ list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
free = 1;
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
if (page_count(entry->elements[i]) > 1) {
@@ -2651,11 +2650,11 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
}
/* no free buffer in pool so take first one and swap pages */
- entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
- struct qeth_buffer_pool_entry, list);
+ entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
+ struct qeth_buffer_pool_entry, list);
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
if (page_count(entry->elements[i]) > 1) {
- struct page *page = alloc_page(GFP_ATOMIC);
+ struct page *page = dev_alloc_page();
if (!page)
return NULL;
@@ -2763,6 +2762,7 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
queue->next_buf_to_fill = 0;
queue->do_pack = 0;
queue->prev_hdr = NULL;
+ queue->coalesced_frames = 0;
queue->bulk_start = 0;
queue->bulk_count = 0;
queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
@@ -3353,16 +3353,21 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
for (i = index; i < index + count; ++i) {
unsigned int bidx = QDIO_BUFNR(i);
+ struct sk_buff *skb;
buf = queue->bufs[bidx];
buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
SBAL_EFLAGS_LAST_ENTRY;
+ queue->coalesced_frames += buf->frames;
if (queue->bufstates)
queue->bufstates[bidx].user = buf;
- if (IS_IQD(queue->card))
+ if (IS_IQD(card)) {
+ skb_queue_walk(&buf->skb_list, skb)
+ skb_tx_timestamp(skb);
continue;
+ }
if (!queue->do_pack) {
if ((atomic_read(&queue->used_buffers) >=
@@ -3390,6 +3395,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
}
}
+ QETH_TXQ_STAT_INC(queue, doorbell);
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT;
@@ -3397,8 +3403,18 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
queue->queue_no, index, count);
/* Fake the TX completion interrupt: */
- if (IS_IQD(card))
- napi_schedule(&queue->napi);
+ if (IS_IQD(card)) {
+ unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
+ unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
+
+ if (frames && queue->coalesced_frames >= frames) {
+ napi_schedule(&queue->napi);
+ queue->coalesced_frames = 0;
+ QETH_TXQ_STAT_INC(queue, coal_frames);
+ } else if (usecs) {
+ qeth_tx_arm_timer(queue, usecs);
+ }
+ }
if (rc) {
/* ignore temporary SIGA errors without busy condition */
@@ -3462,13 +3478,11 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
}
}
-static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
- unsigned long card_ptr)
+static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *)card_ptr;
- if (card->dev->flags & IFF_UP)
- napi_schedule_irqoff(&card->napi);
+ napi_schedule_irqoff(&card->napi);
}
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
@@ -3502,9 +3516,6 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
int i;
int rc;
- if (!qeth_is_cq(card, queue))
- return;
-
QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
@@ -3550,9 +3561,7 @@ static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
- if (qeth_is_cq(card, queue))
- qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
- else if (qdio_err)
+ if (qdio_err)
qeth_schedule_recovery(card);
}
@@ -3635,6 +3644,8 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
return ~ntohs(veth->h_vlan_TCI) >>
(VLAN_PRIO_SHIFT + 1) & 3;
break;
+ case QETH_PRIO_Q_ING_FIXED:
+ return card->qdio.default_out_queue;
default:
break;
}
@@ -3707,6 +3718,7 @@ static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
unsigned int hdr_len, unsigned int proto_len,
unsigned int *elements)
{
+ gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
const unsigned int contiguous = proto_len ? proto_len : 1;
const unsigned int max_elements = queue->max_elements;
unsigned int __elements;
@@ -3762,10 +3774,11 @@ check_layout:
*hdr = skb_push(skb, hdr_len);
return hdr_len;
}
- /* fall back */
+
+ /* Fall back to cache element with known-good alignment: */
if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
return -E2BIG;
- *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+ *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
if (!*hdr)
return -ENOMEM;
/* Copy protocol headers behind HW header: */
@@ -3948,6 +3961,7 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
buffer->bytes += bytes;
+ buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
queue->prev_hdr = hdr;
flush = __netdev_tx_sent_queue(txq, bytes,
@@ -4038,6 +4052,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
+ buffer->bytes += qdisc_pkt_len(skb);
+ buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
if (queue->do_pack)
QETH_TXQ_STAT_INC(queue, skbs_pack);
@@ -4797,10 +4813,7 @@ out:
}
static void qeth_qdio_establish_cq(struct qeth_card *card,
- struct qdio_buffer **in_sbal_ptrs,
- void (**queue_start_poll)
- (struct ccw_device *, int,
- unsigned long))
+ struct qdio_buffer **in_sbal_ptrs)
{
int i;
@@ -4811,8 +4824,6 @@ static void qeth_qdio_establish_cq(struct qeth_card *card,
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
in_sbal_ptrs[offset + i] =
card->qdio.c_q->bufs[i].buffer;
-
- queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
}
}
@@ -4821,7 +4832,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
struct qdio_initialize init_data;
char *qib_param_field;
struct qdio_buffer **in_sbal_ptrs;
- void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
struct qdio_buffer **out_sbal_ptrs;
int i, j, k;
int rc = 0;
@@ -4848,16 +4858,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer;
- queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
- GFP_KERNEL);
- if (!queue_start_poll) {
- rc = -ENOMEM;
- goto out_free_in_sbals;
- }
- for (i = 0; i < card->qdio.no_in_queues; ++i)
- queue_start_poll[i] = qeth_qdio_start_poll;
-
- qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
+ qeth_qdio_establish_cq(card, in_sbal_ptrs);
out_sbal_ptrs =
kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
@@ -4865,7 +4866,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
GFP_KERNEL);
if (!out_sbal_ptrs) {
rc = -ENOMEM;
- goto out_free_queue_start_poll;
+ goto out_free_in_sbals;
}
for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
@@ -4883,7 +4884,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = qeth_qdio_input_handler;
init_data.output_handler = qeth_qdio_output_handler;
- init_data.queue_start_poll_array = queue_start_poll;
+ init_data.irq_poll = qeth_qdio_poll;
init_data.int_parm = (unsigned long) card;
init_data.input_sbal_addr_array = in_sbal_ptrs;
init_data.output_sbal_addr_array = out_sbal_ptrs;
@@ -4916,8 +4917,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
}
out:
kfree(out_sbal_ptrs);
-out_free_queue_start_poll:
- kfree(queue_start_poll);
out_free_in_sbals:
kfree(in_sbal_ptrs);
out_free_qib_param:
@@ -4952,12 +4951,16 @@ static struct ccw_device_id qeth_ids[] = {
.driver_info = QETH_CARD_TYPE_OSD},
{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
.driver_info = QETH_CARD_TYPE_IQD},
+#ifdef CONFIG_QETH_OSN
{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
.driver_info = QETH_CARD_TYPE_OSN},
+#endif
{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
.driver_info = QETH_CARD_TYPE_OSM},
+#ifdef CONFIG_QETH_OSX
{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
.driver_info = QETH_CARD_TYPE_OSX},
+#endif
{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);
@@ -5012,9 +5015,9 @@ retriable:
else
goto retry;
}
+
qeth_determine_capabilities(card);
- qeth_init_tokens(card);
- qeth_init_func_level(card);
+ qeth_idx_init(card);
rc = qeth_idx_activate_read_channel(card);
if (rc == -EINTR) {
@@ -5324,13 +5327,13 @@ static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
}
static int qeth_extract_skb(struct qeth_card *card,
- struct qeth_qdio_buffer *qethbuffer,
- struct qdio_buffer_element **__element,
+ struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
int *__offset)
{
- struct qdio_buffer_element *element = *__element;
+ struct qeth_priv *priv = netdev_priv(card->dev);
struct qdio_buffer *buffer = qethbuffer->buffer;
struct napi_struct *napi = &card->napi;
+ struct qdio_buffer_element *element;
unsigned int linear_len = 0;
bool uses_frags = false;
int offset = *__offset;
@@ -5340,6 +5343,8 @@ static int qeth_extract_skb(struct qeth_card *card,
struct sk_buff *skb;
int skb_len = 0;
+ element = &buffer->element[*element_no];
+
next_packet:
/* qeth_hdr must not cross element boundaries */
while (element->length < offset + sizeof(struct qeth_hdr)) {
@@ -5404,7 +5409,7 @@ next_packet:
}
use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
- (skb_len > card->options.rx_sg_cb &&
+ (skb_len > READ_ONCE(priv->rx_copybreak) &&
!atomic_read(&card->force_alloc_skb) &&
!IS_OSN(card));
@@ -5495,22 +5500,20 @@ walk_packet:
if (!skb)
goto next_packet;
- *__element = element;
+ *element_no = element - &buffer->element[0];
*__offset = offset;
qeth_receive_skb(card, skb, hdr, uses_frags);
return 0;
}
-static int qeth_extract_skbs(struct qeth_card *card, int budget,
- struct qeth_qdio_buffer *buf, bool *done)
+static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
+ struct qeth_qdio_buffer *buf, bool *done)
{
- int work_done = 0;
-
- *done = false;
+ unsigned int work_done = 0;
while (budget) {
- if (qeth_extract_skb(card, buf, &card->rx.b_element,
+ if (qeth_extract_skb(card, buf, &card->rx.buf_element,
&card->rx.e_offset)) {
*done = true;
break;
@@ -5523,15 +5526,16 @@ static int qeth_extract_skbs(struct qeth_card *card, int budget,
return work_done;
}
-int qeth_poll(struct napi_struct *napi, int budget)
+static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
{
- struct qeth_card *card = container_of(napi, struct qeth_card, napi);
- int work_done = 0;
- struct qeth_qdio_buffer *buffer;
- int new_budget = budget;
- bool done;
+ unsigned int work_done = 0;
- while (1) {
+ while (budget > 0) {
+ struct qeth_qdio_buffer *buffer;
+ unsigned int skbs_done = 0;
+ bool done = false;
+
+ /* Fetch completed RX buffers: */
if (!card->rx.b_count) {
card->rx.qdio_err = 0;
card->rx.b_count = qdio_get_next_buffers(
@@ -5541,50 +5545,73 @@ int qeth_poll(struct napi_struct *napi, int budget)
card->rx.b_count = 0;
break;
}
- card->rx.b_element =
- &card->qdio.in_q->bufs[card->rx.b_index]
- .buffer->element[0];
- card->rx.e_offset = 0;
}
- while (card->rx.b_count) {
- buffer = &card->qdio.in_q->bufs[card->rx.b_index];
- if (!(card->rx.qdio_err &&
- qeth_check_qdio_errors(card, buffer->buffer,
- card->rx.qdio_err, "qinerr")))
- work_done += qeth_extract_skbs(card, new_budget,
- buffer, &done);
- else
- done = true;
-
- if (done) {
- QETH_CARD_STAT_INC(card, rx_bufs);
- qeth_put_buffer_pool_entry(card,
- buffer->pool_entry);
- qeth_queue_input_buffer(card, card->rx.b_index);
- card->rx.b_count--;
- if (card->rx.b_count) {
- card->rx.b_index =
- QDIO_BUFNR(card->rx.b_index + 1);
- card->rx.b_element =
- &card->qdio.in_q
- ->bufs[card->rx.b_index]
- .buffer->element[0];
- card->rx.e_offset = 0;
- }
- }
+ /* Process one completed RX buffer: */
+ buffer = &card->qdio.in_q->bufs[card->rx.b_index];
+ if (!(card->rx.qdio_err &&
+ qeth_check_qdio_errors(card, buffer->buffer,
+ card->rx.qdio_err, "qinerr")))
+ skbs_done = qeth_extract_skbs(card, budget, buffer,
+ &done);
+ else
+ done = true;
- if (work_done >= budget)
- goto out;
- else
- new_budget = budget - work_done;
+ work_done += skbs_done;
+ budget -= skbs_done;
+
+ if (done) {
+ QETH_CARD_STAT_INC(card, rx_bufs);
+ qeth_put_buffer_pool_entry(card, buffer->pool_entry);
+ qeth_queue_input_buffer(card, card->rx.b_index);
+ card->rx.b_count--;
+
+ /* Step forward to next buffer: */
+ card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
+ card->rx.buf_element = 0;
+ card->rx.e_offset = 0;
}
}
+ return work_done;
+}
+
+static void qeth_cq_poll(struct qeth_card *card)
+{
+ unsigned int work_done = 0;
+
+ while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
+ unsigned int start, error;
+ int completed;
+
+ completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
+ &error);
+ if (completed <= 0)
+ return;
+
+ qeth_qdio_cq_handler(card, error, 1, start, completed);
+ work_done += completed;
+ }
+}
+
+int qeth_poll(struct napi_struct *napi, int budget)
+{
+ struct qeth_card *card = container_of(napi, struct qeth_card, napi);
+ unsigned int work_done;
+
+ work_done = qeth_rx_poll(card, budget);
+
+ if (card->options.cq == QETH_CQ_ENABLED)
+ qeth_cq_poll(card);
+
+ /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
+ if (budget && work_done >= budget)
+ return work_done;
+
if (napi_complete_done(napi, work_done) &&
- qdio_start_irq(CARD_DDEV(card), 0))
+ qdio_start_irq(CARD_DDEV(card)))
napi_schedule(napi);
-out:
+
return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);
@@ -5658,7 +5685,7 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
if (completed <= 0) {
/* Ensure we see TX completion for pending work: */
if (napi_complete_done(napi, 0))
- qeth_tx_arm_timer(queue);
+ qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
return 0;
}
@@ -5667,7 +5694,7 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
unsigned int bidx = QDIO_BUFNR(i);
buffer = queue->bufs[bidx];
- packets += skb_queue_len(&buffer->skb_list);
+ packets += buffer->frames;
bytes += buffer->bytes;
qeth_handle_send_error(card, buffer, error);
@@ -5953,25 +5980,30 @@ static void qeth_clear_dbf_list(void)
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
struct net_device *dev;
+ struct qeth_priv *priv;
switch (card->info.type) {
case QETH_CARD_TYPE_IQD:
- dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
+ dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
ether_setup, QETH_MAX_QUEUES, 1);
break;
case QETH_CARD_TYPE_OSM:
- dev = alloc_etherdev(0);
+ dev = alloc_etherdev(sizeof(*priv));
break;
case QETH_CARD_TYPE_OSN:
- dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
+ dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
+ ether_setup);
break;
default:
- dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
+ dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_QUEUES, 1);
}
if (!dev)
return NULL;
+ priv = netdev_priv(dev);
+ priv->rx_copybreak = QETH_RX_COPYBREAK;
+
dev->ml_priv = card;
dev->watchdog_timeo = QETH_TX_TIMEOUT;
dev->min_mtu = IS_OSN(card) ? 64 : 576;
@@ -5981,22 +6013,8 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
SET_NETDEV_DEV(dev, &card->gdev->dev);
netif_carrier_off(dev);
- if (IS_OSN(card)) {
- dev->ethtool_ops = &qeth_osn_ethtool_ops;
- } else {
- dev->ethtool_ops = &qeth_ethtool_ops;
- dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- dev->hw_features |= NETIF_F_SG;
- dev->vlan_features |= NETIF_F_SG;
- if (IS_IQD(card)) {
- dev->features |= NETIF_F_SG;
- if (netif_set_real_num_tx_queues(dev,
- QETH_IQD_MIN_TXQ)) {
- free_netdev(dev);
- return NULL;
- }
- }
- }
+ dev->ethtool_ops = IS_OSN(card) ? &qeth_osn_ethtool_ops :
+ &qeth_ethtool_ops;
return dev;
}
@@ -6012,6 +6030,28 @@ struct net_device *qeth_clone_netdev(struct net_device *orig)
return clone;
}
+int qeth_setup_netdev(struct qeth_card *card)
+{
+ struct net_device *dev = card->dev;
+ unsigned int num_tx_queues;
+
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->hw_features |= NETIF_F_SG;
+ dev->vlan_features |= NETIF_F_SG;
+
+ if (IS_IQD(card)) {
+ dev->features |= NETIF_F_SG;
+ num_tx_queues = QETH_IQD_MIN_TXQ;
+ } else if (IS_VM_NIC(card)) {
+ num_tx_queues = 1;
+ } else {
+ num_tx_queues = dev->real_num_tx_queues;
+ }
+
+ return qeth_set_real_num_tx_queues(card, num_tx_queues);
+}
+EXPORT_SYMBOL_GPL(qeth_setup_netdev);
+
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
@@ -6051,12 +6091,13 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
goto err_card;
}
+ qeth_determine_capabilities(card);
+ qeth_set_blkt_defaults(card);
+
card->qdio.no_out_queues = card->dev->num_tx_queues;
rc = qeth_update_from_chp_desc(card);
if (rc)
goto err_chp_desc;
- qeth_determine_capabilities(card);
- qeth_set_blkt_defaults(card);
enforced_disc = qeth_enforce_discipline(card);
switch (enforced_disc) {
@@ -6241,9 +6282,6 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct mii_ioctl_data *mii_data;
int rc = 0;
- if (!card)
- return -ENODEV;
-
switch (cmd) {
case SIOC_QETH_ADP_SET_SNMP_CONTROL:
rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
@@ -6623,12 +6661,59 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);
+#define TC_IQD_UCAST 0
+static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
+ unsigned int ucast_txqs)
+{
+ unsigned int prio;
+
+ /* IQD requires mcast traffic to be placed on a dedicated queue, and
+ * qeth_iqd_select_queue() deals with this.
+ * For unicast traffic, we defer the queue selection to the stack.
+	 * By installing a trivial prio map that spans only the unicast
+	 * queues, we encourage the stack to spread ucast traffic evenly
+	 * without ever selecting the mcast queue.
+ */
+
+ /* One traffic class, spanning over all active ucast queues: */
+ netdev_set_num_tc(dev, 1);
+ netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
+ QETH_IQD_MIN_UCAST_TXQ);
+
+ /* Map all priorities to this traffic class: */
+ for (prio = 0; prio <= TC_BITMASK; prio++)
+ netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
+}
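
For reference, the stack consumes this map when picking an egress queue; a simplified sketch of the skb_tx_hash()-style logic in net/core/dev.c (not part of this patch, and eliding the zero-count corner case):

    static u16 pick_ucast_txq(const struct net_device *dev, struct sk_buff *skb)
    {
    	int tc = netdev_get_prio_tc_map(dev, skb->priority & TC_BITMASK);
    	const struct netdev_tc_txq *tcq = &dev->tc_to_txq[tc];

    	/* hash the flow into [offset, offset + count), i.e. the ucast range */
    	return tcq->offset + (skb_get_hash(skb) % tcq->count);
    }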
+
+int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
+{
+ struct net_device *dev = card->dev;
+ int rc;
+
+ /* Per netif_setup_tc(), adjust the mapping first: */
+ if (IS_IQD(card))
+ qeth_iqd_set_prio_tc_map(dev, count - 1);
+
+ rc = netif_set_real_num_tx_queues(dev, count);
+
+ if (rc && IS_IQD(card))
+ qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);
+
+ return rc;
+}
+
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
u8 cast_type, struct net_device *sb_dev)
{
+ u16 txq;
+
if (cast_type != RTN_UNICAST)
return QETH_IQD_MCAST_TXQ;
- return QETH_IQD_MIN_UCAST_TXQ;
+ if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
+ return QETH_IQD_MIN_UCAST_TXQ;
+
+ txq = netdev_pick_tx(dev, skb, sb_dev);
+ return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
@@ -6638,9 +6723,6 @@ int qeth_open(struct net_device *dev)
QETH_CARD_TEXT(card, 4, "qethopen");
- if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
- return -EIO;
-
card->data.state = CH_STATE_UP;
netif_tx_start_all_queues(dev);
@@ -6690,6 +6772,8 @@ int qeth_stop(struct net_device *dev)
}
napi_disable(&card->napi);
+ qdio_stop_irq(CARD_DDEV(card));
+
return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 3865f7258449..d89a04bfd8b0 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -74,8 +74,19 @@ enum qeth_card_types {
#define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD)
#define IS_OSD(card) ((card)->info.type == QETH_CARD_TYPE_OSD)
#define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM)
+
+#ifdef CONFIG_QETH_OSN
#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
+#else
+#define IS_OSN(card) false
+#endif
+
+#ifdef CONFIG_QETH_OSX
#define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX)
+#else
+#define IS_OSX(card) false
+#endif
+
#define IS_VM_NIC(card) ((card)->info.is_vm_nic)
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
@@ -93,10 +104,6 @@ enum qeth_link_types {
QETH_LINK_TYPE_LANE = 0x88,
};
-/*
- * Routing stuff
- */
-#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */
enum qeth_routing_types {
/* TODO: set to bit flag used in IPA Command */
NO_ROUTER = 0,
@@ -427,7 +434,6 @@ struct qeth_ipacmd_setassparms {
struct qeth_arp_cache_entry arp_entry;
struct qeth_arp_query_data query_arp;
struct qeth_tso_start_data tso;
- __u8 ip[16];
} data;
} __attribute__ ((packed));
@@ -550,8 +556,9 @@ struct qeth_ipacmd_setadpparms {
/* CREATE_ADDR IPA Command: ***********************************************/
struct qeth_create_destroy_address {
- __u8 unique_id[8];
-} __attribute__ ((packed));
+ u8 mac_addr[ETH_ALEN];
+ u16 uid;
+};
/* SET DIAGNOSTIC ASSIST IPA Command: *************************************/
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 78cae61bc924..d7e429f6631e 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -176,7 +176,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;
- if (IS_IQD(card))
+ if (IS_IQD(card) || IS_VM_NIC(card))
return -EOPNOTSUPP;
mutex_lock(&card->conf_mutex);
@@ -211,16 +211,16 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else if (sysfs_streq(buf, "no_prio_queueing:0")) {
- card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+ card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
card->qdio.default_out_queue = 0;
} else if (sysfs_streq(buf, "no_prio_queueing:1")) {
- card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+ card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
card->qdio.default_out_queue = 1;
} else if (sysfs_streq(buf, "no_prio_queueing:2")) {
- card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+ card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
card->qdio.default_out_queue = 2;
} else if (sysfs_streq(buf, "no_prio_queueing:3")) {
- card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
+ card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED;
card->qdio.default_out_queue = 3;
} else if (sysfs_streq(buf, "no_prio_queueing")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index ab59bc975719..ebdc03210608 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -39,6 +39,8 @@ static const struct qeth_stats txq_stats[] = {
QETH_TXQ_STAT("TSO bytes", tso_bytes),
QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
QETH_TXQ_STAT("Queue stopped", stopped),
+ QETH_TXQ_STAT("Doorbell", doorbell),
+ QETH_TXQ_STAT("IRQ for frames", coal_frames),
QETH_TXQ_STAT("Completion yield", completion_yield),
QETH_TXQ_STAT("Completion timer", completion_timer),
};
@@ -108,6 +110,38 @@ static void qeth_get_ethtool_stats(struct net_device *dev,
txq_stats, TXQ_STATS_LEN);
}
+static void __qeth_set_coalesce(struct net_device *dev,
+ struct qeth_qdio_out_q *queue,
+ struct ethtool_coalesce *coal)
+{
+ WRITE_ONCE(queue->coalesce_usecs, coal->tx_coalesce_usecs);
+ WRITE_ONCE(queue->max_coalesced_frames, coal->tx_max_coalesced_frames);
+
+ if (coal->tx_coalesce_usecs &&
+ netif_running(dev) &&
+ !qeth_out_queue_is_empty(queue))
+ qeth_tx_arm_timer(queue, coal->tx_coalesce_usecs);
+}
+
+static int qeth_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct qeth_card *card = dev->ml_priv;
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
+
+ if (!IS_IQD(card))
+ return -EOPNOTSUPP;
+
+ if (!coal->tx_coalesce_usecs && !coal->tx_max_coalesced_frames)
+ return -EINVAL;
+
+ qeth_for_each_output_queue(card, queue, i)
+ __qeth_set_coalesce(dev, queue, coal);
+
+ return 0;
+}
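
A sketch of how the TX side might act on these two knobs (an assumption for illustration; qeth's real decision sits in its qdio completion handling, via qeth_tx_arm_timer() above):

    /* Fire a completion IRQ once enough frames are pending; otherwise
     * rely on the timer armed for coalesce_usecs.
     */
    static bool tx_completion_irq_due(struct qeth_qdio_out_q *queue,
    				      unsigned int pending_frames)
    {
    	return pending_frames >= READ_ONCE(queue->max_coalesced_frames);
    }

From userspace both entry points map onto the same helper, e.g. "ethtool -C <dev> tx-usecs 100 tx-frames 32" for the global path (assuming a reasonably recent ethtool).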
+
static void qeth_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
@@ -153,7 +187,6 @@ static void qeth_get_drvinfo(struct net_device *dev,
strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
sizeof(info->driver));
- strlcpy(info->version, "1.0", sizeof(info->version));
strlcpy(info->fw_version, card->info.mcl_level,
sizeof(info->fw_version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
@@ -175,6 +208,112 @@ static void qeth_get_channels(struct net_device *dev,
channels->combined_count = 0;
}
+static int qeth_set_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (channels->rx_count == 0 || channels->tx_count == 0)
+ return -EINVAL;
+ if (channels->tx_count > card->qdio.no_out_queues)
+ return -EINVAL;
+
+ if (IS_IQD(card)) {
+ if (channels->tx_count < QETH_IQD_MIN_TXQ)
+ return -EINVAL;
+
+ /* Reject downgrade while running. It could push displaced
+ * ucast flows onto txq0, which is reserved for mcast.
+ */
+ if (netif_running(dev) &&
+ channels->tx_count < dev->real_num_tx_queues)
+ return -EPERM;
+ } else {
+ /* OSA still uses the legacy prio-queue mechanism: */
+ if (!IS_VM_NIC(card))
+ return -EOPNOTSUPP;
+ }
+
+ return qeth_set_real_num_tx_queues(card, channels->tx_count);
+}
+
+static int qeth_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (!IS_IQD(card))
+ return -EOPNOTSUPP;
+
+ return ethtool_op_get_ts_info(dev, info);
+}
+
+static int qeth_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct qeth_priv *priv = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = priv->rx_copybreak;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qeth_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct qeth_priv *priv = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ WRITE_ONCE(priv->rx_copybreak, *(u32 *)data);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qeth_get_per_queue_coalesce(struct net_device *dev, u32 __queue,
+ struct ethtool_coalesce *coal)
+{
+ struct qeth_card *card = dev->ml_priv;
+ struct qeth_qdio_out_q *queue;
+
+ if (!IS_IQD(card))
+ return -EOPNOTSUPP;
+
+ if (__queue >= card->qdio.no_out_queues)
+ return -EINVAL;
+
+ queue = card->qdio.out_qs[__queue];
+
+ coal->tx_coalesce_usecs = queue->coalesce_usecs;
+ coal->tx_max_coalesced_frames = queue->max_coalesced_frames;
+ return 0;
+}
+
+static int qeth_set_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (!IS_IQD(card))
+ return -EOPNOTSUPP;
+
+ if (queue >= card->qdio.no_out_queues)
+ return -EINVAL;
+
+ if (!coal->tx_coalesce_usecs && !coal->tx_max_coalesced_frames)
+ return -EINVAL;
+
+ __qeth_set_coalesce(dev, card->qdio.out_qs[queue], coal);
+ return 0;
+}
+
/* Helper function to fill 'advertising' and 'supported' which are the same. */
/* Autoneg and full-duplex are supported and advertised unconditionally. */
/* Always advertise and support all speeds up to specified, and only one */
@@ -374,13 +513,22 @@ static int qeth_get_link_ksettings(struct net_device *netdev,
}
const struct ethtool_ops qeth_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES,
.get_link = ethtool_op_get_link,
+ .set_coalesce = qeth_set_coalesce,
.get_ringparam = qeth_get_ringparam,
.get_strings = qeth_get_strings,
.get_ethtool_stats = qeth_get_ethtool_stats,
.get_sset_count = qeth_get_sset_count,
.get_drvinfo = qeth_get_drvinfo,
.get_channels = qeth_get_channels,
+ .set_channels = qeth_set_channels,
+ .get_ts_info = qeth_get_ts_info,
+ .get_tunable = qeth_get_tunable,
+ .set_tunable = qeth_set_tunable,
+ .get_per_queue_coalesce = qeth_get_per_queue_coalesce,
+ .set_per_queue_coalesce = qeth_set_per_queue_coalesce,
.get_link_ksettings = qeth_get_link_ksettings,
};
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8fb29371788b..0bd5b09e7a22 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -52,11 +52,11 @@ static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
break;
case IPA_RC_L2_DUP_MAC:
case IPA_RC_L2_DUP_LAYER3_MAC:
- rc = -EEXIST;
+ rc = -EADDRINUSE;
break;
case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
- rc = -EPERM;
+ rc = -EADDRNOTAVAIL;
break;
case IPA_RC_L2_MAC_NOT_FOUND:
rc = -ENOENT;
@@ -105,11 +105,11 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
"MAC address %pM successfully registered\n", mac);
} else {
switch (rc) {
- case -EEXIST:
+ case -EADDRINUSE:
dev_warn(&card->gdev->dev,
"MAC address %pM already exists\n", mac);
break;
- case -EPERM:
+ case -EADDRNOTAVAIL:
dev_warn(&card->gdev->dev,
"MAC address %pM is not authorized\n", mac);
break;
@@ -126,7 +126,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
QETH_CARD_TEXT(card, 2, "L2Wmac");
rc = qeth_l2_send_setdelmac(card, mac, cmd);
- if (rc == -EEXIST)
+ if (rc == -EADDRINUSE)
QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n",
CARD_DEVID(card));
else if (rc)
@@ -291,7 +291,6 @@ static void qeth_l2_stop_card(struct qeth_card *card)
qeth_qdio_clear_card(card, 0);
qeth_clear_working_pool_list(card);
flush_workqueue(card->event_wq);
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
card->info.promisc_mode = 0;
}
@@ -337,14 +336,16 @@ static void qeth_l2_register_dev_addr(struct qeth_card *card)
qeth_l2_request_initial_mac(card);
if (!IS_OSN(card) && !qeth_l2_send_setmac(card, card->dev->dev_addr))
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ card->info.dev_addr_is_registered = 1;
+ else
+ card->info.dev_addr_is_registered = 0;
}
static int qeth_l2_validate_addr(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+ if (card->info.dev_addr_is_registered)
return eth_validate_addr(dev);
QETH_CARD_TEXT(card, 4, "nomacadr");
@@ -370,7 +371,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
/* don't register the same address twice */
if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
- (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+ card->info.dev_addr_is_registered)
return 0;
/* add the new address, switch over, drop the old */
@@ -380,9 +381,9 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
ether_addr_copy(old_addr, dev->dev_addr);
ether_addr_copy(dev->dev_addr, addr->sa_data);
- if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+ if (card->info.dev_addr_is_registered)
qeth_l2_remove_mac(card, old_addr);
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ card->info.dev_addr_is_registered = 1;
return 0;
}
@@ -499,6 +500,7 @@ static void qeth_l2_rx_mode_work(struct work_struct *work)
static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue)
{
+ gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
struct qeth_hdr *hdr = (struct qeth_hdr *)skb->data;
addr_t end = (addr_t)(skb->data + sizeof(*hdr));
addr_t start = (addr_t)skb->data;
@@ -511,7 +513,7 @@ static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
if (qeth_get_elements_for_range(start, end) > 1) {
/* Misaligned HW header, move it to its own buffer element. */
- hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+ hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
if (!hdr)
return -ENOMEM;
hd_len = sizeof(*hdr);
@@ -570,7 +572,9 @@ static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
return qeth_iqd_select_queue(dev, skb,
qeth_get_ether_cast_type(skb),
sb_dev);
- return qeth_get_priority_queue(card, skb);
+
+ return IS_VM_NIC(card) ? netdev_pick_tx(dev, skb, sb_dev) :
+ qeth_get_priority_queue(card, skb);
}
static const struct device_type qeth_l2_devtype = {
@@ -583,6 +587,9 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
+ if (IS_OSN(card))
+ dev_notice(&gdev->dev, "OSN support will be dropped in 2021\n");
+
qeth_l2_vnicc_set_defaults(card);
mutex_init(&card->sbp_lock);
@@ -610,7 +617,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_set_offline(card, false);
cancel_work_sync(&card->close_dev_work);
- if (qeth_netdev_is_registered(card->dev))
+ if (card->dev->reg_state == NETREG_REGISTERED)
unregister_netdev(card->dev);
}
@@ -648,7 +655,7 @@ static const struct net_device_ops qeth_osn_netdev_ops = {
.ndo_tx_timeout = qeth_tx_timeout,
};
-static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
+static int qeth_l2_setup_netdev(struct qeth_card *card)
{
int rc;
@@ -658,6 +665,10 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
goto add_napi;
}
+ rc = qeth_setup_netdev(card);
+ if (rc)
+ return rc;
+
card->dev->needed_headroom = sizeof(struct qeth_hdr);
card->dev->netdev_ops = &qeth_l2_netdev_ops;
card->dev->priv_flags |= IFF_UNICAST_FLT;
@@ -704,13 +715,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
add_napi:
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
- rc = register_netdev(card->dev);
- if (!rc && carrier_ok)
- netif_carrier_on(card->dev);
-
- if (rc)
- card->dev->netdev_ops = NULL;
- return rc;
+ return register_netdev(card->dev);
}
static void qeth_l2_trace_features(struct qeth_card *card)
@@ -783,10 +788,13 @@ static int qeth_l2_set_online(struct qeth_card *card)
qeth_set_allowed_threads(card, 0xffffffff, 0);
- if (!qeth_netdev_is_registered(dev)) {
- rc = qeth_l2_setup_netdev(card, carrier_ok);
+ if (dev->reg_state != NETREG_REGISTERED) {
+ rc = qeth_l2_setup_netdev(card);
if (rc)
goto out_remove;
+
+ if (carrier_ok)
+ netif_carrier_on(dev);
} else {
rtnl_lock();
if (carrier_ok)
@@ -864,6 +872,7 @@ struct qeth_discipline qeth_l2_discipline = {
};
EXPORT_SYMBOL_GPL(qeth_l2_discipline);
+#ifdef CONFIG_QETH_OSN
static void qeth_osn_assist_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
@@ -940,6 +949,7 @@ void qeth_osn_deregister(struct net_device *dev)
return;
}
EXPORT_SYMBOL(qeth_osn_deregister);
+#endif
/* SETBRIDGEPORT support, async notifications */
@@ -1512,8 +1522,6 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
struct ccw_device *ddev;
struct subchannel_id schid;
- if (!card)
- return -EINVAL;
if (!card->options.sbp.supported_funcs)
return -EOPNOTSUPP;
ddev = CARD_DDEV(card);
@@ -1568,23 +1576,11 @@ static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc)
return rc;
}
-/* generic VNICC request call back control */
-struct _qeth_l2_vnicc_request_cbctl {
- struct {
- union{
- u32 *sup_cmds;
- u32 *timeout;
- };
- } result;
-};
-
/* generic VNICC request call back */
static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
struct qeth_reply *reply,
unsigned long data)
{
- struct _qeth_l2_vnicc_request_cbctl *cbctl =
- (struct _qeth_l2_vnicc_request_cbctl *) reply->param;
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc;
u32 sub_cmd = cmd->data.vnicc.hdr.sub_command;
@@ -1597,9 +1593,9 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled;
if (sub_cmd == IPA_VNICC_QUERY_CMDS)
- *cbctl->result.sup_cmds = rep->data.query_cmds.sup_cmds;
+ *(u32 *)reply->param = rep->data.query_cmds.sup_cmds;
else if (sub_cmd == IPA_VNICC_GET_TIMEOUT)
- *cbctl->result.timeout = rep->data.getset_timeout.timeout;
+ *(u32 *)reply->param = rep->data.getset_timeout.timeout;
return 0;
}
@@ -1640,7 +1636,6 @@ static int qeth_l2_vnicc_query_chars(struct qeth_card *card)
static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
u32 *sup_cmds)
{
- struct _qeth_l2_vnicc_request_cbctl cbctl;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccqcm");
@@ -1651,10 +1646,7 @@ static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
__ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;
- /* prepare callback control */
- cbctl.result.sup_cmds = sup_cmds;
-
- return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, sup_cmds);
}
/* VNICC enable/disable characteristic request */
@@ -1678,7 +1670,6 @@ static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
u32 cmd, u32 *timeout)
{
struct qeth_vnicc_getset_timeout *getset_timeout;
- struct _qeth_l2_vnicc_request_cbctl cbctl;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccgst");
@@ -1693,11 +1684,7 @@ static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
if (cmd == IPA_VNICC_SET_TIMEOUT)
getset_timeout->timeout = *timeout;
- /* prepare callback control */
- if (cmd == IPA_VNICC_GET_TIMEOUT)
- cbctl.result.timeout = timeout;
-
- return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout);
}
/* set current VNICC flag state; called from sysfs store function */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 82f800d1d7b3..0742a749d26e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -920,9 +920,11 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
if (cmd->hdr.return_code)
return -EIO;
+ if (!is_valid_ether_addr(cmd->data.create_destroy_addr.mac_addr))
+ return -EADDRNOTAVAIL;
ether_addr_copy(card->dev->dev_addr,
- cmd->data.create_destroy_addr.unique_id);
+ cmd->data.create_destroy_addr.mac_addr);
return 0;
}
@@ -930,7 +932,6 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
{
int rc = 0;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 2, "hsrmac");
@@ -938,9 +939,6 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
IPA_DATA_SIZEOF(create_destroy_addr));
if (!iob)
return -ENOMEM;
- cmd = __ipa_cmd(iob);
- *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
- card->info.unique_id;
rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb,
NULL);
@@ -951,43 +949,36 @@ static int qeth_l3_get_unique_id_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ u16 *uid = reply->param;
if (cmd->hdr.return_code == 0) {
- card->info.unique_id = *((__u16 *)
- &cmd->data.create_destroy_addr.unique_id[6]);
+ *uid = cmd->data.create_destroy_addr.uid;
return 0;
}
- card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
- UNIQUE_ID_NOT_BY_CARD;
dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n");
return -EIO;
}
-static int qeth_l3_get_unique_id(struct qeth_card *card)
+static u16 qeth_l3_get_unique_id(struct qeth_card *card, u16 uid)
{
- int rc = 0;
struct qeth_cmd_buffer *iob;
- struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 2, "guniqeid");
- if (!qeth_is_supported(card, IPA_IPV6)) {
- card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED |
- UNIQUE_ID_NOT_BY_CARD;
- return 0;
- }
+ if (!qeth_is_supported(card, IPA_IPV6))
+ goto out;
iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6,
IPA_DATA_SIZEOF(create_destroy_addr));
if (!iob)
- return -ENOMEM;
- cmd = __ipa_cmd(iob);
- *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
- card->info.unique_id;
+ goto out;
- rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL);
- return rc;
+ __ipa_cmd(iob)->data.create_destroy_addr.uid = uid;
+ qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, &uid);
+
+out:
+ return uid;
}
static int
@@ -1886,7 +1877,8 @@ static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct qeth_card *card = dev->ml_priv;
- return qeth_get_priority_queue(card, skb);
+ return IS_VM_NIC(card) ? netdev_pick_tx(dev, skb, sb_dev) :
+ qeth_get_priority_queue(card, skb);
}
static const struct net_device_ops qeth_l3_netdev_ops = {
@@ -1923,11 +1915,16 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_neigh_setup = qeth_l3_neigh_setup,
};
-static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
+static int qeth_l3_setup_netdev(struct qeth_card *card)
{
+ struct net_device *dev = card->dev;
unsigned int headroom;
int rc;
+ rc = qeth_setup_netdev(card);
+ if (rc)
+ return rc;
+
if (IS_OSD(card) || IS_OSX(card)) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
(card->info.link_type == QETH_LINK_TYPE_HSTR)) {
@@ -1938,9 +1935,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
/*IPv6 address autoconfiguration stuff*/
- qeth_l3_get_unique_id(card);
- if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
- card->dev->dev_id = card->info.unique_id & 0xffff;
+ dev->dev_id = qeth_l3_get_unique_id(card, dev->dev_id);
if (!IS_VM_NIC(card)) {
card->dev->features |= NETIF_F_SG;
@@ -1973,7 +1968,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
rc = qeth_l3_iqd_read_initial_mac(card);
if (rc)
- goto out;
+ return rc;
} else
return -ENODEV;
@@ -1988,14 +1983,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
- rc = register_netdev(card->dev);
- if (!rc && carrier_ok)
- netif_carrier_on(card->dev);
-
-out:
- if (rc)
- card->dev->netdev_ops = NULL;
- return rc;
+ return register_netdev(card->dev);
}
static const struct device_type qeth_l3_devtype = {
@@ -2042,7 +2030,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_set_offline(card, false);
cancel_work_sync(&card->close_dev_work);
- if (qeth_netdev_is_registered(card->dev))
+ if (card->dev->reg_state == NETREG_REGISTERED)
unregister_netdev(card->dev);
flush_workqueue(card->cmd_wq);
@@ -2089,10 +2077,13 @@ static int qeth_l3_set_online(struct qeth_card *card)
qeth_set_allowed_threads(card, 0xffffffff, 0);
qeth_l3_recover_ip(card);
- if (!qeth_netdev_is_registered(dev)) {
- rc = qeth_l3_setup_netdev(card, carrier_ok);
+ if (dev->reg_state != NETREG_REGISTERED) {
+ rc = qeth_l3_setup_netdev(card);
if (rc)
goto out_remove;
+
+ if (carrier_ok)
+ netif_carrier_on(dev);
} else {
rtnl_lock();
if (carrier_ok)
@@ -2204,9 +2195,6 @@ static int qeth_l3_ip_event(struct notifier_block *this,
struct qeth_ipaddr addr;
struct qeth_card *card;
- if (dev_net(dev) != &init_net)
- return NOTIFY_DONE;
-
card = qeth_l3_get_card_from_dev(dev);
if (!card)
return NOTIFY_DONE;
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index a3d1c3bdfadb..dd0b39082534 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -133,40 +133,6 @@ static ssize_t qeth_l3_dev_route6_store(struct device *dev,
static DEVICE_ATTR(route6, 0644, qeth_l3_dev_route6_show,
qeth_l3_dev_route6_store);
-static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct qeth_card *card = dev_get_drvdata(dev);
-
- return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
-}
-
-static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct qeth_card *card = dev_get_drvdata(dev);
- char *tmp;
- int i, rc = 0;
-
- mutex_lock(&card->conf_mutex);
- if (card->state != CARD_STATE_DOWN) {
- rc = -EPERM;
- goto out;
- }
-
- i = simple_strtoul(buf, &tmp, 16);
- if ((i == 0) || (i == 1))
- card->options.fake_broadcast = i;
- else
- rc = -EINVAL;
-out:
- mutex_unlock(&card->conf_mutex);
- return rc ? rc : count;
-}
-
-static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
- qeth_l3_dev_fake_broadcast_store);
-
static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -305,7 +271,6 @@ static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show,
static struct attribute *qeth_l3_device_attrs[] = {
&dev_attr_route4.attr,
&dev_attr_route6.attr,
- &dev_attr_fake_broadcast.attr,
&dev_attr_sniffer.attr,
&dev_attr_hsuid.attr,
NULL,
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 604856e72cfb..5b19f5175c5c 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1577,8 +1577,7 @@ static void qedf_setup_fdmi(struct qedf_ctx *qedf)
{
struct fc_lport *lport = qedf->lport;
struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
- u8 buf[8];
- int i, pos;
+ u64 dsn;
/*
* fdmi_enabled needs to be set for libfc to execute FDMI registration.
@@ -1591,18 +1590,11 @@ static void qedf_setup_fdmi(struct qedf_ctx *qedf)
*/
/* Get the PCI-e Device Serial Number Capability */
- pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
- if (pos) {
- pos += 4;
- for (i = 0; i < 8; i++)
- pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
-
+ dsn = pci_get_dsn(qedf->pdev);
+ if (dsn)
snprintf(fc_host->serial_number,
- sizeof(fc_host->serial_number),
- "%02X%02X%02X%02X%02X%02X%02X%02X",
- buf[7], buf[6], buf[5], buf[4],
- buf[3], buf[2], buf[1], buf[0]);
- } else
+ sizeof(fc_host->serial_number), "%016llX", dsn);
+ else
snprintf(fc_host->serial_number,
sizeof(fc_host->serial_number), "Unknown");
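
pci_get_dsn() reads the two dwords of the PCIe Device Serial Number extended capability and returns them as a single u64, 0 when the capability is absent, which is what lets the open-coded byte loop and manual byte shuffling above go away. Call shape (sketch):

    u64 dsn = pci_get_dsn(qedf->pdev);	/* 0 if no DSN capability */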
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index 4f028a80d6c4..52d2e0f33be7 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -26,9 +26,9 @@ static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len,
int i, pos = 0;
for (i = 0; i < sprom_size_words; i++)
- pos += snprintf(buf + pos, buf_len - pos - 1,
+ pos += scnprintf(buf + pos, buf_len - pos - 1,
"%04X", swab16(sprom[i]) & 0xFFFF);
- pos += snprintf(buf + pos, buf_len - pos - 1, "\n");
+ pos += scnprintf(buf + pos, buf_len - pos - 1, "\n");
return pos + 1;
}
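
The scnprintf() switch matters on truncation: snprintf() returns the length that would have been written, so pos can overrun buf_len and feed a bogus size into the next iteration, while scnprintf() returns the bytes actually stored. A self-contained illustration (not from this patch):

    char small[4];
    int a = snprintf(small, sizeof(small), "%s", "abcdef");	/* a == 6, output truncated to "abc" */
    int b = scnprintf(small, sizeof(small), "%s", "abcdef");	/* b == 3, the bytes really written */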
diff --git a/drivers/staging/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c
index 441ac08d14d2..949abd53a7a9 100644
--- a/drivers/staging/qlge/qlge_ethtool.c
+++ b/drivers/staging/qlge/qlge_ethtool.c
@@ -718,6 +718,8 @@ static void ql_set_msglevel(struct net_device *ndev, u32 value)
}
const struct ethtool_ops qlge_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = ql_get_drvinfo,
.get_wol = ql_get_wol,
.set_wol = ql_set_wol,
diff --git a/fs/nsfs.c b/fs/nsfs.c
index b13bfd406820..4f1205725cfe 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -247,6 +247,20 @@ out_invalid:
return ERR_PTR(-EINVAL);
}
+/**
+ * ns_match() - Returns true if the given namespace matches the dev/ino pair.
+ * @ns: the namespace to match
+ * @dev: dev_t from nsfs that will be matched against current nsfs
+ * @ino: ino_t from nsfs that will be matched against current nsfs
+ *
+ * Return: true if dev and ino matches the current nsfs.
+ */
+bool ns_match(const struct ns_common *ns, dev_t dev, ino_t ino)
+{
+ return (ns->inum == ino) && (nsfs_mnt->mnt_sb->s_dev == dev);
+}
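
Caller-side sketch, assuming the pattern this helper was added for (checking a task's namespace against a user-supplied dev/ino pair, as in the new bpf_get_ns_current_pid_tgid() helper; user_dev and user_ino are placeholders):

    struct pid_namespace *pidns = task_active_pid_ns(current);

    if (pidns && ns_match(&pidns->ns, (dev_t)user_dev, (ino_t)user_ino)) {
    	/* the task lives in the namespace the caller asked about */
    }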
+
+
static int nsfs_show_path(struct seq_file *seq, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 130fc6fbcc03..26bbf960e2a2 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -558,3 +558,151 @@ void sysfs_remove_bin_file(struct kobject *kobj,
kernfs_remove_by_name(kobj->sd, attr->attr.name);
}
EXPORT_SYMBOL_GPL(sysfs_remove_bin_file);
+
+static int internal_change_owner(struct kernfs_node *kn, kuid_t kuid,
+ kgid_t kgid)
+{
+ struct iattr newattrs = {
+ .ia_valid = ATTR_UID | ATTR_GID,
+ .ia_uid = kuid,
+ .ia_gid = kgid,
+ };
+ return kernfs_setattr(kn, &newattrs);
+}
+
+/**
+ * sysfs_link_change_owner - change owner of a sysfs symlink.
+ * @kobj: object of the kernfs_node the symlink is located in.
+ * @targ: object of the kernfs_node the symlink points to.
+ * @name: name of the link.
+ * @kuid: new owner's kuid
+ * @kgid: new owner's kgid
+ *
+ * This function looks up the sysfs symlink entry @name under @kobj and changes
+ * the ownership to @kuid/@kgid. The symlink is looked up in the namespace of
+ * @targ.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ,
+ const char *name, kuid_t kuid, kgid_t kgid)
+{
+ struct kernfs_node *kn = NULL;
+ int error;
+
+ if (!name || !kobj->state_in_sysfs || !targ->state_in_sysfs)
+ return -EINVAL;
+
+ error = -ENOENT;
+ kn = kernfs_find_and_get_ns(kobj->sd, name, targ->sd->ns);
+ if (!kn)
+ goto out;
+
+ error = -EINVAL;
+ if (kernfs_type(kn) != KERNFS_LINK)
+ goto out;
+ if (kn->symlink.target_kn->priv != targ)
+ goto out;
+
+ error = internal_change_owner(kn, kuid, kgid);
+
+out:
+ kernfs_put(kn);
+ return error;
+}
+
+/**
+ * sysfs_file_change_owner - change owner of a sysfs file.
+ * @kobj: object.
+ * @name: name of the file to change.
+ * @kuid: new owner's kuid
+ * @kgid: new owner's kgid
+ *
+ * This function looks up the sysfs entry @name under @kobj and changes the
+ * ownership to @kuid/@kgid.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int sysfs_file_change_owner(struct kobject *kobj, const char *name, kuid_t kuid,
+ kgid_t kgid)
+{
+ struct kernfs_node *kn;
+ int error;
+
+ if (!name)
+ return -EINVAL;
+
+ if (!kobj->state_in_sysfs)
+ return -EINVAL;
+
+ kn = kernfs_find_and_get(kobj->sd, name);
+ if (!kn)
+ return -ENOENT;
+
+ error = internal_change_owner(kn, kuid, kgid);
+
+ kernfs_put(kn);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(sysfs_file_change_owner);
+
+/**
+ * sysfs_change_owner - change owner of the given object.
+ * @kobj: object.
+ * @kuid: new owner's kuid
+ * @kgid: new owner's kgid
+ *
+ * Change the owner of the default directory, files, groups, and attributes of
+ * @kobj to @kuid/@kgid. Note that sysfs_change_owner mirrors how the sysfs
+ * entries for a kobject are added by driver core. In summary,
+ * sysfs_change_owner() takes care of the default directory entry for @kobj,
+ * the default attributes associated with the ktype of @kobj, and the default
+ * groups associated with the ktype of @kobj.
+ * Additional properties not added by driver core have to be changed by the
+ * driver or subsystem which created them. This is similar to how
+ * driver/subsystem specific entries are removed.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid)
+{
+ int error;
+ const struct kobj_type *ktype;
+
+ if (!kobj->state_in_sysfs)
+ return -EINVAL;
+
+ /* Change the owner of the kobject itself. */
+ error = internal_change_owner(kobj->sd, kuid, kgid);
+ if (error)
+ return error;
+
+ ktype = get_ktype(kobj);
+ if (ktype) {
+ struct attribute **kattr;
+
+ /*
+ * Change owner of the default attributes associated with the
+ * ktype of @kobj.
+ */
+ for (kattr = ktype->default_attrs; kattr && *kattr; kattr++) {
+ error = sysfs_file_change_owner(kobj, (*kattr)->name,
+ kuid, kgid);
+ if (error)
+ return error;
+ }
+
+ /*
+ * Change owner of the default groups associated with the
+ * ktype of @kobj.
+ */
+ error = sysfs_groups_change_owner(kobj, ktype->default_groups,
+ kuid, kgid);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sysfs_change_owner);
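
Caller-side sketch (an assumption modeled on the driver-core user added in this series, where device_change_owner() hands a device's whole sysfs subtree to a new owner):

    int err = sysfs_change_owner(&dev->kobj, kuid, kgid);
    if (err)
    	return err;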
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index c4ab045926b7..5afe0e7ff7cd 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -13,6 +13,7 @@
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/err.h>
+#include <linux/fs.h>
#include "sysfs.h"
@@ -457,3 +458,117 @@ int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
return PTR_ERR_OR_ZERO(link);
}
EXPORT_SYMBOL_GPL(__compat_only_sysfs_link_entry_to_kobj);
+
+static int sysfs_group_attrs_change_owner(struct kernfs_node *grp_kn,
+ const struct attribute_group *grp,
+ struct iattr *newattrs)
+{
+ struct kernfs_node *kn;
+ int error;
+
+ if (grp->attrs) {
+ struct attribute *const *attr;
+
+ for (attr = grp->attrs; *attr; attr++) {
+ kn = kernfs_find_and_get(grp_kn, (*attr)->name);
+ if (!kn)
+ return -ENOENT;
+
+ error = kernfs_setattr(kn, newattrs);
+ kernfs_put(kn);
+ if (error)
+ return error;
+ }
+ }
+
+ if (grp->bin_attrs) {
+ struct bin_attribute *const *bin_attr;
+
+ for (bin_attr = grp->bin_attrs; *bin_attr; bin_attr++) {
+ kn = kernfs_find_and_get(grp_kn, (*bin_attr)->attr.name);
+ if (!kn)
+ return -ENOENT;
+
+ error = kernfs_setattr(kn, newattrs);
+ kernfs_put(kn);
+ if (error)
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * sysfs_group_change_owner - change owner of an attribute group.
+ * @kobj: The kobject containing the group.
+ * @grp: The attribute group.
+ * @kuid: new owner's kuid
+ * @kgid: new owner's kgid
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int sysfs_group_change_owner(struct kobject *kobj,
+ const struct attribute_group *grp, kuid_t kuid,
+ kgid_t kgid)
+{
+ struct kernfs_node *grp_kn;
+ int error;
+ struct iattr newattrs = {
+ .ia_valid = ATTR_UID | ATTR_GID,
+ .ia_uid = kuid,
+ .ia_gid = kgid,
+ };
+
+ if (!kobj->state_in_sysfs)
+ return -EINVAL;
+
+ if (grp->name) {
+ grp_kn = kernfs_find_and_get(kobj->sd, grp->name);
+ } else {
+ kernfs_get(kobj->sd);
+ grp_kn = kobj->sd;
+ }
+ if (!grp_kn)
+ return -ENOENT;
+
+ error = kernfs_setattr(grp_kn, &newattrs);
+ if (!error)
+ error = sysfs_group_attrs_change_owner(grp_kn, grp, &newattrs);
+
+ kernfs_put(grp_kn);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(sysfs_group_change_owner);
+
+/**
+ * sysfs_groups_change_owner - change owner of a set of attribute groups.
+ * @kobj: The kobject containing the groups.
+ * @groups: The attribute groups.
+ * @kuid: new owner's kuid
+ * @kgid: new owner's kgid
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int sysfs_groups_change_owner(struct kobject *kobj,
+ const struct attribute_group **groups,
+ kuid_t kuid, kgid_t kgid)
+{
+ int error = 0, i;
+
+ if (!kobj->state_in_sysfs)
+ return -EINVAL;
+
+ if (!groups)
+ return 0;
+
+ for (i = 0; groups[i]; i++) {
+ error = sysfs_group_change_owner(kobj, groups[i], kuid, kgid);
+ if (error)
+ break;
+ }
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(sysfs_groups_change_owner);
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 2444336ef04c..71e387a5fe90 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -535,6 +535,7 @@
\
RO_EXCEPTION_TABLE \
NOTES \
+ BTF \
\
. = ALIGN((align)); \
__end_rodata = .;
@@ -622,6 +623,20 @@
}
/*
+ * .BTF
+ */
+#ifdef CONFIG_DEBUG_INFO_BTF
+#define BTF \
+ .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \
+ __start_BTF = .; \
+ *(.BTF) \
+ __stop_BTF = .; \
+ }
+#else
+#define BTF
+#endif
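
__start_BTF/__stop_BTF bound the raw BTF blob that pahole emits into vmlinux; the in-kernel parser consumes the pair roughly like this (simplified sketch of the kernel/bpf/btf.c usage, not part of this patch):

    extern char __start_BTF[], __stop_BTF[];

    /* vmlinux's own type information, embedded at link time */
    void *btf_data = __start_BTF;
    u32 btf_size   = __stop_BTF - __start_BTF;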
+
+/*
* Init task
*/
#define INIT_TASK_DATA_SECTION(align) \
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 4bbb5f1c8b5b..48ea093ff04c 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -56,6 +56,19 @@
})
/**
+ * FIELD_MAX() - produce the maximum value representable by a field
+ * @_mask: shifted mask defining the field's length and position
+ *
+ * FIELD_MAX() returns the maximum value that can be held in the field
+ * specified by @_mask.
+ */
+#define FIELD_MAX(_mask) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: "); \
+ (typeof(_mask))((_mask) >> __bf_shf(_mask)); \
+ })
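
Worked example (illustrative; REG_MYFIELD is a made-up register field):

    #define REG_MYFIELD	GENMASK(6, 4)	/* 3-bit field at bits [6:4] */

    /* FIELD_MAX(REG_MYFIELD) == 0x70 >> 4 == 7, the largest value that
     * FIELD_FIT()/FIELD_PREP() will accept for this field.
     */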
+
+/**
* FIELD_FIT() - check if value fits in the field
* @_mask: shifted mask defining the field's length and position
* @_val: value to test against the field
@@ -110,6 +123,7 @@ static __always_inline u64 field_mask(u64 field)
{
return field / field_multiplier(field);
}
+#define field_max(field) ((typeof(field))field_mask(field))
#define ____MAKE_OP(type,base,to,from) \
static __always_inline __##type type##_encode_bits(base v, base field) \
{ \
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index a11d5b7dbbf3..c11b413d5b1a 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -36,7 +36,7 @@ struct bpf_cgroup_storage_map;
struct bpf_storage_buffer {
struct rcu_head rcu;
- char data[0];
+ char data[];
};
struct bpf_cgroup_storage {
@@ -51,9 +51,18 @@ struct bpf_cgroup_storage {
struct rcu_head rcu;
};
+struct bpf_cgroup_link {
+ struct bpf_link link;
+ struct cgroup *cgroup;
+ enum bpf_attach_type type;
+};
+
+extern const struct bpf_link_ops bpf_cgroup_link_lops;
+
struct bpf_prog_list {
struct list_head node;
struct bpf_prog *prog;
+ struct bpf_cgroup_link *link;
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};
@@ -84,20 +93,27 @@ struct cgroup_bpf {
int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);
-int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
- struct bpf_prog *replace_prog,
+int __cgroup_bpf_attach(struct cgroup *cgrp,
+ struct bpf_prog *prog, struct bpf_prog *replace_prog,
+ struct bpf_cgroup_link *link,
enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
+ struct bpf_cgroup_link *link,
enum bpf_attach_type type);
+int __cgroup_bpf_replace(struct cgroup *cgrp, struct bpf_cgroup_link *link,
+ struct bpf_prog *new_prog);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
union bpf_attr __user *uattr);
/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
-int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
- struct bpf_prog *replace_prog, enum bpf_attach_type type,
+int cgroup_bpf_attach(struct cgroup *cgrp,
+ struct bpf_prog *prog, struct bpf_prog *replace_prog,
+ struct bpf_cgroup_link *link, enum bpf_attach_type type,
u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type, u32 flags);
+ enum bpf_attach_type type);
+int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *old_prog,
+ struct bpf_prog *new_prog);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
union bpf_attr __user *uattr);
@@ -332,11 +348,13 @@ int cgroup_bpf_prog_attach(const union bpf_attr *attr,
enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
enum bpf_prog_type ptype);
+int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr);
#else
struct bpf_prog;
+struct bpf_link;
struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}
@@ -354,6 +372,19 @@ static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
return -EINVAL;
}
+static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int cgroup_bpf_replace(struct bpf_link *link,
+ struct bpf_prog *old_prog,
+ struct bpf_prog *new_prog)
+{
+ return -EINVAL;
+}
+
static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 212991f6f2a5..fd2b2322412d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -18,6 +18,7 @@
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/kallsyms.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -233,6 +234,7 @@ enum bpf_arg_type {
ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
ARG_PTR_TO_CTX, /* pointer to context */
+ ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */
ARG_ANYTHING, /* any (initialized) argument is ok */
ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
@@ -434,6 +436,16 @@ struct btf_func_model {
*/
#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
+/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
+ * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
+ */
+#define BPF_MAX_TRAMP_PROGS 40
+
+struct bpf_tramp_progs {
+ struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
+ int nr_progs;
+};
+
/* Different use cases for BPF trampoline:
* 1. replace nop at the function entry (kprobe equivalent)
* flags = BPF_TRAMP_F_RESTORE_REGS
@@ -456,16 +468,25 @@ struct btf_func_model {
*/
int arch_prepare_bpf_trampoline(void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
- struct bpf_prog **fentry_progs, int fentry_cnt,
- struct bpf_prog **fexit_progs, int fexit_cnt,
+ struct bpf_tramp_progs *tprogs,
void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(void);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
+struct bpf_ksym {
+ unsigned long start;
+ unsigned long end;
+ char name[KSYM_NAME_LEN];
+ struct list_head lnode;
+ struct latch_tree_node tnode;
+ bool prog;
+};
+
enum bpf_tramp_prog_type {
BPF_TRAMP_FENTRY,
BPF_TRAMP_FEXIT,
+ BPF_TRAMP_MODIFY_RETURN,
BPF_TRAMP_MAX,
BPF_TRAMP_REPLACE, /* more than MAX */
};
@@ -494,6 +515,7 @@ struct bpf_trampoline {
/* Executable image of trampoline */
void *image;
u64 selector;
+ struct bpf_ksym ksym;
};
#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
@@ -511,9 +533,10 @@ struct bpf_dispatcher {
int num_progs;
void *image;
u32 image_off;
+ struct bpf_ksym ksym;
};
-static __always_inline unsigned int bpf_dispatcher_nopfunc(
+static __always_inline unsigned int bpf_dispatcher_nop_func(
const void *ctx,
const struct bpf_insn *insnsi,
unsigned int (*bpf_func)(const void *,
@@ -526,17 +549,21 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
void bpf_trampoline_put(struct bpf_trampoline *tr);
-#define BPF_DISPATCHER_INIT(name) { \
- .mutex = __MUTEX_INITIALIZER(name.mutex), \
- .func = &name##func, \
- .progs = {}, \
- .num_progs = 0, \
- .image = NULL, \
- .image_off = 0 \
+#define BPF_DISPATCHER_INIT(_name) { \
+ .mutex = __MUTEX_INITIALIZER(_name.mutex), \
+ .func = &_name##_func, \
+ .progs = {}, \
+ .num_progs = 0, \
+ .image = NULL, \
+ .image_off = 0, \
+ .ksym = { \
+ .name = #_name, \
+ .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
+ }, \
}
#define DEFINE_BPF_DISPATCHER(name) \
- noinline unsigned int name##func( \
+ noinline unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
unsigned int (*bpf_func)(const void *, \
@@ -544,26 +571,26 @@ void bpf_trampoline_put(struct bpf_trampoline *tr);
{ \
return bpf_func(ctx, insnsi); \
} \
- EXPORT_SYMBOL(name##func); \
- struct bpf_dispatcher name = BPF_DISPATCHER_INIT(name);
+ EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
+ struct bpf_dispatcher bpf_dispatcher_##name = \
+ BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name) \
- unsigned int name##func( \
+ unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
unsigned int (*bpf_func)(const void *, \
const struct bpf_insn *)); \
- extern struct bpf_dispatcher name;
-#define BPF_DISPATCHER_FUNC(name) name##func
-#define BPF_DISPATCHER_PTR(name) (&name)
+ extern struct bpf_dispatcher bpf_dispatcher_##name;
+#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
+#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
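
With the rename the single in-tree instance reads naturally: DEFINE_BPF_DISPATCHER(xdp) in net/core/filter.c now emits bpf_dispatcher_xdp_func() plus a bpf_dispatcher_xdp object, and the embedded ksym carries that recognizable name. Usage shape (sketch, matching the macros above):

    DEFINE_BPF_DISPATCHER(xdp)

    /* later, when (de)installing an XDP program: */
    bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, new_prog);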
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
struct bpf_prog *to);
-struct bpf_image {
- struct latch_tree_node tnode;
- unsigned char data[];
-};
-#define BPF_IMAGE_SIZE (PAGE_SIZE - sizeof(struct bpf_image))
-bool is_bpf_image_address(unsigned long address);
-void *bpf_image_alloc(void);
+/* Called only from JIT-enabled code, so there's no need for stubs. */
+void *bpf_jit_alloc_exec_page(void);
+void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
+void bpf_image_ksym_del(struct bpf_ksym *ksym);
+void bpf_ksym_add(struct bpf_ksym *ksym);
+void bpf_ksym_del(struct bpf_ksym *ksym);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
@@ -580,7 +607,7 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
-#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nopfunc
+#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
struct bpf_prog *from,
@@ -641,8 +668,7 @@ struct bpf_prog_aux {
void *jit_data; /* JIT specific data. arch dependent */
struct bpf_jit_poke_descriptor *poke_tab;
u32 size_poke_tab;
- struct latch_tree_node ksym_tnode;
- struct list_head ksym_lnode;
+ struct bpf_ksym ksym;
const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
struct bpf_prog *prog;
@@ -860,7 +886,7 @@ struct bpf_prog_array_item {
struct bpf_prog_array {
struct rcu_head rcu;
- struct bpf_prog_array_item items[0];
+ struct bpf_prog_array_item items[];
};
struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
@@ -886,7 +912,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
struct bpf_prog *_prog; \
struct bpf_prog_array *_array; \
u32 _ret = 1; \
- preempt_disable(); \
+ migrate_disable(); \
rcu_read_lock(); \
_array = rcu_dereference(array); \
if (unlikely(check_non_null && !_array))\
@@ -899,7 +925,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
} \
_out: \
rcu_read_unlock(); \
- preempt_enable(); \
+ migrate_enable(); \
_ret; \
})
@@ -933,7 +959,7 @@ _out: \
u32 ret; \
u32 _ret = 1; \
u32 _cn = 0; \
- preempt_disable(); \
+ migrate_disable(); \
rcu_read_lock(); \
_array = rcu_dereference(array); \
_item = &_array->items[0]; \
@@ -945,7 +971,7 @@ _out: \
_item++; \
} \
rcu_read_unlock(); \
- preempt_enable(); \
+ migrate_enable(); \
if (_ret) \
_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
else \
@@ -962,6 +988,36 @@ _out: \
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
+/*
+ * Block execution of BPF programs attached to instrumentation (perf,
+ * kprobes, tracepoints) to prevent deadlocks on map operations as any of
+ * these events can happen inside a region which holds a map bucket lock
+ * and can deadlock on it.
+ *
+ * Use the preemption-safe inc/dec variants on RT because migrate_disable()
+ * is preemptible on RT, and preemption in the middle of the RMW operation
+ * might lead to inconsistent state. Use the raw variants on non-RT
+ * kernels, as migrate_disable() maps to preempt_disable() there, so the
+ * slightly more expensive save operation can be avoided.
+ */
+static inline void bpf_disable_instrumentation(void)
+{
+ migrate_disable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ this_cpu_inc(bpf_prog_active);
+ else
+ __this_cpu_inc(bpf_prog_active);
+}
+
+static inline void bpf_enable_instrumentation(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ this_cpu_dec(bpf_prog_active);
+ else
+ __this_cpu_dec(bpf_prog_active);
+ migrate_enable();
+}
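
Usage sketch (an assumption; the syscall-path map helpers are the intended callers): bracket any map operation that instrumentation-attached programs could re-enter:

    bpf_disable_instrumentation();
    err = update_map_element(map, key, value, flags);	/* hypothetical map op */
    bpf_enable_instrumentation();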
+
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
@@ -994,6 +1050,7 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux,
void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
@@ -1026,6 +1083,29 @@ extern int sysctl_unprivileged_bpf_disabled;
int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);
+struct bpf_link {
+ atomic64_t refcnt;
+ const struct bpf_link_ops *ops;
+ struct bpf_prog *prog;
+ struct work_struct work;
+};
+
+struct bpf_link_ops {
+ void (*release)(struct bpf_link *link);
+ void (*dealloc)(struct bpf_link *link);
+};
+
+void bpf_link_init(struct bpf_link *link, const struct bpf_link_ops *ops,
+ struct bpf_prog *prog);
+void bpf_link_cleanup(struct bpf_link *link, struct file *link_file,
+ int link_fd);
+void bpf_link_inc(struct bpf_link *link);
+void bpf_link_put(struct bpf_link *link);
+int bpf_link_new_fd(struct bpf_link *link);
+struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
+struct bpf_link *bpf_link_get_from_fd(u32 ufd);
+
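
The declarations above imply a lifecycle for new link types; a sketch of the expected sequence (an assumption modeled on the cgroup link added in this series; my_link and my_link_ops are placeholders):

    struct file *file;
    int link_fd;

    bpf_link_init(&my_link->link, &my_link_ops, prog);
    file = bpf_link_new_file(&my_link->link, &link_fd);
    if (IS_ERR(file))
    	return PTR_ERR(file);

    /* type-specific attach work goes here; on failure call
     * bpf_link_cleanup(&my_link->link, file, link_fd), on success
     * fd_install(link_fd, file) and return link_fd.
     */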
int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);
@@ -1103,6 +1183,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+int bpf_prog_test_run_tracing(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
@@ -1260,6 +1343,13 @@ static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
return -ENOTSUPP;
}
+static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr)
@@ -1356,6 +1446,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
+void sock_map_unhash(struct sock *sk);
+void sock_map_close(struct sock *sk, long timeout);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
struct bpf_prog *prog, u32 which)
@@ -1368,7 +1460,7 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr,
{
return -EINVAL;
}
-#endif
+#endif /* CONFIG_BPF_STREAM_PARSER */
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
@@ -1418,6 +1510,7 @@ extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
@@ -1429,6 +1522,10 @@ extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
+extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
+
+const struct bpf_func_proto *bpf_tracing_func_proto(
+ enum bpf_func_id func_id, const struct bpf_prog *prog);
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
new file mode 100644
index 000000000000..af74712af585
--- /dev/null
+++ b/include/linux/bpf_lsm.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2020 Google LLC.
+ */
+
+#ifndef _LINUX_BPF_LSM_H
+#define _LINUX_BPF_LSM_H
+
+#include <linux/bpf.h>
+#include <linux/lsm_hooks.h>
+
+#ifdef CONFIG_BPF_LSM
+
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ RET bpf_lsm_##NAME(__VA_ARGS__);
+#include <linux/lsm_hook_defs.h>
+#undef LSM_HOOK
+
+int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
+ const struct bpf_prog *prog);
+
+#else /* !CONFIG_BPF_LSM */
+
+static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
+ const struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_BPF_LSM */
+
+#endif /* _LINUX_BPF_LSM_H */
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index c81d4ece79a4..ba0c2d56f8a3 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -70,6 +70,10 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops,
void *, void *)
BPF_PROG_TYPE(BPF_PROG_TYPE_EXT, bpf_extension,
void *, void *)
+#ifdef CONFIG_BPF_LSM
+BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm,
+ void *, void *)
+#endif /* CONFIG_BPF_LSM */
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 5406e6e96585..6abd5a778fcd 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -123,6 +123,10 @@ struct bpf_reg_state {
s64 smax_value; /* maximum possible (s64)value */
u64 umin_value; /* minimum possible (u64)value */
u64 umax_value; /* maximum possible (u64)value */
+ s32 s32_min_value; /* minimum possible (s32)value */
+ s32 s32_max_value; /* maximum possible (s32)value */
+ u32 u32_min_value; /* minimum possible (u32)value */
+ u32 u32_max_value; /* maximum possible (u32)value */
/* parentage chain for liveness checking */
struct bpf_reg_state *parent;
/* Inside the callee two registers can be both PTR_TO_STACK like
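The new s32/u32 fields let the verifier refine the low 32 bits of a register independently of its 64-bit bounds. An illustrative sketch of what the taken branch of a 32-bit test such as "if w0 < 10" could establish (not code from this patch):

        /* 32-bit bounds refined by the compare */
        reg->u32_min_value = 0;
        reg->u32_max_value = 9;
        /* the 64-bit umin/umax stay untouched: a 32-bit test says
         * nothing about the upper 32 bits of the register
         */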
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index b475e7f20d28..6462c5447872 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -79,6 +79,7 @@
#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */
+#define MII_BCM54XX_ECR_FIFOE 0x0001 /* FIFO elasticity */
#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
@@ -119,6 +120,7 @@
#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00
#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
+#define MII_BCM54XX_AUXCTL_ACTL_EXT_PKT_LEN 0x4000
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 6b64b6cc2175..07e547c02fd8 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -198,7 +198,7 @@ enum dccp_role {
struct dccp_service_list {
__u32 dccpsl_nr;
- __be32 dccpsl_list[0];
+ __be32 dccpsl_list[];
};
#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
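With a C99 flexible array member, sizeof(struct dccp_service_list) covers only the fixed header, so allocations must size the trailing array explicitly. A minimal sketch using struct_size() from <linux/overflow.h> (nr_services is an assumed count, not from this patch):

        struct dccp_service_list *sl;

        sl = kmalloc(struct_size(sl, dccpsl_list, nr_services), GFP_KERNEL);
        if (sl)
                sl->dccpsl_nr = nr_services;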
diff --git a/include/linux/device.h b/include/linux/device.h
index fa04dfd22bbc..1311f276f533 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -828,6 +828,7 @@ extern struct device *device_find_child_by_name(struct device *parent,
extern int device_rename(struct device *dev, const char *new_name);
extern int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
+extern int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
extern const char *device_get_devnode(struct device *dev,
umode_t *mode, kuid_t *uid, kgid_t *gid,
const char **tmp);
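device_change_owner() re-chowns a device's sysfs entries, e.g. when a netdevice is moved into a user namespace. A hedged usage sketch (dev and user_ns are assumed to be the device and target namespace):

        int err;

        err = device_change_owner(dev, make_kuid(user_ns, 0),
                                  make_kgid(user_ns, 0));
        if (err)
                dev_warn(dev, "failed to change owner: %d\n", err);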
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 95991e4300bf..c1d379bf6ee1 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -177,8 +177,57 @@ void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
const unsigned long *src);
+#define ETHTOOL_COALESCE_RX_USECS BIT(0)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES BIT(1)
+#define ETHTOOL_COALESCE_RX_USECS_IRQ BIT(2)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ BIT(3)
+#define ETHTOOL_COALESCE_TX_USECS BIT(4)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES BIT(5)
+#define ETHTOOL_COALESCE_TX_USECS_IRQ BIT(6)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ BIT(7)
+#define ETHTOOL_COALESCE_STATS_BLOCK_USECS BIT(8)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE_RX BIT(9)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE_TX BIT(10)
+#define ETHTOOL_COALESCE_PKT_RATE_LOW BIT(11)
+#define ETHTOOL_COALESCE_RX_USECS_LOW BIT(12)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW BIT(13)
+#define ETHTOOL_COALESCE_TX_USECS_LOW BIT(14)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW BIT(15)
+#define ETHTOOL_COALESCE_PKT_RATE_HIGH BIT(16)
+#define ETHTOOL_COALESCE_RX_USECS_HIGH BIT(17)
+#define ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH BIT(18)
+#define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19)
+#define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20)
+#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21)
+
+#define ETHTOOL_COALESCE_USECS \
+ (ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
+#define ETHTOOL_COALESCE_MAX_FRAMES \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES | ETHTOOL_COALESCE_TX_MAX_FRAMES)
+#define ETHTOOL_COALESCE_USECS_IRQ \
+ (ETHTOOL_COALESCE_RX_USECS_IRQ | ETHTOOL_COALESCE_TX_USECS_IRQ)
+#define ETHTOOL_COALESCE_MAX_FRAMES_IRQ \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ)
+#define ETHTOOL_COALESCE_USE_ADAPTIVE \
+ (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | ETHTOOL_COALESCE_USE_ADAPTIVE_TX)
+#define ETHTOOL_COALESCE_USECS_LOW_HIGH \
+ (ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_TX_USECS_LOW | \
+ ETHTOOL_COALESCE_RX_USECS_HIGH | ETHTOOL_COALESCE_TX_USECS_HIGH)
+#define ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH \
+ (ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH)
+#define ETHTOOL_COALESCE_PKT_RATE_RX_USECS \
+ (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | \
+ ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \
+ ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \
+ ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL)
+
/**
* struct ethtool_ops - optional netdev operations
+ * @supported_coalesce_params: supported types of interrupt coalescing.
* @get_drvinfo: Report driver/device information. Should only set the
* @driver, @version, @fw_version and @bus_info fields. If not
* implemented, the @driver and @bus_info fields will be filled in
@@ -207,8 +256,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* or zero.
* @get_coalesce: Get interrupt coalescing parameters. Returns a negative
* error code or zero.
- * @set_coalesce: Set interrupt coalescing parameters. Returns a negative
- * error code or zero.
+ * @set_coalesce: Set interrupt coalescing parameters. Supported coalescing
+ * types should be set in @supported_coalesce_params.
+ * Returns a negative error code or zero.
* @get_ringparam: Report ring sizes
* @set_ringparam: Set ring sizes. Returns a negative error code or zero.
* @get_pauseparam: Report pause parameters
@@ -292,7 +342,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @set_per_queue_coalesce: Set interrupt coalescing parameters per queue.
* It must check that the given queue number is valid. If neither a RX nor
* a TX queue has this number, return -EINVAL. If only a RX queue or a TX
- * queue has this number, ignore the inapplicable fields.
+ * queue has this number, ignore the inapplicable fields. Supported
+ * coalescing types should be set in @supported_coalesce_params.
* Returns a negative error code or zero.
* @get_link_ksettings: Get various device settings including Ethernet link
* settings. The %cmd and %link_mode_masks_nwords fields should be
@@ -323,6 +374,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* of the generic netdev features interface.
*/
struct ethtool_ops {
+ u32 supported_coalesce_params;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
@@ -406,6 +458,8 @@ struct ethtool_ops {
struct ethtool_stats *, u64 *);
};
+int ethtool_check_ops(const struct ethtool_ops *ops);
+
struct ethtool_rx_flow_rule {
struct flow_rule *rule;
unsigned long priv[0];
@@ -420,4 +474,9 @@ struct ethtool_rx_flow_rule *
ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input);
void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *rule);
+bool ethtool_virtdev_validate_cmd(const struct ethtool_link_ksettings *cmd);
+int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd,
+ u32 *dev_speed, u8 *dev_duplex);
+
#endif /* _LINUX_ETHTOOL_H */
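Declaring @supported_coalesce_params lets the ethtool core reject unsupported fields before ->set_coalesce() is ever called. A minimal sketch for a hypothetical driver supporting only the usec and max-frames knobs (the foo_* symbols are illustrative):

static const struct ethtool_ops foo_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_MAX_FRAMES,
        .get_coalesce   = foo_get_coalesce,
        .set_coalesce   = foo_set_coalesce,
};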
diff --git a/include/linux/filter.h b/include/linux/filter.h
index f349e2c0884c..9b5aa5c483cc 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -561,7 +561,7 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
u32 ret; \
- cant_sleep(); \
+ cant_migrate(); \
if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
struct bpf_prog_stats *stats; \
u64 start = sched_clock(); \
@@ -576,8 +576,30 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
} \
ret; })
-#define BPF_PROG_RUN(prog, ctx) __BPF_PROG_RUN(prog, ctx, \
- bpf_dispatcher_nopfunc)
+#define BPF_PROG_RUN(prog, ctx) \
+ __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)
+
+/*
+ * Use in preemptible and therefore migratable context to make sure that
+ * the execution of the BPF program runs on one CPU.
+ *
+ * This uses migrate_disable/enable() explicitly to document that the
+ * invocation of a BPF program does not require reentrancy protection
+ * against a BPF program which is invoked from a preempting task.
+ *
+ * For non-RT enabled kernels migrate_disable/enable() maps to
+ * preempt_disable/enable(), i.e. it also disables preemption.
+ */
+static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
+ const void *ctx)
+{
+ u32 ret;
+
+ migrate_disable();
+ ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func);
+ migrate_enable();
+ return ret;
+}
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
@@ -655,6 +677,7 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
return qdisc_skb_cb(skb)->data;
}
+/* Must be invoked with migration disabled */
static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
{
@@ -680,9 +703,9 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
{
u32 res;
- preempt_disable();
+ migrate_disable();
res = __bpf_prog_run_save_cb(prog, skb);
- preempt_enable();
+ migrate_enable();
return res;
}
@@ -695,13 +718,11 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
if (unlikely(prog->cb_access))
memset(cb_data, 0, BPF_SKB_CB_LEN);
- preempt_disable();
- res = BPF_PROG_RUN(prog, skb);
- preempt_enable();
+ res = bpf_prog_run_pin_on_cpu(prog, skb);
return res;
}
-DECLARE_BPF_DISPATCHER(bpf_dispatcher_xdp)
+DECLARE_BPF_DISPATCHER(xdp)
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
struct xdp_buff *xdp)
@@ -712,8 +733,7 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
* already takes rcu_read_lock() when fetching the program, so
* it's not necessary here anymore.
*/
- return __BPF_PROG_RUN(prog, xdp,
- BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp));
+ return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
}
void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
@@ -1063,7 +1083,6 @@ bpf_address_lookup(unsigned long addr, unsigned long *size,
void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);
-void bpf_get_prog_name(const struct bpf_prog *prog, char *sym);
#else /* CONFIG_BPF_JIT */
@@ -1132,11 +1151,6 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}
-static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
-{
- sym[0] = '\0';
-}
-
#endif /* CONFIG_BPF_JIT */
void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
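bpf_prog_run_pin_on_cpu() replaces the open-coded preempt_disable() pairs shown above for callers in preemptible context. The before/after shape, assuming prog and ctx are valid:

        /* old: disables preemption outright, even on PREEMPT_RT */
        preempt_disable();
        ret = BPF_PROG_RUN(prog, ctx);
        preempt_enable();

        /* new: only disables migration; stays preemptible on RT */
        ret = bpf_prog_run_pin_on_cpu(prog, ctx);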
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index b0b743563f43..75884563059f 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -149,8 +149,6 @@ struct ptp_qoriq {
bool extts_fifo_support;
int irq;
int phc_index;
- u64 alarm_interval; /* for periodic alarm */
- u64 alarm_value;
u32 tclk_period; /* nanoseconds */
u32 tmr_prsc;
u32 tmr_add;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 73c66a3a33ae..16268ef1cbcc 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -620,6 +620,15 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
}
/**
+ * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_is_any_nullfunc(__le16 fc)
+{
+ return ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc);
+}
+
+/**
* ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
* @fc: frame control field in little-endian byteorder
*/
@@ -2047,13 +2056,13 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000
#define IEEE80211_HE_OPERATION_6GHZ_OP_INFO 0x00020000
#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000
-#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
+#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
/*
* ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
- * @he_oper_ie: byte data of the He Operations IE, stating from the the byte
+ * @he_oper_ie: byte data of the HE Operations IE, starting from the byte
* after the ext ID byte. It is assumed that he_oper_ie has at least
* sizeof(struct ieee80211_he_operation) bytes, the caller must have
* validated this.
@@ -2091,7 +2100,7 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
/*
* ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size
- * @he_spr_ie: byte data of the He Spatial Reuse IE, stating from the the byte
+ * @he_spr_ie: byte data of the HE Spatial Reuse IE, starting from the byte
* after the ext ID byte. It is assumed that he_spr_ie has at least
* sizeof(struct ieee80211_he_spr) bytes, the caller must have validated
* this
@@ -2523,6 +2532,7 @@ enum ieee80211_eid {
WLAN_EID_FILS_INDICATION = 240,
WLAN_EID_DILS = 241,
WLAN_EID_FRAGMENT = 242,
+ WLAN_EID_RSNX = 244,
WLAN_EID_EXTENSION = 255
};
@@ -2734,7 +2744,7 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(6)
-/* TDLS capabilities in the the 4th byte of @WLAN_EID_EXT_CAPABILITY */
+/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */
#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6)
@@ -3034,6 +3044,7 @@ struct ieee80211_multiple_bssid_configuration {
#define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15)
#define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16)
#define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17)
+#define WLAN_AKM_SUITE_OWE SUITE(0x000FAC, 18)
#define WLAN_MAX_KEY_LEN 32
@@ -3412,4 +3423,11 @@ static inline bool for_each_element_completed(const struct element *element,
return (const u8 *)element == (const u8 *)data + datalen;
}
+/**
+ * RSNX Capabilities:
+ * bits 0-3: Field length (n-1)
+ */
+#define WLAN_RSNX_CAPA_PROTECTED_TWT BIT(4)
+#define WLAN_RSNX_CAPA_SAE_H2E BIT(5)
+
#endif /* LINUX_IEEE80211_H */
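ieee80211_is_any_nullfunc() collapses the recurring "nullfunc or QoS nullfunc" check into a single call. A minimal sketch, assuming hdr points at a received struct ieee80211_hdr:

static bool rx_is_nullfunc(const struct ieee80211_hdr *hdr)
{
        /* true for both regular and QoS NULL data frames */
        return ieee80211_is_any_nullfunc(hdr->frame_control);
}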
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index c91cf2dee12a..ce9ed1c0602f 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -10,11 +10,9 @@ struct inet_hashinfo;
struct inet_diag_handler {
void (*dump)(struct sk_buff *skb,
struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- struct nlattr *bc);
+ const struct inet_diag_req_v2 *r);
- int (*dump_one)(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+ int (*dump_one)(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req);
void (*idiag_get_info)(struct sock *sk,
@@ -35,18 +33,25 @@ struct inet_diag_handler {
__u16 idiag_info_size;
};
+struct bpf_sk_storage_diag;
+struct inet_diag_dump_data {
+ struct nlattr *req_nlas[__INET_DIAG_REQ_MAX];
+#define inet_diag_nla_bc req_nlas[INET_DIAG_REQ_BYTECODE]
+#define inet_diag_nla_bpf_stgs req_nlas[INET_DIAG_REQ_SK_BPF_STORAGES]
+
+ struct bpf_sk_storage_diag *bpf_stg_diag;
+};
+
struct inet_connection_sock;
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
- struct sk_buff *skb, const struct inet_diag_req_v2 *req,
- struct user_namespace *user_ns,
- u32 pid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh, bool net_admin);
+ struct sk_buff *skb, struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *req,
+ u16 nlmsg_flags, bool net_admin);
void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- struct nlattr *bc);
+ const struct inet_diag_req_v2 *r);
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
- struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ struct netlink_callback *cb,
const struct inet_diag_req_v2 *req);
struct sock *inet_diag_find_one_icsk(struct net *net,
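Handlers no longer receive the bytecode attribute directly; the parsed request attributes sit in cb->data as a struct inet_diag_dump_data. A hedged sketch of the new ->dump() shape (my_proto_dump is an illustrative name):

static void my_proto_dump(struct sk_buff *skb, struct netlink_callback *cb,
                          const struct inet_diag_req_v2 *r)
{
        struct inet_diag_dump_data *cb_data = cb->data;
        struct nlattr *bc = cb_data->inet_diag_nla_bc; /* may be NULL */

        /* walk the sockets, applying the bytecode filter in bc */
}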
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index 35e15dfd4155..cb20c733b15a 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -14,36 +14,41 @@
#include <linux/io.h>
/**
- * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
- * @op: accessor function (takes @addr as its only argument)
- * @addr: Address to poll
+ * read_poll_timeout - Periodically poll an address until a condition is
+ * met or a timeout occurs
+ * @op: accessor function (takes @args as its arguments)
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
* @sleep_us: Maximum time to sleep between reads in us (0
* tight-loops). Should be less than ~20ms since usleep_range
* is used (see Documentation/timers/timers-howto.rst).
* @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_read: if true, sleep @sleep_us before the first read.
+ * @args: arguments for @op poll
*
* Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val. Must not
+ * case, the last value read via @op(args) is stored in @val. Must not
* be called from atomic context if sleep_us or timeout_us are used.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
*/
-#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
+#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
+ sleep_before_read, args...) \
({ \
u64 __timeout_us = (timeout_us); \
unsigned long __sleep_us = (sleep_us); \
ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
might_sleep_if((__sleep_us) != 0); \
+ if (sleep_before_read && __sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
for (;;) { \
- (val) = op(addr); \
+ (val) = op(args); \
if (cond) \
break; \
if (__timeout_us && \
ktime_compare(ktime_get(), __timeout) > 0) { \
- (val) = op(addr); \
+ (val) = op(args); \
break; \
} \
if (__sleep_us) \
@@ -53,6 +58,27 @@
})
/**
+ * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
+ * @op: accessor function (takes @addr as its only argument)
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ */
+#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
+ read_poll_timeout(op, val, cond, sleep_us, timeout_us, false, addr)
+
+/**
* readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
* @op: accessor function (takes @addr as its only argument)
* @addr: Address to poll
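Because read_poll_timeout() forwards a full argument list to @op, accessors taking more than one parameter now fit without a wrapper. A hedged sketch polling an MDIO status register (phy_read(), MII_BMSR and BMSR_LSTATUS are from the phylib/mii headers, used here as an assumed example; phydev is the device being polled):

        int val, err;

        /* break on read error or once link-status is reported up */
        err = read_poll_timeout(phy_read, val,
                                val < 0 || (val & BMSR_LSTATUS),
                                1000, 100000, false, phydev, MII_BMSR);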
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ea7c7906591e..2cb445a8fc9e 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -74,6 +74,7 @@ struct ipv6_devconf {
__u32 addr_gen_mode;
__s32 disable_policy;
__s32 ndisc_tclass;
+ __s32 rpl_seg_enabled;
struct ctl_table_header *sysctl_header;
};
diff --git a/include/linux/limits.h b/include/linux/limits.h
index 7fc497ee1393..b568b9c30bbf 100644
--- a/include/linux/limits.h
+++ b/include/linux/limits.h
@@ -16,6 +16,7 @@
#define S16_MAX ((s16)(U16_MAX >> 1))
#define S16_MIN ((s16)(-S16_MAX - 1))
#define U32_MAX ((u32)~0U)
+#define U32_MIN ((u32)0)
#define S32_MAX ((s32)(U32_MAX >> 1))
#define S32_MIN ((s32)(-S32_MAX - 1))
#define U64_MAX ((u64)~0ULL)
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index fe740031339d..c664c27a29a0 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -71,7 +71,7 @@ static inline void linkmode_change_bit(int nr, volatile unsigned long *addr)
__change_bit(nr, addr);
}
-static inline int linkmode_test_bit(int nr, volatile unsigned long *addr)
+static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr)
{
return test_bit(nr, addr);
}
@@ -88,4 +88,10 @@ static inline int linkmode_subset(const unsigned long *src1,
return bitmap_subset(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
+void linkmode_resolve_pause(const unsigned long *local_adv,
+ const unsigned long *partner_adv,
+ bool *tx_pause, bool *rx_pause);
+
+void linkmode_set_pause(unsigned long *advertisement, bool tx, bool rx);
+
#endif /* __LINKMODE_H */
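linkmode_set_pause() and linkmode_resolve_pause() centralize the 802.3 pause advertisement and resolution logic. A hedged usage sketch, assuming lp_adv holds the link partner's advertisement:

        __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_adv) = { 0, };
        bool tx_pause, rx_pause;

        linkmode_set_pause(phydev->advertising, true, true);
        linkmode_resolve_pause(phydev->advertising, lp_adv,
                               &tx_pause, &rx_pause);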
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
new file mode 100644
index 000000000000..9cd4455528e5
--- /dev/null
+++ b/include/linux/lsm_hook_defs.h
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Linux Security Module Hook declarations.
+ *
+ * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
+ * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
+ * Copyright (C) 2001 James Morris <jmorris@intercode.com.au>
+ * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group)
+ * Copyright (C) 2015 Intel Corporation.
+ * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2016 Mellanox Technologies
+ * Copyright (C) 2020 Google LLC.
+ */
+
+/*
+ * The macro LSM_HOOK is used to define the data structures required by
+ * the LSM framework using the pattern:
+ *
+ * LSM_HOOK(<return_type>, <default_value>, <hook_name>, args...)
+ *
+ * struct security_hook_heads {
+ * #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME;
+ * #include <linux/lsm_hook_defs.h>
+ * #undef LSM_HOOK
+ * };
+ */
+LSM_HOOK(int, 0, binder_set_context_mgr, struct task_struct *mgr)
+LSM_HOOK(int, 0, binder_transaction, struct task_struct *from,
+ struct task_struct *to)
+LSM_HOOK(int, 0, binder_transfer_binder, struct task_struct *from,
+ struct task_struct *to)
+LSM_HOOK(int, 0, binder_transfer_file, struct task_struct *from,
+ struct task_struct *to, struct file *file)
+LSM_HOOK(int, 0, ptrace_access_check, struct task_struct *child,
+ unsigned int mode)
+LSM_HOOK(int, 0, ptrace_traceme, struct task_struct *parent)
+LSM_HOOK(int, 0, capget, struct task_struct *target, kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted)
+LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old,
+ const kernel_cap_t *effective, const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted)
+LSM_HOOK(int, 0, capable, const struct cred *cred, struct user_namespace *ns,
+ int cap, unsigned int opts)
+LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, struct super_block *sb)
+LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
+LSM_HOOK(int, 0, syslog, int type)
+LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
+ const struct timezone *tz)
+LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
+LSM_HOOK(int, 0, bprm_set_creds, struct linux_binprm *bprm)
+LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
+LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
+ struct fs_context *src_sc)
+LSM_HOOK(int, 0, fs_context_parse_param, struct fs_context *fc,
+ struct fs_parameter *param)
+LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb)
+LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb)
+LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts)
+LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts)
+LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts)
+LSM_HOOK(int, 0, sb_kern_mount, struct super_block *sb)
+LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb)
+LSM_HOOK(int, 0, sb_statfs, struct dentry *dentry)
+LSM_HOOK(int, 0, sb_mount, const char *dev_name, const struct path *path,
+ const char *type, unsigned long flags, void *data)
+LSM_HOOK(int, 0, sb_umount, struct vfsmount *mnt, int flags)
+LSM_HOOK(int, 0, sb_pivotroot, const struct path *old_path,
+ const struct path *new_path)
+LSM_HOOK(int, 0, sb_set_mnt_opts, struct super_block *sb, void *mnt_opts,
+ unsigned long kern_flags, unsigned long *set_kern_flags)
+LSM_HOOK(int, 0, sb_clone_mnt_opts, const struct super_block *oldsb,
+ struct super_block *newsb, unsigned long kern_flags,
+ unsigned long *set_kern_flags)
+LSM_HOOK(int, 0, sb_add_mnt_opt, const char *option, const char *val,
+ int len, void **mnt_opts)
+LSM_HOOK(int, 0, move_mount, const struct path *from_path,
+ const struct path *to_path)
+LSM_HOOK(int, 0, dentry_init_security, struct dentry *dentry,
+ int mode, const struct qstr *name, void **ctx, u32 *ctxlen)
+LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode,
+ struct qstr *name, const struct cred *old, struct cred *new)
+
+#ifdef CONFIG_SECURITY_PATH
+LSM_HOOK(int, 0, path_unlink, const struct path *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, path_mkdir, const struct path *dir, struct dentry *dentry,
+ umode_t mode)
+LSM_HOOK(int, 0, path_rmdir, const struct path *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, path_mknod, const struct path *dir, struct dentry *dentry,
+ umode_t mode, unsigned int dev)
+LSM_HOOK(int, 0, path_truncate, const struct path *path)
+LSM_HOOK(int, 0, path_symlink, const struct path *dir, struct dentry *dentry,
+ const char *old_name)
+LSM_HOOK(int, 0, path_link, struct dentry *old_dentry,
+ const struct path *new_dir, struct dentry *new_dentry)
+LSM_HOOK(int, 0, path_rename, const struct path *old_dir,
+ struct dentry *old_dentry, const struct path *new_dir,
+ struct dentry *new_dentry)
+LSM_HOOK(int, 0, path_chmod, const struct path *path, umode_t mode)
+LSM_HOOK(int, 0, path_chown, const struct path *path, kuid_t uid, kgid_t gid)
+LSM_HOOK(int, 0, path_chroot, const struct path *path)
+#endif /* CONFIG_SECURITY_PATH */
+
+/* Needed for inode based security check */
+LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask,
+ unsigned int obj_type)
+LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode)
+LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode)
+LSM_HOOK(int, 0, inode_init_security, struct inode *inode,
+ struct inode *dir, const struct qstr *qstr, const char **name,
+ void **value, size_t *len)
+LSM_HOOK(int, 0, inode_create, struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+LSM_HOOK(int, 0, inode_link, struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry)
+LSM_HOOK(int, 0, inode_unlink, struct inode *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_symlink, struct inode *dir, struct dentry *dentry,
+ const char *old_name)
+LSM_HOOK(int, 0, inode_mkdir, struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+LSM_HOOK(int, 0, inode_rmdir, struct inode *dir, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_mknod, struct inode *dir, struct dentry *dentry,
+ umode_t mode, dev_t dev)
+LSM_HOOK(int, 0, inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+LSM_HOOK(int, 0, inode_readlink, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_follow_link, struct dentry *dentry, struct inode *inode,
+ bool rcu)
+LSM_HOOK(int, 0, inode_permission, struct inode *inode, int mask)
+LSM_HOOK(int, 0, inode_setattr, struct dentry *dentry, struct iattr *attr)
+LSM_HOOK(int, 0, inode_getattr, const struct path *path)
+LSM_HOOK(int, 0, inode_setxattr, struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_setxattr, struct dentry *dentry,
+ const char *name, const void *value, size_t size, int flags)
+LSM_HOOK(int, 0, inode_getxattr, struct dentry *dentry, const char *name)
+LSM_HOOK(int, 0, inode_listxattr, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_removexattr, struct dentry *dentry, const char *name)
+LSM_HOOK(int, 0, inode_need_killpriv, struct dentry *dentry)
+LSM_HOOK(int, 0, inode_killpriv, struct dentry *dentry)
+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct inode *inode,
+ const char *name, void **buffer, bool alloc)
+LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags)
+LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer,
+ size_t buffer_size)
+LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid)
+LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
+LSM_HOOK(int, 0, inode_copy_up_xattr, const char *name)
+LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
+ struct kernfs_node *kn)
+LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
+LSM_HOOK(int, 0, file_alloc_security, struct file *file)
+LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file)
+LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd,
+ unsigned long arg)
+LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
+LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+LSM_HOOK(int, 0, file_mprotect, struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot)
+LSM_HOOK(int, 0, file_lock, struct file *file, unsigned int cmd)
+LSM_HOOK(int, 0, file_fcntl, struct file *file, unsigned int cmd,
+ unsigned long arg)
+LSM_HOOK(void, LSM_RET_VOID, file_set_fowner, struct file *file)
+LSM_HOOK(int, 0, file_send_sigiotask, struct task_struct *tsk,
+ struct fown_struct *fown, int sig)
+LSM_HOOK(int, 0, file_receive, struct file *file)
+LSM_HOOK(int, 0, file_open, struct file *file)
+LSM_HOOK(int, 0, task_alloc, struct task_struct *task,
+ unsigned long clone_flags)
+LSM_HOOK(void, LSM_RET_VOID, task_free, struct task_struct *task)
+LSM_HOOK(int, 0, cred_alloc_blank, struct cred *cred, gfp_t gfp)
+LSM_HOOK(void, LSM_RET_VOID, cred_free, struct cred *cred)
+LSM_HOOK(int, 0, cred_prepare, struct cred *new, const struct cred *old,
+ gfp_t gfp)
+LSM_HOOK(void, LSM_RET_VOID, cred_transfer, struct cred *new,
+ const struct cred *old)
+LSM_HOOK(void, LSM_RET_VOID, cred_getsecid, const struct cred *c, u32 *secid)
+LSM_HOOK(int, 0, kernel_act_as, struct cred *new, u32 secid)
+LSM_HOOK(int, 0, kernel_create_files_as, struct cred *new, struct inode *inode)
+LSM_HOOK(int, 0, kernel_module_request, char *kmod_name)
+LSM_HOOK(int, 0, kernel_load_data, enum kernel_load_data_id id)
+LSM_HOOK(int, 0, kernel_read_file, struct file *file,
+ enum kernel_read_file_id id)
+LSM_HOOK(int, 0, kernel_post_read_file, struct file *file, char *buf,
+ loff_t size, enum kernel_read_file_id id)
+LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old,
+ int flags)
+LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid)
+LSM_HOOK(int, 0, task_getpgid, struct task_struct *p)
+LSM_HOOK(int, 0, task_getsid, struct task_struct *p)
+LSM_HOOK(void, LSM_RET_VOID, task_getsecid, struct task_struct *p, u32 *secid)
+LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice)
+LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio)
+LSM_HOOK(int, 0, task_getioprio, struct task_struct *p)
+LSM_HOOK(int, 0, task_prlimit, const struct cred *cred,
+ const struct cred *tcred, unsigned int flags)
+LSM_HOOK(int, 0, task_setrlimit, struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim)
+LSM_HOOK(int, 0, task_setscheduler, struct task_struct *p)
+LSM_HOOK(int, 0, task_getscheduler, struct task_struct *p)
+LSM_HOOK(int, 0, task_movememory, struct task_struct *p)
+LSM_HOOK(int, 0, task_kill, struct task_struct *p, struct kernel_siginfo *info,
+ int sig, const struct cred *cred)
+LSM_HOOK(int, -ENOSYS, task_prctl, int option, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
+LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p,
+ struct inode *inode)
+LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag)
+LSM_HOOK(void, LSM_RET_VOID, ipc_getsecid, struct kern_ipc_perm *ipcp,
+ u32 *secid)
+LSM_HOOK(int, 0, msg_msg_alloc_security, struct msg_msg *msg)
+LSM_HOOK(void, LSM_RET_VOID, msg_msg_free_security, struct msg_msg *msg)
+LSM_HOOK(int, 0, msg_queue_alloc_security, struct kern_ipc_perm *perm)
+LSM_HOOK(void, LSM_RET_VOID, msg_queue_free_security,
+ struct kern_ipc_perm *perm)
+LSM_HOOK(int, 0, msg_queue_associate, struct kern_ipc_perm *perm, int msqflg)
+LSM_HOOK(int, 0, msg_queue_msgctl, struct kern_ipc_perm *perm, int cmd)
+LSM_HOOK(int, 0, msg_queue_msgsnd, struct kern_ipc_perm *perm,
+ struct msg_msg *msg, int msqflg)
+LSM_HOOK(int, 0, msg_queue_msgrcv, struct kern_ipc_perm *perm,
+ struct msg_msg *msg, struct task_struct *target, long type, int mode)
+LSM_HOOK(int, 0, shm_alloc_security, struct kern_ipc_perm *perm)
+LSM_HOOK(void, LSM_RET_VOID, shm_free_security, struct kern_ipc_perm *perm)
+LSM_HOOK(int, 0, shm_associate, struct kern_ipc_perm *perm, int shmflg)
+LSM_HOOK(int, 0, shm_shmctl, struct kern_ipc_perm *perm, int cmd)
+LSM_HOOK(int, 0, shm_shmat, struct kern_ipc_perm *perm, char __user *shmaddr,
+ int shmflg)
+LSM_HOOK(int, 0, sem_alloc_security, struct kern_ipc_perm *perm)
+LSM_HOOK(void, LSM_RET_VOID, sem_free_security, struct kern_ipc_perm *perm)
+LSM_HOOK(int, 0, sem_associate, struct kern_ipc_perm *perm, int semflg)
+LSM_HOOK(int, 0, sem_semctl, struct kern_ipc_perm *perm, int cmd)
+LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops,
+ unsigned nsops, int alter)
+LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb)
+LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry,
+ struct inode *inode)
+LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, char *name,
+ char **value)
+LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
+LSM_HOOK(int, 0, ismaclabel, const char *name)
+LSM_HOOK(int, 0, secid_to_secctx, u32 secid, char **secdata,
+ u32 *seclen)
+LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
+LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
+LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
+LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
+LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
+ u32 *ctxlen)
+
+#ifdef CONFIG_SECURITY_NETWORK
+LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other,
+ struct sock *newsk)
+LSM_HOOK(int, 0, unix_may_send, struct socket *sock, struct socket *other)
+LSM_HOOK(int, 0, socket_create, int family, int type, int protocol, int kern)
+LSM_HOOK(int, 0, socket_post_create, struct socket *sock, int family, int type,
+ int protocol, int kern)
+LSM_HOOK(int, 0, socket_socketpair, struct socket *socka, struct socket *sockb)
+LSM_HOOK(int, 0, socket_bind, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+LSM_HOOK(int, 0, socket_connect, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+LSM_HOOK(int, 0, socket_listen, struct socket *sock, int backlog)
+LSM_HOOK(int, 0, socket_accept, struct socket *sock, struct socket *newsock)
+LSM_HOOK(int, 0, socket_sendmsg, struct socket *sock, struct msghdr *msg,
+ int size)
+LSM_HOOK(int, 0, socket_recvmsg, struct socket *sock, struct msghdr *msg,
+ int size, int flags)
+LSM_HOOK(int, 0, socket_getsockname, struct socket *sock)
+LSM_HOOK(int, 0, socket_getpeername, struct socket *sock)
+LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname)
+LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname)
+LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how)
+LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb)
+LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock,
+ char __user *optval, int __user *optlen, unsigned len)
+LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock,
+ struct sk_buff *skb, u32 *secid)
+LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority)
+LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk)
+LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk,
+ struct sock *newsk)
+LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, struct sock *sk, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent)
+LSM_HOOK(int, 0, inet_conn_request, struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req)
+LSM_HOOK(void, LSM_RET_VOID, inet_csk_clone, struct sock *newsk,
+ const struct request_sock *req)
+LSM_HOOK(void, LSM_RET_VOID, inet_conn_established, struct sock *sk,
+ struct sk_buff *skb)
+LSM_HOOK(int, 0, secmark_relabel_packet, u32 secid)
+LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void)
+LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void)
+LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req,
+ struct flowi *fl)
+LSM_HOOK(int, 0, tun_dev_alloc_security, void **security)
+LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security)
+LSM_HOOK(int, 0, tun_dev_create, void)
+LSM_HOOK(int, 0, tun_dev_attach_queue, void *security)
+LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security)
+LSM_HOOK(int, 0, tun_dev_open, void *security)
+LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_endpoint *ep,
+ struct sk_buff *skb)
+LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname,
+ struct sockaddr *address, int addrlen)
+LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_endpoint *ep,
+ struct sock *sk, struct sock *newsk)
+#endif /* CONFIG_SECURITY_NETWORK */
+
+#ifdef CONFIG_SECURITY_INFINIBAND
+LSM_HOOK(int, 0, ib_pkey_access, void *sec, u64 subnet_prefix, u16 pkey)
+LSM_HOOK(int, 0, ib_endport_manage_subnet, void *sec, const char *dev_name,
+ u8 port_num)
+LSM_HOOK(int, 0, ib_alloc_security, void **sec)
+LSM_HOOK(void, LSM_RET_VOID, ib_free_security, void *sec)
+#endif /* CONFIG_SECURITY_INFINIBAND */
+
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+LSM_HOOK(int, 0, xfrm_policy_alloc_security, struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp)
+LSM_HOOK(int, 0, xfrm_policy_clone_security, struct xfrm_sec_ctx *old_ctx,
+ struct xfrm_sec_ctx **new_ctx)
+LSM_HOOK(void, LSM_RET_VOID, xfrm_policy_free_security,
+ struct xfrm_sec_ctx *ctx)
+LSM_HOOK(int, 0, xfrm_policy_delete_security, struct xfrm_sec_ctx *ctx)
+LSM_HOOK(int, 0, xfrm_state_alloc, struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx)
+LSM_HOOK(int, 0, xfrm_state_alloc_acquire, struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid)
+LSM_HOOK(void, LSM_RET_VOID, xfrm_state_free_security, struct xfrm_state *x)
+LSM_HOOK(int, 0, xfrm_state_delete_security, struct xfrm_state *x)
+LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid,
+ u8 dir)
+LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x,
+ struct xfrm_policy *xp, const struct flowi *fl)
+LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid,
+ int ckall)
+#endif /* CONFIG_SECURITY_NETWORK_XFRM */
+
+/* key management security hooks */
+#ifdef CONFIG_KEYS
+LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred,
+ unsigned long flags)
+LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key)
+LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred,
+ unsigned perm)
+LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer)
+#endif /* CONFIG_KEYS */
+
+#ifdef CONFIG_AUDIT
+LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
+ void **lsmrule)
+LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
+LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule)
+LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
+#endif /* CONFIG_AUDIT */
+
+#ifdef CONFIG_BPF_SYSCALL
+LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size)
+LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode)
+LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog)
+LSM_HOOK(int, 0, bpf_map_alloc_security, struct bpf_map *map)
+LSM_HOOK(void, LSM_RET_VOID, bpf_map_free_security, struct bpf_map *map)
+LSM_HOOK(int, 0, bpf_prog_alloc_security, struct bpf_prog_aux *aux)
+LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free_security, struct bpf_prog_aux *aux)
+#endif /* CONFIG_BPF_SYSCALL */
+
+LSM_HOOK(int, 0, locked_down, enum lockdown_reason what)
+
+#ifdef CONFIG_PERF_EVENTS
+LSM_HOOK(int, 0, perf_event_open, struct perf_event_attr *attr, int type)
+LSM_HOOK(int, 0, perf_event_alloc, struct perf_event *event)
+LSM_HOOK(void, LSM_RET_VOID, perf_event_free, struct perf_event *event)
+LSM_HOOK(int, 0, perf_event_read, struct perf_event *event)
+LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
+#endif /* CONFIG_PERF_EVENTS */
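Every consumer of this header supplies its own LSM_HOOK definition before including it. As a worked expansion for the file_open entry above, the union security_list_options pattern produces a function pointer while security_hook_heads gets a list head:

        /* union security_list_options member: */
        int (*file_open)(struct file *file);

        /* struct security_hook_heads member: */
        struct hlist_head file_open;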
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 28d435049f24..988ca0df7824 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1490,625 +1490,15 @@
* Write perf_event security info if allowed.
*/
union security_list_options {
- int (*binder_set_context_mgr)(struct task_struct *mgr);
- int (*binder_transaction)(struct task_struct *from,
- struct task_struct *to);
- int (*binder_transfer_binder)(struct task_struct *from,
- struct task_struct *to);
- int (*binder_transfer_file)(struct task_struct *from,
- struct task_struct *to,
- struct file *file);
-
- int (*ptrace_access_check)(struct task_struct *child,
- unsigned int mode);
- int (*ptrace_traceme)(struct task_struct *parent);
- int (*capget)(struct task_struct *target, kernel_cap_t *effective,
- kernel_cap_t *inheritable, kernel_cap_t *permitted);
- int (*capset)(struct cred *new, const struct cred *old,
- const kernel_cap_t *effective,
- const kernel_cap_t *inheritable,
- const kernel_cap_t *permitted);
- int (*capable)(const struct cred *cred,
- struct user_namespace *ns,
- int cap,
- unsigned int opts);
- int (*quotactl)(int cmds, int type, int id, struct super_block *sb);
- int (*quota_on)(struct dentry *dentry);
- int (*syslog)(int type);
- int (*settime)(const struct timespec64 *ts, const struct timezone *tz);
- int (*vm_enough_memory)(struct mm_struct *mm, long pages);
-
- int (*bprm_set_creds)(struct linux_binprm *bprm);
- int (*bprm_check_security)(struct linux_binprm *bprm);
- void (*bprm_committing_creds)(struct linux_binprm *bprm);
- void (*bprm_committed_creds)(struct linux_binprm *bprm);
-
- int (*fs_context_dup)(struct fs_context *fc, struct fs_context *src_sc);
- int (*fs_context_parse_param)(struct fs_context *fc, struct fs_parameter *param);
-
- int (*sb_alloc_security)(struct super_block *sb);
- void (*sb_free_security)(struct super_block *sb);
- void (*sb_free_mnt_opts)(void *mnt_opts);
- int (*sb_eat_lsm_opts)(char *orig, void **mnt_opts);
- int (*sb_remount)(struct super_block *sb, void *mnt_opts);
- int (*sb_kern_mount)(struct super_block *sb);
- int (*sb_show_options)(struct seq_file *m, struct super_block *sb);
- int (*sb_statfs)(struct dentry *dentry);
- int (*sb_mount)(const char *dev_name, const struct path *path,
- const char *type, unsigned long flags, void *data);
- int (*sb_umount)(struct vfsmount *mnt, int flags);
- int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path);
- int (*sb_set_mnt_opts)(struct super_block *sb,
- void *mnt_opts,
- unsigned long kern_flags,
- unsigned long *set_kern_flags);
- int (*sb_clone_mnt_opts)(const struct super_block *oldsb,
- struct super_block *newsb,
- unsigned long kern_flags,
- unsigned long *set_kern_flags);
- int (*sb_add_mnt_opt)(const char *option, const char *val, int len,
- void **mnt_opts);
- int (*move_mount)(const struct path *from_path, const struct path *to_path);
- int (*dentry_init_security)(struct dentry *dentry, int mode,
- const struct qstr *name, void **ctx,
- u32 *ctxlen);
- int (*dentry_create_files_as)(struct dentry *dentry, int mode,
- struct qstr *name,
- const struct cred *old,
- struct cred *new);
-
-
-#ifdef CONFIG_SECURITY_PATH
- int (*path_unlink)(const struct path *dir, struct dentry *dentry);
- int (*path_mkdir)(const struct path *dir, struct dentry *dentry,
- umode_t mode);
- int (*path_rmdir)(const struct path *dir, struct dentry *dentry);
- int (*path_mknod)(const struct path *dir, struct dentry *dentry,
- umode_t mode, unsigned int dev);
- int (*path_truncate)(const struct path *path);
- int (*path_symlink)(const struct path *dir, struct dentry *dentry,
- const char *old_name);
- int (*path_link)(struct dentry *old_dentry, const struct path *new_dir,
- struct dentry *new_dentry);
- int (*path_rename)(const struct path *old_dir, struct dentry *old_dentry,
- const struct path *new_dir,
- struct dentry *new_dentry);
- int (*path_chmod)(const struct path *path, umode_t mode);
- int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid);
- int (*path_chroot)(const struct path *path);
-#endif
- /* Needed for inode based security check */
- int (*path_notify)(const struct path *path, u64 mask,
- unsigned int obj_type);
- int (*inode_alloc_security)(struct inode *inode);
- void (*inode_free_security)(struct inode *inode);
- int (*inode_init_security)(struct inode *inode, struct inode *dir,
- const struct qstr *qstr,
- const char **name, void **value,
- size_t *len);
- int (*inode_create)(struct inode *dir, struct dentry *dentry,
- umode_t mode);
- int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
- struct dentry *new_dentry);
- int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
- int (*inode_symlink)(struct inode *dir, struct dentry *dentry,
- const char *old_name);
- int (*inode_mkdir)(struct inode *dir, struct dentry *dentry,
- umode_t mode);
- int (*inode_rmdir)(struct inode *dir, struct dentry *dentry);
- int (*inode_mknod)(struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t dev);
- int (*inode_rename)(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir,
- struct dentry *new_dentry);
- int (*inode_readlink)(struct dentry *dentry);
- int (*inode_follow_link)(struct dentry *dentry, struct inode *inode,
- bool rcu);
- int (*inode_permission)(struct inode *inode, int mask);
- int (*inode_setattr)(struct dentry *dentry, struct iattr *attr);
- int (*inode_getattr)(const struct path *path);
- int (*inode_setxattr)(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
- void (*inode_post_setxattr)(struct dentry *dentry, const char *name,
- const void *value, size_t size,
- int flags);
- int (*inode_getxattr)(struct dentry *dentry, const char *name);
- int (*inode_listxattr)(struct dentry *dentry);
- int (*inode_removexattr)(struct dentry *dentry, const char *name);
- int (*inode_need_killpriv)(struct dentry *dentry);
- int (*inode_killpriv)(struct dentry *dentry);
- int (*inode_getsecurity)(struct inode *inode, const char *name,
- void **buffer, bool alloc);
- int (*inode_setsecurity)(struct inode *inode, const char *name,
- const void *value, size_t size,
- int flags);
- int (*inode_listsecurity)(struct inode *inode, char *buffer,
- size_t buffer_size);
- void (*inode_getsecid)(struct inode *inode, u32 *secid);
- int (*inode_copy_up)(struct dentry *src, struct cred **new);
- int (*inode_copy_up_xattr)(const char *name);
-
- int (*kernfs_init_security)(struct kernfs_node *kn_dir,
- struct kernfs_node *kn);
-
- int (*file_permission)(struct file *file, int mask);
- int (*file_alloc_security)(struct file *file);
- void (*file_free_security)(struct file *file);
- int (*file_ioctl)(struct file *file, unsigned int cmd,
- unsigned long arg);
- int (*mmap_addr)(unsigned long addr);
- int (*mmap_file)(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags);
- int (*file_mprotect)(struct vm_area_struct *vma, unsigned long reqprot,
- unsigned long prot);
- int (*file_lock)(struct file *file, unsigned int cmd);
- int (*file_fcntl)(struct file *file, unsigned int cmd,
- unsigned long arg);
- void (*file_set_fowner)(struct file *file);
- int (*file_send_sigiotask)(struct task_struct *tsk,
- struct fown_struct *fown, int sig);
- int (*file_receive)(struct file *file);
- int (*file_open)(struct file *file);
-
- int (*task_alloc)(struct task_struct *task, unsigned long clone_flags);
- void (*task_free)(struct task_struct *task);
- int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp);
- void (*cred_free)(struct cred *cred);
- int (*cred_prepare)(struct cred *new, const struct cred *old,
- gfp_t gfp);
- void (*cred_transfer)(struct cred *new, const struct cred *old);
- void (*cred_getsecid)(const struct cred *c, u32 *secid);
- int (*kernel_act_as)(struct cred *new, u32 secid);
- int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
- int (*kernel_module_request)(char *kmod_name);
- int (*kernel_load_data)(enum kernel_load_data_id id);
- int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id);
- int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size,
- enum kernel_read_file_id id);
- int (*task_fix_setuid)(struct cred *new, const struct cred *old,
- int flags);
- int (*task_setpgid)(struct task_struct *p, pid_t pgid);
- int (*task_getpgid)(struct task_struct *p);
- int (*task_getsid)(struct task_struct *p);
- void (*task_getsecid)(struct task_struct *p, u32 *secid);
- int (*task_setnice)(struct task_struct *p, int nice);
- int (*task_setioprio)(struct task_struct *p, int ioprio);
- int (*task_getioprio)(struct task_struct *p);
- int (*task_prlimit)(const struct cred *cred, const struct cred *tcred,
- unsigned int flags);
- int (*task_setrlimit)(struct task_struct *p, unsigned int resource,
- struct rlimit *new_rlim);
- int (*task_setscheduler)(struct task_struct *p);
- int (*task_getscheduler)(struct task_struct *p);
- int (*task_movememory)(struct task_struct *p);
- int (*task_kill)(struct task_struct *p, struct kernel_siginfo *info,
- int sig, const struct cred *cred);
- int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5);
- void (*task_to_inode)(struct task_struct *p, struct inode *inode);
-
- int (*ipc_permission)(struct kern_ipc_perm *ipcp, short flag);
- void (*ipc_getsecid)(struct kern_ipc_perm *ipcp, u32 *secid);
-
- int (*msg_msg_alloc_security)(struct msg_msg *msg);
- void (*msg_msg_free_security)(struct msg_msg *msg);
-
- int (*msg_queue_alloc_security)(struct kern_ipc_perm *perm);
- void (*msg_queue_free_security)(struct kern_ipc_perm *perm);
- int (*msg_queue_associate)(struct kern_ipc_perm *perm, int msqflg);
- int (*msg_queue_msgctl)(struct kern_ipc_perm *perm, int cmd);
- int (*msg_queue_msgsnd)(struct kern_ipc_perm *perm, struct msg_msg *msg,
- int msqflg);
- int (*msg_queue_msgrcv)(struct kern_ipc_perm *perm, struct msg_msg *msg,
- struct task_struct *target, long type,
- int mode);
-
- int (*shm_alloc_security)(struct kern_ipc_perm *perm);
- void (*shm_free_security)(struct kern_ipc_perm *perm);
- int (*shm_associate)(struct kern_ipc_perm *perm, int shmflg);
- int (*shm_shmctl)(struct kern_ipc_perm *perm, int cmd);
- int (*shm_shmat)(struct kern_ipc_perm *perm, char __user *shmaddr,
- int shmflg);
-
- int (*sem_alloc_security)(struct kern_ipc_perm *perm);
- void (*sem_free_security)(struct kern_ipc_perm *perm);
- int (*sem_associate)(struct kern_ipc_perm *perm, int semflg);
- int (*sem_semctl)(struct kern_ipc_perm *perm, int cmd);
- int (*sem_semop)(struct kern_ipc_perm *perm, struct sembuf *sops,
- unsigned nsops, int alter);
-
- int (*netlink_send)(struct sock *sk, struct sk_buff *skb);
-
- void (*d_instantiate)(struct dentry *dentry, struct inode *inode);
-
- int (*getprocattr)(struct task_struct *p, char *name, char **value);
- int (*setprocattr)(const char *name, void *value, size_t size);
- int (*ismaclabel)(const char *name);
- int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen);
- int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid);
- void (*release_secctx)(char *secdata, u32 seclen);
-
- void (*inode_invalidate_secctx)(struct inode *inode);
- int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
- int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
- int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
-
-#ifdef CONFIG_SECURITY_NETWORK
- int (*unix_stream_connect)(struct sock *sock, struct sock *other,
- struct sock *newsk);
- int (*unix_may_send)(struct socket *sock, struct socket *other);
-
- int (*socket_create)(int family, int type, int protocol, int kern);
- int (*socket_post_create)(struct socket *sock, int family, int type,
- int protocol, int kern);
- int (*socket_socketpair)(struct socket *socka, struct socket *sockb);
- int (*socket_bind)(struct socket *sock, struct sockaddr *address,
- int addrlen);
- int (*socket_connect)(struct socket *sock, struct sockaddr *address,
- int addrlen);
- int (*socket_listen)(struct socket *sock, int backlog);
- int (*socket_accept)(struct socket *sock, struct socket *newsock);
- int (*socket_sendmsg)(struct socket *sock, struct msghdr *msg,
- int size);
- int (*socket_recvmsg)(struct socket *sock, struct msghdr *msg,
- int size, int flags);
- int (*socket_getsockname)(struct socket *sock);
- int (*socket_getpeername)(struct socket *sock);
- int (*socket_getsockopt)(struct socket *sock, int level, int optname);
- int (*socket_setsockopt)(struct socket *sock, int level, int optname);
- int (*socket_shutdown)(struct socket *sock, int how);
- int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb);
- int (*socket_getpeersec_stream)(struct socket *sock,
- char __user *optval,
- int __user *optlen, unsigned len);
- int (*socket_getpeersec_dgram)(struct socket *sock,
- struct sk_buff *skb, u32 *secid);
- int (*sk_alloc_security)(struct sock *sk, int family, gfp_t priority);
- void (*sk_free_security)(struct sock *sk);
- void (*sk_clone_security)(const struct sock *sk, struct sock *newsk);
- void (*sk_getsecid)(struct sock *sk, u32 *secid);
- void (*sock_graft)(struct sock *sk, struct socket *parent);
- int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req);
- void (*inet_csk_clone)(struct sock *newsk,
- const struct request_sock *req);
- void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb);
- int (*secmark_relabel_packet)(u32 secid);
- void (*secmark_refcount_inc)(void);
- void (*secmark_refcount_dec)(void);
- void (*req_classify_flow)(const struct request_sock *req,
- struct flowi *fl);
- int (*tun_dev_alloc_security)(void **security);
- void (*tun_dev_free_security)(void *security);
- int (*tun_dev_create)(void);
- int (*tun_dev_attach_queue)(void *security);
- int (*tun_dev_attach)(struct sock *sk, void *security);
- int (*tun_dev_open)(void *security);
- int (*sctp_assoc_request)(struct sctp_endpoint *ep,
- struct sk_buff *skb);
- int (*sctp_bind_connect)(struct sock *sk, int optname,
- struct sockaddr *address, int addrlen);
- void (*sctp_sk_clone)(struct sctp_endpoint *ep, struct sock *sk,
- struct sock *newsk);
-#endif /* CONFIG_SECURITY_NETWORK */
-
-#ifdef CONFIG_SECURITY_INFINIBAND
- int (*ib_pkey_access)(void *sec, u64 subnet_prefix, u16 pkey);
- int (*ib_endport_manage_subnet)(void *sec, const char *dev_name,
- u8 port_num);
- int (*ib_alloc_security)(void **sec);
- void (*ib_free_security)(void *sec);
-#endif /* CONFIG_SECURITY_INFINIBAND */
-
-#ifdef CONFIG_SECURITY_NETWORK_XFRM
- int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx,
- gfp_t gfp);
- int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *old_ctx,
- struct xfrm_sec_ctx **new_ctx);
- void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *ctx);
- int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *ctx);
- int (*xfrm_state_alloc)(struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx);
- int (*xfrm_state_alloc_acquire)(struct xfrm_state *x,
- struct xfrm_sec_ctx *polsec,
- u32 secid);
- void (*xfrm_state_free_security)(struct xfrm_state *x);
- int (*xfrm_state_delete_security)(struct xfrm_state *x);
- int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid,
- u8 dir);
- int (*xfrm_state_pol_flow_match)(struct xfrm_state *x,
- struct xfrm_policy *xp,
- const struct flowi *fl);
- int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall);
-#endif /* CONFIG_SECURITY_NETWORK_XFRM */
-
- /* key management security hooks */
-#ifdef CONFIG_KEYS
- int (*key_alloc)(struct key *key, const struct cred *cred,
- unsigned long flags);
- void (*key_free)(struct key *key);
- int (*key_permission)(key_ref_t key_ref, const struct cred *cred,
- unsigned perm);
- int (*key_getsecurity)(struct key *key, char **_buffer);
-#endif /* CONFIG_KEYS */
-
-#ifdef CONFIG_AUDIT
- int (*audit_rule_init)(u32 field, u32 op, char *rulestr,
- void **lsmrule);
- int (*audit_rule_known)(struct audit_krule *krule);
- int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule);
- void (*audit_rule_free)(void *lsmrule);
-#endif /* CONFIG_AUDIT */
-
-#ifdef CONFIG_BPF_SYSCALL
- int (*bpf)(int cmd, union bpf_attr *attr,
- unsigned int size);
- int (*bpf_map)(struct bpf_map *map, fmode_t fmode);
- int (*bpf_prog)(struct bpf_prog *prog);
- int (*bpf_map_alloc_security)(struct bpf_map *map);
- void (*bpf_map_free_security)(struct bpf_map *map);
- int (*bpf_prog_alloc_security)(struct bpf_prog_aux *aux);
- void (*bpf_prog_free_security)(struct bpf_prog_aux *aux);
-#endif /* CONFIG_BPF_SYSCALL */
- int (*locked_down)(enum lockdown_reason what);
-#ifdef CONFIG_PERF_EVENTS
- int (*perf_event_open)(struct perf_event_attr *attr, int type);
- int (*perf_event_alloc)(struct perf_event *event);
- void (*perf_event_free)(struct perf_event *event);
- int (*perf_event_read)(struct perf_event *event);
- int (*perf_event_write)(struct perf_event *event);
-
-#endif
+ #define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
+ #include "lsm_hook_defs.h"
+ #undef LSM_HOOK
};
struct security_hook_heads {
- struct hlist_head binder_set_context_mgr;
- struct hlist_head binder_transaction;
- struct hlist_head binder_transfer_binder;
- struct hlist_head binder_transfer_file;
- struct hlist_head ptrace_access_check;
- struct hlist_head ptrace_traceme;
- struct hlist_head capget;
- struct hlist_head capset;
- struct hlist_head capable;
- struct hlist_head quotactl;
- struct hlist_head quota_on;
- struct hlist_head syslog;
- struct hlist_head settime;
- struct hlist_head vm_enough_memory;
- struct hlist_head bprm_set_creds;
- struct hlist_head bprm_check_security;
- struct hlist_head bprm_committing_creds;
- struct hlist_head bprm_committed_creds;
- struct hlist_head fs_context_dup;
- struct hlist_head fs_context_parse_param;
- struct hlist_head sb_alloc_security;
- struct hlist_head sb_free_security;
- struct hlist_head sb_free_mnt_opts;
- struct hlist_head sb_eat_lsm_opts;
- struct hlist_head sb_remount;
- struct hlist_head sb_kern_mount;
- struct hlist_head sb_show_options;
- struct hlist_head sb_statfs;
- struct hlist_head sb_mount;
- struct hlist_head sb_umount;
- struct hlist_head sb_pivotroot;
- struct hlist_head sb_set_mnt_opts;
- struct hlist_head sb_clone_mnt_opts;
- struct hlist_head sb_add_mnt_opt;
- struct hlist_head move_mount;
- struct hlist_head dentry_init_security;
- struct hlist_head dentry_create_files_as;
-#ifdef CONFIG_SECURITY_PATH
- struct hlist_head path_unlink;
- struct hlist_head path_mkdir;
- struct hlist_head path_rmdir;
- struct hlist_head path_mknod;
- struct hlist_head path_truncate;
- struct hlist_head path_symlink;
- struct hlist_head path_link;
- struct hlist_head path_rename;
- struct hlist_head path_chmod;
- struct hlist_head path_chown;
- struct hlist_head path_chroot;
-#endif
- /* Needed for inode based modules as well */
- struct hlist_head path_notify;
- struct hlist_head inode_alloc_security;
- struct hlist_head inode_free_security;
- struct hlist_head inode_init_security;
- struct hlist_head inode_create;
- struct hlist_head inode_link;
- struct hlist_head inode_unlink;
- struct hlist_head inode_symlink;
- struct hlist_head inode_mkdir;
- struct hlist_head inode_rmdir;
- struct hlist_head inode_mknod;
- struct hlist_head inode_rename;
- struct hlist_head inode_readlink;
- struct hlist_head inode_follow_link;
- struct hlist_head inode_permission;
- struct hlist_head inode_setattr;
- struct hlist_head inode_getattr;
- struct hlist_head inode_setxattr;
- struct hlist_head inode_post_setxattr;
- struct hlist_head inode_getxattr;
- struct hlist_head inode_listxattr;
- struct hlist_head inode_removexattr;
- struct hlist_head inode_need_killpriv;
- struct hlist_head inode_killpriv;
- struct hlist_head inode_getsecurity;
- struct hlist_head inode_setsecurity;
- struct hlist_head inode_listsecurity;
- struct hlist_head inode_getsecid;
- struct hlist_head inode_copy_up;
- struct hlist_head inode_copy_up_xattr;
- struct hlist_head kernfs_init_security;
- struct hlist_head file_permission;
- struct hlist_head file_alloc_security;
- struct hlist_head file_free_security;
- struct hlist_head file_ioctl;
- struct hlist_head mmap_addr;
- struct hlist_head mmap_file;
- struct hlist_head file_mprotect;
- struct hlist_head file_lock;
- struct hlist_head file_fcntl;
- struct hlist_head file_set_fowner;
- struct hlist_head file_send_sigiotask;
- struct hlist_head file_receive;
- struct hlist_head file_open;
- struct hlist_head task_alloc;
- struct hlist_head task_free;
- struct hlist_head cred_alloc_blank;
- struct hlist_head cred_free;
- struct hlist_head cred_prepare;
- struct hlist_head cred_transfer;
- struct hlist_head cred_getsecid;
- struct hlist_head kernel_act_as;
- struct hlist_head kernel_create_files_as;
- struct hlist_head kernel_load_data;
- struct hlist_head kernel_read_file;
- struct hlist_head kernel_post_read_file;
- struct hlist_head kernel_module_request;
- struct hlist_head task_fix_setuid;
- struct hlist_head task_setpgid;
- struct hlist_head task_getpgid;
- struct hlist_head task_getsid;
- struct hlist_head task_getsecid;
- struct hlist_head task_setnice;
- struct hlist_head task_setioprio;
- struct hlist_head task_getioprio;
- struct hlist_head task_prlimit;
- struct hlist_head task_setrlimit;
- struct hlist_head task_setscheduler;
- struct hlist_head task_getscheduler;
- struct hlist_head task_movememory;
- struct hlist_head task_kill;
- struct hlist_head task_prctl;
- struct hlist_head task_to_inode;
- struct hlist_head ipc_permission;
- struct hlist_head ipc_getsecid;
- struct hlist_head msg_msg_alloc_security;
- struct hlist_head msg_msg_free_security;
- struct hlist_head msg_queue_alloc_security;
- struct hlist_head msg_queue_free_security;
- struct hlist_head msg_queue_associate;
- struct hlist_head msg_queue_msgctl;
- struct hlist_head msg_queue_msgsnd;
- struct hlist_head msg_queue_msgrcv;
- struct hlist_head shm_alloc_security;
- struct hlist_head shm_free_security;
- struct hlist_head shm_associate;
- struct hlist_head shm_shmctl;
- struct hlist_head shm_shmat;
- struct hlist_head sem_alloc_security;
- struct hlist_head sem_free_security;
- struct hlist_head sem_associate;
- struct hlist_head sem_semctl;
- struct hlist_head sem_semop;
- struct hlist_head netlink_send;
- struct hlist_head d_instantiate;
- struct hlist_head getprocattr;
- struct hlist_head setprocattr;
- struct hlist_head ismaclabel;
- struct hlist_head secid_to_secctx;
- struct hlist_head secctx_to_secid;
- struct hlist_head release_secctx;
- struct hlist_head inode_invalidate_secctx;
- struct hlist_head inode_notifysecctx;
- struct hlist_head inode_setsecctx;
- struct hlist_head inode_getsecctx;
-#ifdef CONFIG_SECURITY_NETWORK
- struct hlist_head unix_stream_connect;
- struct hlist_head unix_may_send;
- struct hlist_head socket_create;
- struct hlist_head socket_post_create;
- struct hlist_head socket_socketpair;
- struct hlist_head socket_bind;
- struct hlist_head socket_connect;
- struct hlist_head socket_listen;
- struct hlist_head socket_accept;
- struct hlist_head socket_sendmsg;
- struct hlist_head socket_recvmsg;
- struct hlist_head socket_getsockname;
- struct hlist_head socket_getpeername;
- struct hlist_head socket_getsockopt;
- struct hlist_head socket_setsockopt;
- struct hlist_head socket_shutdown;
- struct hlist_head socket_sock_rcv_skb;
- struct hlist_head socket_getpeersec_stream;
- struct hlist_head socket_getpeersec_dgram;
- struct hlist_head sk_alloc_security;
- struct hlist_head sk_free_security;
- struct hlist_head sk_clone_security;
- struct hlist_head sk_getsecid;
- struct hlist_head sock_graft;
- struct hlist_head inet_conn_request;
- struct hlist_head inet_csk_clone;
- struct hlist_head inet_conn_established;
- struct hlist_head secmark_relabel_packet;
- struct hlist_head secmark_refcount_inc;
- struct hlist_head secmark_refcount_dec;
- struct hlist_head req_classify_flow;
- struct hlist_head tun_dev_alloc_security;
- struct hlist_head tun_dev_free_security;
- struct hlist_head tun_dev_create;
- struct hlist_head tun_dev_attach_queue;
- struct hlist_head tun_dev_attach;
- struct hlist_head tun_dev_open;
- struct hlist_head sctp_assoc_request;
- struct hlist_head sctp_bind_connect;
- struct hlist_head sctp_sk_clone;
-#endif /* CONFIG_SECURITY_NETWORK */
-#ifdef CONFIG_SECURITY_INFINIBAND
- struct hlist_head ib_pkey_access;
- struct hlist_head ib_endport_manage_subnet;
- struct hlist_head ib_alloc_security;
- struct hlist_head ib_free_security;
-#endif /* CONFIG_SECURITY_INFINIBAND */
-#ifdef CONFIG_SECURITY_NETWORK_XFRM
- struct hlist_head xfrm_policy_alloc_security;
- struct hlist_head xfrm_policy_clone_security;
- struct hlist_head xfrm_policy_free_security;
- struct hlist_head xfrm_policy_delete_security;
- struct hlist_head xfrm_state_alloc;
- struct hlist_head xfrm_state_alloc_acquire;
- struct hlist_head xfrm_state_free_security;
- struct hlist_head xfrm_state_delete_security;
- struct hlist_head xfrm_policy_lookup;
- struct hlist_head xfrm_state_pol_flow_match;
- struct hlist_head xfrm_decode_session;
-#endif /* CONFIG_SECURITY_NETWORK_XFRM */
-#ifdef CONFIG_KEYS
- struct hlist_head key_alloc;
- struct hlist_head key_free;
- struct hlist_head key_permission;
- struct hlist_head key_getsecurity;
-#endif /* CONFIG_KEYS */
-#ifdef CONFIG_AUDIT
- struct hlist_head audit_rule_init;
- struct hlist_head audit_rule_known;
- struct hlist_head audit_rule_match;
- struct hlist_head audit_rule_free;
-#endif /* CONFIG_AUDIT */
-#ifdef CONFIG_BPF_SYSCALL
- struct hlist_head bpf;
- struct hlist_head bpf_map;
- struct hlist_head bpf_prog;
- struct hlist_head bpf_map_alloc_security;
- struct hlist_head bpf_map_free_security;
- struct hlist_head bpf_prog_alloc_security;
- struct hlist_head bpf_prog_free_security;
-#endif /* CONFIG_BPF_SYSCALL */
- struct hlist_head locked_down;
-#ifdef CONFIG_PERF_EVENTS
- struct hlist_head perf_event_open;
- struct hlist_head perf_event_alloc;
- struct hlist_head perf_event_free;
- struct hlist_head perf_event_read;
- struct hlist_head perf_event_write;
-#endif
+ #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME;
+ #include "lsm_hook_defs.h"
+ #undef LSM_HOOK
} __randomize_layout;
/*
@@ -2135,6 +1525,12 @@ struct lsm_blob_sizes {
};
/*
+ * LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void
+ * LSM hooks (in include/linux/lsm_hook_defs.h).
+ */
+#define LSM_RET_VOID ((void) 0)
+
+/*
* Initializing a security_hook_list structure takes
* up a lot of space in a source file. This macro takes
* care of the common case and reduces the amount of
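For illustration, a sketch of how a single entry expands under the two LSM_HOOK definitions above; the entries shown follow the pattern of lsm_hook_defs.h but are reproduced from memory, so check that header for the authoritative list:

/*
 * A definition such as:
 *
 *	LSM_HOOK(int, 0, socket_create, int family, int type, int protocol,
 *		 int kern)
 *
 * expands inside union security_list_options to:
 *
 *	int (*socket_create)(int family, int type, int protocol, int kern);
 *
 * and inside struct security_hook_heads to:
 *
 *	struct hlist_head socket_create;
 *
 * Void hooks carry LSM_RET_VOID as their DEFAULT argument, e.g.:
 *
 *	LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk)
 */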
diff --git a/include/linux/mdio-xpcs.h b/include/linux/mdio-xpcs.h
new file mode 100644
index 000000000000..9a841aa5982d
--- /dev/null
+++ b/include/linux/mdio-xpcs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare XPCS helpers
+ */
+
+#ifndef __LINUX_MDIO_XPCS_H
+#define __LINUX_MDIO_XPCS_H
+
+#include <linux/phy.h>
+#include <linux/phylink.h>
+
+struct mdio_xpcs_args {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ struct mii_bus *bus;
+ int addr;
+};
+
+struct mdio_xpcs_ops {
+ int (*validate)(struct mdio_xpcs_args *xpcs,
+ unsigned long *supported,
+ struct phylink_link_state *state);
+ int (*config)(struct mdio_xpcs_args *xpcs,
+ const struct phylink_link_state *state);
+ int (*get_state)(struct mdio_xpcs_args *xpcs,
+ struct phylink_link_state *state);
+ int (*link_up)(struct mdio_xpcs_args *xpcs, int speed,
+ phy_interface_t interface);
+ int (*probe)(struct mdio_xpcs_args *xpcs, phy_interface_t interface);
+};
+
+#if IS_ENABLED(CONFIG_MDIO_XPCS)
+struct mdio_xpcs_ops *mdio_xpcs_get_ops(void);
+#else
+static inline struct mdio_xpcs_ops *mdio_xpcs_get_ops(void)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __LINUX_MDIO_XPCS_H */
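As a usage illustration, a minimal sketch of how a MAC driver might probe an XPCS through this interface; the function and variable names are hypothetical, and a real driver would keep the args and ops in its private state rather than on the stack:

/* Hypothetical consumer of the XPCS ops; not from an in-tree driver. */
static int example_xpcs_attach(struct mii_bus *bus, int addr,
			       phy_interface_t interface)
{
	struct mdio_xpcs_args xpcs = {
		.bus	= bus,	/* MDIO bus the XPCS responds on */
		.addr	= addr,	/* its MDIO address */
	};
	struct mdio_xpcs_ops *ops = mdio_xpcs_get_ops();

	/* NULL when CONFIG_MDIO_XPCS is disabled */
	if (!ops)
		return -ENODEV;

	return ops->probe(&xpcs, interface);
}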
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index a7604248777b..917e4bb2ed71 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -316,11 +316,15 @@ static inline void mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising,
int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set);
int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
+ u16 set);
int mdiobus_register_device(struct mdio_device *mdiodev);
int mdiobus_unregister_device(struct mdio_device *mdiodev);
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 18c6208f56fc..219b93cad1dd 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -355,24 +355,6 @@ static inline u32 mii_adv_to_ethtool_adv_x(u32 adv)
}
/**
- * mii_lpa_to_ethtool_lpa_x
- * @adv: value of the MII_LPA register
- *
- * A small helper function that translates MII_LPA
- * bits, when in 1000Base-X mode, to ethtool
- * LP advertisement settings.
- */
-static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
-{
- u32 result = 0;
-
- if (lpa & LPA_LPACK)
- result |= ADVERTISED_Autoneg;
-
- return result | mii_adv_to_ethtool_adv_x(lpa);
-}
-
-/**
* mii_lpa_mod_linkmode_adv_sgmii
* @lp_advertising: pointer to destination link mode.
* @lpa: value of the MII_LPA register
@@ -536,6 +518,45 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
}
/**
+ * mii_lpa_mod_linkmode_x - decode the link partner's config_reg to linkmodes
+ * @linkmodes: link modes array
+ * @lpa: config_reg word from link partner
+ * @fd_bit: link mode for 1000XFULL bit
+ */
+static inline void mii_lpa_mod_linkmode_x(unsigned long *linkmodes, u16 lpa,
+ int fd_bit)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, linkmodes,
+ lpa & LPA_LPACK);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes,
+ lpa & LPA_1000XPAUSE);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes,
+ lpa & LPA_1000XPAUSE_ASYM);
+ linkmode_mod_bit(fd_bit, linkmodes,
+ lpa & LPA_1000XFULL);
+}
+
+/**
+ * linkmode_adv_to_mii_adv_x - encode a linkmode to config_reg
+ * @linkmodes: linkmodes
+ * @fd_bit: full duplex bit
+ */
+static inline u16 linkmode_adv_to_mii_adv_x(const unsigned long *linkmodes,
+ int fd_bit)
+{
+ u16 adv = 0;
+
+ if (linkmode_test_bit(fd_bit, linkmodes))
+ adv |= ADVERTISE_1000XFULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes))
+ adv |= ADVERTISE_1000XPAUSE;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes))
+ adv |= ADVERTISE_1000XPSE_ASYM;
+
+ return adv;
+}
+
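A hedged round-trip sketch of the two helpers above; the linkmode array and function are local to the example:

/* Sketch: encode a local 1000BASE-X advertisement, decode a partner word. */
static u16 example_1000basex_roundtrip(unsigned long *lp_advertising, u16 lpa)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
	u16 adv;

	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, advertising);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);

	/* ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE expected here */
	adv = linkmode_adv_to_mii_adv_x(advertising,
					ETHTOOL_LINK_MODE_1000baseX_Full_BIT);

	/* Fill lp_advertising from the partner's config_reg word */
	mii_lpa_mod_linkmode_x(lp_advertising, lpa,
			       ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
	return adv;
}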
+/**
* mii_advertise_flowctrl - get flow control advertisement flags
* @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
*/
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 277a51d3ec40..d143b8bd55c9 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -518,9 +518,11 @@ struct mlx5_rate_limit {
};
struct mlx5_rl_entry {
- struct mlx5_rate_limit rl;
- u16 index;
- u16 refcount;
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
+ u16 index;
+ u64 refcount;
+ u16 uid;
+ u8 dedicated : 1;
};
struct mlx5_rl_table {
@@ -573,10 +575,6 @@ struct mlx5_priv {
/* end: alloc staff */
struct dentry *dbg_root;
- /* protect mkey key part */
- spinlock_t mkey_lock;
- u8 mkey_key;
-
struct list_head dev_list;
struct list_head ctx_list;
spinlock_t ctx_lock;
@@ -722,6 +720,7 @@ struct mlx5_core_dev {
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
struct mlx5_fw_tracer *tracer;
+ struct mlx5_rsc_dump *rsc_dump;
u32 vsc_addr;
struct mlx5_hv_vhca *hv_vhca;
};
@@ -945,12 +944,6 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- struct mlx5_async_ctx *async_ctx, u32 *in,
- int inlen, u32 *out, int outlen,
- mlx5_async_cbk_t callback,
- struct mlx5_async_work *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
u32 *in, int inlen);
@@ -1007,6 +1000,9 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+ bool dedicated_entry, u16 *index);
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 98e667b176ef..c16827eeba9c 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -70,8 +70,30 @@ u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);
+bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw);
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);
-u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
+
+/* Reg C0 usage:
+ * Reg C0 = < ESW_VHCA_ID_BITS(8) | ESW_VPORT_BITS(8) | ESW_CHAIN_TAG(16) >
+ *
+ * The highest 8 bits of reg c0 hold the vhca_id, the next 8 bits hold the
+ * vport_num, and the rest (lowest 16 bits) is left for tc chain tag restoration.
+ * VHCA_ID + VPORT comprise the SOURCE_PORT matching.
+ */
+#define ESW_VHCA_ID_BITS 8
+#define ESW_VPORT_BITS 8
+#define ESW_SOURCE_PORT_METADATA_BITS (ESW_VHCA_ID_BITS + ESW_VPORT_BITS)
+#define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS)
+#define ESW_CHAIN_TAG_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS)
+#define ESW_CHAIN_TAG_METADATA_MASK GENMASK(ESW_CHAIN_TAG_METADATA_BITS - 1,\
+ 0)
+
+static inline u32 mlx5_eswitch_get_vport_metadata_mask(void)
+{
+ return GENMASK(31, 32 - ESW_SOURCE_PORT_METADATA_BITS);
+}
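As a worked check of the layout, the values below follow directly from the definitions above:

/*
 * ESW_SOURCE_PORT_METADATA_BITS   = 8 + 8   = 16
 * ESW_SOURCE_PORT_METADATA_OFFSET = 32 - 16 = 16
 * ESW_CHAIN_TAG_METADATA_BITS     = 32 - 16 = 16
 * ESW_CHAIN_TAG_METADATA_MASK     = GENMASK(15, 0)  = 0x0000ffff
 * mlx5_eswitch_get_vport_metadata_mask()
 *                                 = GENMASK(31, 16) = 0xffff0000
 *
 * i.e. reg c0 = vhca_id[31:24] | vport_num[23:16] | chain_tag[15:0].
 */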
+
+u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
u16 vport_num);
u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw);
#else /* CONFIG_MLX5_ESWITCH */
@@ -88,17 +110,29 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
}
static inline bool
+mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
+{
+ return false;
+};
+
+static inline bool
mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
return false;
};
static inline u32
-mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
+mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
int vport_num)
{
return 0;
};
+
+static inline u32
+mlx5_eswitch_get_vport_metadata_mask(void)
+{
+ return 0;
+}
#endif /* CONFIG_MLX5_ESWITCH */
#endif
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 4cae16016b2b..a5cf5c76f348 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -84,6 +84,7 @@ enum {
FDB_TC_OFFLOAD,
FDB_FT_OFFLOAD,
FDB_SLOW_PATH,
+ FDB_PER_VPORT,
};
struct mlx5_pkt_reformat;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index bfdf41537cf1..cc55cee3b53c 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -414,8 +414,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_16[0x1];
u8 table_miss_action_domain[0x1];
u8 termination_table[0x1];
- u8 reserved_at_19[0x7];
- u8 reserved_at_20[0x2];
+ u8 reformat_and_fwd_to_table[0x1];
+ u8 reserved_at_1a[0x6];
+ u8 termination_table_raw_traffic[0x1];
+ u8 reserved_at_21[0x1];
u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8];
u8 max_modify_header_actions[0x8];
@@ -741,7 +743,7 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 flow_source[0x1];
u8 reserved_at_18[0x2];
u8 multi_fdb_encap[0x1];
- u8 reserved_at_1b[0x1];
+ u8 egress_acl_forward_to_vport[0x1];
u8 fdb_multi_path_to_table[0x1];
u8 reserved_at_1d[0x3];
@@ -813,7 +815,9 @@ struct mlx5_ifc_qos_cap_bits {
u8 reserved_at_4[0x1];
u8 packet_pacing_burst_bound[0x1];
u8 packet_pacing_typical_size[0x1];
- u8 reserved_at_7[0x19];
+ u8 reserved_at_7[0x4];
+ u8 packet_pacing_uid[0x1];
+ u8 reserved_at_c[0x14];
u8 reserved_at_20[0x20];
@@ -8265,9 +8269,20 @@ struct mlx5_ifc_set_pp_rate_limit_out_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_set_pp_rate_limit_context_bits {
+ u8 rate_limit[0x20];
+
+ u8 burst_upper_bound[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 typical_packet_size[0x10];
+
+ u8 reserved_at_60[0x120];
+};
+
struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -8277,14 +8292,7 @@ struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 reserved_at_60[0x20];
- u8 rate_limit[0x20];
-
- u8 burst_upper_bound[0x20];
-
- u8 reserved_at_c0[0x10];
- u8 typical_packet_size[0x10];
-
- u8 reserved_at_e0[0x120];
+ struct mlx5_ifc_set_pp_rate_limit_context_bits ctx;
};
struct mlx5_ifc_access_register_out_bits {
@@ -8420,7 +8428,8 @@ struct mlx5_ifc_ptys_reg_bits {
u8 proto_mask[0x3];
u8 an_status[0x4];
- u8 reserved_at_24[0x1c];
+ u8 reserved_at_24[0xc];
+ u8 data_rate_oper[0x10];
u8 ext_eth_proto_capability[0x20];
@@ -10486,7 +10495,8 @@ enum {
};
enum {
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2,
};
struct mlx5_ifc_tls_static_params_bits {
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 37e065a80a43..07d77323f78a 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -608,7 +608,7 @@ struct mlx5_ifc_tls_cmd_bits {
struct mlx5_ifc_tls_resp_bits {
u8 syndrome[0x20];
u8 stream_id[0x20];
- u8 reserverd[0x40];
+ u8 reserved[0x40];
};
#define MLX5_TLS_COMMAND_SIZE (0x100)
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 34d050bb1ae6..9d53c5ad272c 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -83,6 +83,8 @@ enum {
NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
NETIF_F_GRO_FRAGLIST_BIT, /* Fraglist GRO */
+ NETIF_F_HW_MACSEC_BIT, /* Offload MACsec operations */
+
/*
* Add your fresh new feature above and remember to update
* netdev_features_strings[] in net/core/ethtool.c and maybe
@@ -154,6 +156,7 @@ enum {
#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
#define NETIF_F_GRO_FRAGLIST __NETIF_F(GRO_FRAGLIST)
#define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST)
+#define NETIF_F_HW_MACSEC __NETIF_F(HW_MACSEC)
/* Finds the next feature with the highest number of the range of start till 0.
*/
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6c3f7032e8d9..130a668049ab 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -53,6 +53,8 @@ struct netpoll_info;
struct device;
struct phy_device;
struct dsa_port;
+struct macsec_context;
+struct macsec_ops;
struct sfp_bus;
/* 802.11 specific */
@@ -664,7 +666,7 @@ static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node
struct rps_map {
unsigned int len;
struct rcu_head rcu;
- u16 cpus[0];
+ u16 cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
@@ -686,7 +688,7 @@ struct rps_dev_flow {
struct rps_dev_flow_table {
unsigned int mask;
struct rcu_head rcu;
- struct rps_dev_flow flows[0];
+ struct rps_dev_flow flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
((_num) * sizeof(struct rps_dev_flow)))
@@ -704,7 +706,7 @@ struct rps_dev_flow_table {
struct rps_sock_flow_table {
u32 mask;
- u32 ents[0] ____cacheline_aligned_in_smp;
+ u32 ents[] ____cacheline_aligned_in_smp;
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
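The [0] to [] conversions in this header (and in the netfilter headers further down) replace the GCC zero-length-array extension with C99 flexible array members; the allocation-size macros are unaffected. A standalone userspace illustration of the pattern, with a hypothetical struct:

#include <stdlib.h>

struct flex_map {
	unsigned int len;
	unsigned short entries[];	/* C99 flexible array member */
};

static struct flex_map *flex_map_alloc(unsigned int n)
{
	/* Same sizing expression the RPS/XPS macros above use. */
	struct flex_map *m = malloc(sizeof(*m) + n * sizeof(m->entries[0]));

	if (m)
		m->len = n;
	return m;
}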
@@ -767,7 +769,7 @@ struct xps_map {
unsigned int len;
unsigned int alloc_len;
struct rcu_head rcu;
- u16 queues[0];
+ u16 queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
@@ -778,7 +780,7 @@ struct xps_map {
*/
struct xps_dev_maps {
struct rcu_head rcu;
- struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
+ struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};
#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
@@ -853,6 +855,7 @@ enum tc_setup_type {
TC_SETUP_FT,
TC_SETUP_QDISC_ETS,
TC_SETUP_QDISC_TBF,
+ TC_SETUP_QDISC_FIFO,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -1818,6 +1821,8 @@ enum netdev_priv_flags {
* that follow this device when it is moved
* to another network namespace.
*
+ * @macsec_ops: MACsec offloading ops
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -2112,6 +2117,11 @@ struct net_device {
unsigned wol_enabled:1;
struct list_head net_notifier_list;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec management functions */
+ const struct macsec_ops *macsec_ops;
+#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -3767,7 +3777,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
- int fd, u32 flags);
+ int fd, int expected_fd, u32 flags);
u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
enum bpf_netdev_command cmd);
int xdp_umem_query(struct net_device *dev, u16 queue_id);
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 5448c8b443db..ab192720e2d6 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -98,7 +98,7 @@ struct ip_set_counter {
struct ip_set_comment_rcu {
struct rcu_head rcu;
- char str[0];
+ char str[];
};
struct ip_set_comment {
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 1b261c51b3a3..5da88451853b 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -264,7 +264,7 @@ struct xt_table_info {
unsigned int stacksize;
void ***jumpstack;
- unsigned char entries[0] __aligned(8);
+ unsigned char entries[] __aligned(8);
};
int xt_register_target(struct xt_target *target);
@@ -464,7 +464,7 @@ struct compat_xt_entry_match {
} kernel;
u_int16_t match_size;
} u;
- unsigned char data[0];
+ unsigned char data[];
};
struct compat_xt_entry_target {
@@ -480,7 +480,7 @@ struct compat_xt_entry_target {
} kernel;
u_int16_t target_size;
} u;
- unsigned char data[0];
+ unsigned char data[];
};
/* FIXME: this works only on 32 bit tasks
@@ -494,7 +494,7 @@ struct compat_xt_counters {
struct compat_xt_counters_info {
char name[XT_TABLE_MAXNAMELEN];
compat_uint_t num_counters;
- struct compat_xt_counters counters[0];
+ struct compat_xt_counters counters[];
};
struct _compat_xt_align {
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index e98028f00e47..7d3537c40ec9 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -67,7 +67,7 @@ struct compat_arpt_entry {
__u16 next_offset;
compat_uint_t comefrom;
struct compat_xt_counters counters;
- unsigned char elems[0];
+ unsigned char elems[];
};
static inline struct xt_entry_target *
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 162f59d0d17a..2f5c4e6ecd8a 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -85,7 +85,7 @@ struct ebt_table_info {
/* room to maintain the stack used for jumping from and into udc */
struct ebt_chainstack **chainstack;
char *entries;
- struct ebt_counter counters[0] ____cacheline_aligned;
+ struct ebt_counter counters[] ____cacheline_aligned;
};
struct ebt_table {
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index e9e1ed74cdf1..b394bd4f68a3 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -76,7 +76,7 @@ struct compat_ipt_entry {
__u16 next_offset;
compat_uint_t comefrom;
struct compat_xt_counters counters;
- unsigned char elems[0];
+ unsigned char elems[];
};
/* Helper functions */
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 78ab959c4575..8225f7821a29 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -43,7 +43,7 @@ struct compat_ip6t_entry {
__u16 next_offset;
compat_uint_t comefrom;
struct compat_xt_counters counters;
- unsigned char elems[0];
+ unsigned char elems[];
};
static inline struct xt_entry_target *
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 60739d0cbf93..e3e49f0e5c13 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -201,10 +201,10 @@ struct netlink_callback {
struct module *module;
struct netlink_ext_ack *extack;
u16 family;
- u16 min_dump_alloc;
- bool strict_check;
u16 answer_flags;
+ u32 min_dump_alloc;
unsigned int prev_seq, seq;
+ bool strict_check;
union {
u8 ctx[48];
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 3840a541a9de..a048fba311d2 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -42,6 +42,13 @@
#include <linux/pci_ids.h>
+#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
+ PCI_STATUS_SIG_SYSTEM_ERROR | \
+ PCI_STATUS_REC_MASTER_ABORT | \
+ PCI_STATUS_REC_TARGET_ABORT | \
+ PCI_STATUS_SIG_TARGET_ABORT | \
+ PCI_STATUS_PARITY)
+
/*
* The PCI interface treats multi-function devices as independent
* devices. The slot/function address of each device is encoded
@@ -1045,6 +1052,8 @@ int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
+u64 pci_get_dsn(struct pci_dev *dev);
+
struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
struct pci_dev *from);
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
@@ -1203,6 +1212,7 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
+int pci_status_get_and_clear_errors(struct pci_dev *pdev);
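For illustration, a sketch of a driver using the two additions together; it assumes, based on the declaration above, that the helper returns the masked PCI_STATUS error bits on success and a negative errno on a failed config read:

/* Sketch: report and clear any PCI status error bits. */
static void example_handle_pci_error(struct pci_dev *pdev)
{
	int status = pci_status_get_and_clear_errors(pdev);

	if (status < 0)
		return;		/* config space read failed */

	if (status & PCI_STATUS_ERROR_BITS)
		dev_err(&pdev->dev, "PCI error status %#06x cleared\n",
			status);
}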
int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
@@ -1699,6 +1709,9 @@ static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
{ return 0; }
+static inline u64 pci_get_dsn(struct pci_dev *dev)
+{ return 0; }
+
/* Power management related routines */
static inline int pci_save_state(struct pci_dev *dev) { return 0; }
static inline void pci_restore_state(struct pci_dev *dev) { }
@@ -2171,6 +2184,7 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
#define PCI_VPD_INFO_FLD_HDR_SIZE 3
#define PCI_VPD_RO_KEYWORD_PARTNO "PN"
+#define PCI_VPD_RO_KEYWORD_SERIALNO "SN"
#define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
#define PCI_VPD_RO_KEYWORD_VENDOR0 "V0"
#define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 452e8ba8665f..2432ca463ddc 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -23,6 +23,8 @@
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
#include <linux/u64_stats_sync.h>
+#include <linux/irqreturn.h>
+#include <linux/iopoll.h>
#include <linux/atomic.h>
@@ -94,6 +96,7 @@ typedef enum {
PHY_INTERFACE_MODE_RTBI,
PHY_INTERFACE_MODE_SMII,
PHY_INTERFACE_MODE_XGMII,
+ PHY_INTERFACE_MODE_XLGMII,
PHY_INTERFACE_MODE_MOCA,
PHY_INTERFACE_MODE_QSGMII,
PHY_INTERFACE_MODE_TRGMII,
@@ -165,6 +168,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "smii";
case PHY_INTERFACE_MODE_XGMII:
return "xgmii";
+ case PHY_INTERFACE_MODE_XLGMII:
+ return "xlgmii";
case PHY_INTERFACE_MODE_MOCA:
return "moca";
case PHY_INTERFACE_MODE_QSGMII:
@@ -289,6 +294,7 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
return devm_mdiobus_alloc_size(dev, 0);
}
+struct mii_bus *mdio_find_bus(const char *mdio_name);
void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
@@ -360,6 +366,7 @@ struct macsec_ops;
* suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
* sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
* loopback_enabled: Set true if this phy has been loopbacked successfully.
+ * downshifted_rate: Set true if link speed has been downshifted.
* state: state of the PHY for management purposes
* dev_flags: Device-specific flags used by the PHY driver.
* irq: IRQ number of the PHY's interrupt (-1 if none)
@@ -400,6 +407,7 @@ struct phy_device {
unsigned suspended_by_mdio_bus:1;
unsigned sysfs_links:1;
unsigned loopback_enabled:1;
+ unsigned downshifted_rate:1;
unsigned autoneg:1;
/* The most recently read link state */
@@ -564,7 +572,7 @@ struct phy_driver {
int (*did_interrupt)(struct phy_device *phydev);
/* Override default interrupt handling */
- int (*handle_interrupt)(struct phy_device *phydev);
+ irqreturn_t (*handle_interrupt)(struct phy_device *phydev);
/* Clears up any memory if needed */
void (*remove)(struct phy_device *phydev);
@@ -693,6 +701,7 @@ static inline bool phy_is_started(struct phy_device *phydev)
void phy_resolve_aneg_pause(struct phy_device *phydev);
void phy_resolve_aneg_linkmode(struct phy_device *phydev);
+void phy_check_downshift(struct phy_device *phydev);
/**
* phy_read - Convenience function for reading a given PHY register
@@ -708,6 +717,19 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum)
return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum);
}
+#define phy_read_poll_timeout(phydev, regnum, val, cond, sleep_us, \
+ timeout_us, sleep_before_read) \
+({ \
+ int __ret = read_poll_timeout(phy_read, val, (cond) || val < 0, \
+ sleep_us, timeout_us, sleep_before_read, phydev, regnum); \
+ if (val < 0) \
+ __ret = val; \
+ if (__ret) \
+ phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \
+ __ret; \
+})
+
+
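A usage sketch of the new polling macro, waiting for a BMCR soft reset to self-clear; the poll and timeout intervals are illustrative:

static int example_wait_for_reset(struct phy_device *phydev)
{
	int val;

	/* Poll every 5 ms, time out after 100 ms, and sleep before the
	 * first read since the reset was only just triggered.
	 */
	return phy_read_poll_timeout(phydev, MII_BMCR, val,
				     !(val & BMCR_RESET),
				     5000, 100000, true);
}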
/**
* __phy_read - convenience function for reading a given PHY register
* @phydev: the phy_device struct
@@ -750,6 +772,25 @@ static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val)
}
/**
+ * __phy_modify_changed() - Convenience function for modifying a PHY register
+ * @phydev: a pointer to a &struct phy_device
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Unlocked helper function which allows a PHY register to be modified as
+ * new register value = (old register value & ~mask) | set
+ *
+ * Returns negative errno, 0 if there was no change, and 1 in case of change
+ */
+static inline int __phy_modify_changed(struct phy_device *phydev, u32 regnum,
+ u16 mask, u16 set)
+{
+ return __mdiobus_modify_changed(phydev->mdio.bus, phydev->mdio.addr,
+ regnum, mask, set);
+}
+
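A short sketch of acting on the tri-state return value; as with other double-underscore helpers, the caller is assumed to hold the MDIO bus lock, and the register and bit chosen here are only for illustration:

static int example_advertise_pause(struct phy_device *phydev)
{
	int ret = __phy_modify_changed(phydev, MII_ADVERTISE,
				       0, ADVERTISE_PAUSE_CAP);

	if (ret < 0)
		return ret;	/* MDIO access failed */
	if (ret == 1)
		phydev_dbg(phydev, "pause advertisement changed\n");
	return 0;
}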
+/**
* phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
* @phydev: The phy_device struct
@@ -760,6 +801,19 @@ static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val)
*/
int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
+#define phy_read_mmd_poll_timeout(phydev, devaddr, regnum, val, cond, \
+ sleep_us, timeout_us, sleep_before_read) \
+({ \
+ int __ret = read_poll_timeout(phy_read_mmd, val, (cond) || val < 0, \
+ sleep_us, timeout_us, sleep_before_read, \
+ phydev, devaddr, regnum); \
+ if (val < 0) \
+ __ret = val; \
+ if (__ret) \
+ phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \
+ __ret; \
+})
+
/**
* __phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
@@ -1260,6 +1314,9 @@ void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
bool phy_validate_pause(struct phy_device *phydev,
struct ethtool_pauseparam *pp);
+void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause);
+void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv,
+ bool *tx_pause, bool *rx_pause);
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
int (*run)(struct phy_device *));
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 523209e70947..3f8d37ec5503 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -12,12 +12,10 @@ struct net_device;
enum {
MLO_PAUSE_NONE,
- MLO_PAUSE_ASYM = BIT(0),
- MLO_PAUSE_SYM = BIT(1),
- MLO_PAUSE_RX = BIT(2),
- MLO_PAUSE_TX = BIT(3),
+ MLO_PAUSE_RX = BIT(0),
+ MLO_PAUSE_TX = BIT(1),
MLO_PAUSE_TXRX_MASK = MLO_PAUSE_TX | MLO_PAUSE_RX,
- MLO_PAUSE_AN = BIT(4),
+ MLO_PAUSE_AN = BIT(2),
MLO_AN_PHY = 0, /* Conventional PHY */
MLO_AN_FIXED, /* Fixed-link mode */
@@ -93,9 +91,10 @@ struct phylink_mac_ops {
void (*mac_an_restart)(struct phylink_config *config);
void (*mac_link_down)(struct phylink_config *config, unsigned int mode,
phy_interface_t interface);
- void (*mac_link_up)(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phy);
+ void (*mac_link_up)(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause);
};
#if 0 /* For kernel-doc purposes only. */
@@ -154,15 +153,33 @@ void mac_pcs_get_state(struct phylink_config *config,
* guaranteed to be correct, and so any mac_config() implementation must
* never reference these fields.
*
+ * (this requires a rewrite - please refer to mac_link_up() for situations
+ * where the PCS and MAC are not tightly integrated.)
+ *
+ * In all negotiation modes, as defined by @mode, @state->pause indicates the
+ * pause settings which should be applied as follows. If %MLO_PAUSE_AN is not
+ * set, %MLO_PAUSE_TX and %MLO_PAUSE_RX indicate whether the MAC should send
+ * pause frames and/or act on received pause frames respectively. Otherwise,
+ * the results of in-band negotiation/status from the MAC PCS should be used
+ * to control the MAC pause mode settings.
+ *
* The action performed depends on the currently selected mode:
*
* %MLO_AN_FIXED, %MLO_AN_PHY:
- * Configure the specified @state->speed, @state->duplex and
- * @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) modes over a link
- * specified by @state->interface. @state->advertising may be used,
- * but is not required. Other members of @state must be ignored.
+ * Configure for non-inband negotiation mode, where the link settings
+ * are completely communicated via mac_link_up(). The physical link
+ * protocol from the MAC is specified by @state->interface.
+ *
+ * @state->advertising may be used, but is not required.
+ *
+ * Older drivers (prior to the mac_link_up() change) may use @state->speed,
+ * @state->duplex and @state->pause to configure the MAC, but this is
+ * deprecated; such drivers should be converted to use mac_link_up().
*
- * Valid state members: interface, speed, duplex, pause, advertising.
+ * Other members of @state must be ignored.
+ *
+ * Valid state members: interface, advertising.
+ * Deprecated state members: speed, duplex, pause.
*
* %MLO_AN_INBAND:
* place the link in an inband negotiation mode (such as 802.3z
@@ -172,11 +189,14 @@ void mac_pcs_get_state(struct phylink_config *config,
* mac_pcs_get_state() callback. Changes in link state must be made
* by calling phylink_mac_change().
*
+ * Interface mode specific details are mentioned below.
+ *
* If in 802.3z mode, the link speed is fixed, dependent on the
- * @state->interface. Duplex is negotiated, and pause is advertised
- * according to @state->an_enabled, @state->pause and
- * @state->advertising flags. Beware of MACs which only support full
- * duplex at gigabit and higher speeds.
+ * @state->interface. Duplex and pause modes are negotiated via
+ * the in-band configuration word. Advertised pause modes are set
+ * according to the @state->an_enabled and @state->advertising
+ * flags. Beware of MACs which only support full duplex at gigabit
+ * and higher speeds.
*
* If in Cisco SGMII mode, the link speed and duplex mode are passed
* in the serial bitstream 16-bit configuration word, and the MAC
@@ -220,24 +240,127 @@ void mac_link_down(struct phylink_config *config, unsigned int mode,
/**
* mac_link_up() - allow the link to come up
* @config: a pointer to a &struct phylink_config.
+ * @phy: any attached phy
* @mode: link autonegotiation mode
* @interface: link &typedef phy_interface_t mode
- * @phy: any attached phy
+ * @speed: link speed
+ * @duplex: link duplex
+ * @tx_pause: link transmit pause enablement status
+ * @rx_pause: link receive pause enablement status
*
- * If @mode is not an in-band negotiation mode (as defined by
- * phylink_autoneg_inband()), allow the link to come up. If @phy
- * is non-%NULL, configure Energy Efficient Ethernet by calling
+ * Configure the MAC for an established link.
+ *
+ * @speed, @duplex, @tx_pause and @rx_pause indicate the finalised link
+ * settings, and should be used to configure the MAC block appropriately
+ * where these settings are not automatically conveyed from the PCS block,
+ * or if in-band negotiation (as defined by phylink_autoneg_inband(@mode))
+ * is disabled.
+ *
+ * Note that when 802.3z in-band negotiation is in use, it is possible
+ * that the user wishes to override the pause settings, and this should
+ * be allowed when considering the implementation of this method.
+ *
+ * If in-band negotiation mode is disabled, allow the link to come up. If
+ * @phy is non-%NULL, configure Energy Efficient Ethernet by calling
* phy_init_eee() and perform appropriate MAC configuration for EEE.
* Interface type selection must be done in mac_config().
*/
-void mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phy);
+void mac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause);
+#endif
+
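A hedged sketch of a driver-side implementation, to make the new signature concrete; every EXAMPLE_* register, bit and helper below is hypothetical:

/* Sketch: program a hypothetical MAC with the resolved link parameters. */
static void example_mac_link_up(struct phylink_config *config,
				struct phy_device *phy, unsigned int mode,
				phy_interface_t interface, int speed,
				int duplex, bool tx_pause, bool rx_pause)
{
	struct example_mac *mac = container_of(config, struct example_mac,
					       phylink_config);
	u32 ctrl = 0;

	if (speed == SPEED_1000)
		ctrl |= EXAMPLE_MAC_SPEED_1000;
	if (duplex == DUPLEX_FULL)
		ctrl |= EXAMPLE_MAC_FULL_DUPLEX;
	/* Pause enables arrive already resolved, per the text above */
	if (tx_pause)
		ctrl |= EXAMPLE_MAC_TX_PAUSE;
	if (rx_pause)
		ctrl |= EXAMPLE_MAC_RX_PAUSE;

	example_mac_writel(mac, EXAMPLE_MAC_CTRL, ctrl);
	example_mac_start(mac);	/* allow the link to come up */
}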
+/**
+ * struct phylink_pcs_ops - MAC PCS operations structure.
+ * @pcs_get_state: read the current MAC PCS link state from the hardware.
+ * @pcs_config: configure the MAC PCS for the selected mode and state.
+ * @pcs_an_restart: restart 802.3z BaseX autonegotiation.
+ * @pcs_link_up: program the PCS for the resolved link configuration
+ * (where necessary).
+ */
+struct phylink_pcs_ops {
+ void (*pcs_get_state)(struct phylink_config *config,
+ struct phylink_link_state *state);
+ int (*pcs_config)(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising);
+ void (*pcs_an_restart)(struct phylink_config *config);
+ void (*pcs_link_up)(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex);
+};
+
+#if 0 /* For kernel-doc purposes only. */
+/**
+ * pcs_get_state() - Read the current inband link state from the hardware
+ * @config: a pointer to a &struct phylink_config.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Read the current inband link state from the MAC PCS, reporting the
+ * current speed in @state->speed, duplex mode in @state->duplex, pause
+ * mode in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
+ * negotiation completion state in @state->an_complete, and link up state
+ * in @state->link. If possible, @state->lp_advertising should also be
+ * populated.
+ *
+ * When present, this overrides mac_pcs_get_state() in &struct
+ * phylink_mac_ops.
+ */
+void pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state);
+
+/**
+ * pcs_config() - Configure the PCS mode and advertisement
+ * @config: a pointer to a &struct phylink_config.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @interface: interface mode to be used
+ * @advertising: advertisement ethtool link mode mask
+ *
+ * Configure the PCS for the operating mode, the interface mode, and set
+ * the advertisement mask.
+ *
+ * When operating in %MLO_AN_INBAND, inband should always be enabled,
+ * otherwise inband should be disabled.
+ *
+ * For SGMII, there is no advertisement from the MAC side, the PCS should
+ * be programmed to acknowledge the inband word from the PHY.
+ *
+ * For 1000BASE-X, the advertisement should be programmed into the PCS.
+ *
+ * For most 10GBASE-R, there is no advertisement.
+ */
+int (*pcs_config)(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface, const unsigned long *advertising);
+
+/**
+ * pcs_an_restart() - restart 802.3z BaseX autonegotiation
+ * @config: a pointer to a &struct phylink_config.
+ *
+ * When PCS ops are present, this overrides mac_an_restart() in &struct
+ * phylink_mac_ops.
+ */
+void (*pcs_an_restart)(struct phylink_config *config);
+
+/**
+ * pcs_link_up() - program the PCS for the resolved link configuration
+ * @config: a pointer to a &struct phylink_config.
+ * @mode: link autonegotiation mode
+ * @interface: link &typedef phy_interface_t mode
+ * @speed: link speed
+ * @duplex: link duplex
+ *
+ * This call will be made just before mac_link_up() to inform the PCS of
+ * the resolved link parameters. For example, a PCS operating in SGMII
+ * mode without in-band AN needs to be manually configured for the link
+ * and duplex setting. Otherwise, this should be a no-op.
+ */
+void (*pcs_link_up)(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex);
#endif
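A sketch of how a driver might wire these up; example_mac_ops, example_priv and the example_pcs_* callbacks are hypothetical:

/* Hypothetical PCS callbacks, prototypes only for the sketch. */
static void example_pcs_get_state(struct phylink_config *config,
				  struct phylink_link_state *state);
static int example_pcs_config(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface,
			      const unsigned long *advertising);
static void example_pcs_an_restart(struct phylink_config *config);
static void example_pcs_link_up(struct phylink_config *config,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex);

static const struct phylink_pcs_ops example_pcs_ops = {
	.pcs_get_state	= example_pcs_get_state,
	.pcs_config	= example_pcs_config,
	.pcs_an_restart	= example_pcs_an_restart,
	.pcs_link_up	= example_pcs_link_up,
};

static int example_setup_phylink(struct example_priv *priv,
				 struct fwnode_handle *fwnode)
{
	struct phylink *pl;

	pl = phylink_create(&priv->phylink_config, fwnode,
			    PHY_INTERFACE_MODE_SGMII, &example_mac_ops);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* The PCS ops override mac_pcs_get_state()/mac_an_restart() */
	phylink_add_pcs(pl, &example_pcs_ops);

	priv->phylink = pl;
	return 0;
}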
struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
phy_interface_t iface,
- const struct phylink_mac_ops *ops);
+ const struct phylink_mac_ops *mac_ops);
+void phylink_add_pcs(struct phylink *, const struct phylink_pcs_ops *ops);
void phylink_destroy(struct phylink *);
int phylink_connect_phy(struct phylink *, struct phy_device *);
@@ -282,4 +405,13 @@ int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
void phylink_set_port_modes(unsigned long *bits);
void phylink_helper_basex_speed(struct phylink_link_state *state);
+void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
+ struct phylink_link_state *state);
+int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
+ phy_interface_t interface,
+ const unsigned long *advertising);
+void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs);
+
+void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
+ struct phylink_link_state *state);
#endif
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 4626b1ac3b6c..adff08bfecf9 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -85,6 +85,8 @@ typedef struct ns_common *ns_get_path_helper_t(void *);
extern int ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
void *private_data);
+extern bool ns_match(const struct ns_common *ns, dev_t dev, ino_t ino);
+
extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
const struct proc_ns_operations *ns_ops);
extern void nsfs_init(void);
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index c64a1ef87240..121a7eda4593 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -223,6 +223,12 @@ extern s32 scaled_ppm_to_ppb(long ppm);
/**
* ptp_find_pin() - obtain the pin index of a given auxiliary function
*
+ * The caller must hold ptp_clock::pincfg_mux. Drivers do not have
+ * access to that mutex as ptp_clock is an opaque type. However, the
+ * core code acquires the mutex before invoking the driver's
+ * ptp_clock_info::enable() callback, and so drivers may call this
+ * function from that context.
+ *
* @ptp: The clock obtained from ptp_clock_register().
* @func: One of the ptp_pin_function enumerated values.
* @chan: The particular functional channel to find.
@@ -234,6 +240,19 @@ int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan);
/**
+ * ptp_find_pin_unlocked() - wrapper for ptp_find_pin()
+ *
+ * This function acquires the ptp_clock::pincfg_mux mutex before
+ * invoking ptp_find_pin(). Instead of using this function, drivers
+ * should most likely call ptp_find_pin() directly from their
+ * ptp_clock_info::enable() method.
+ */
+int ptp_find_pin_unlocked(struct ptp_clock *ptp,
+ enum ptp_pin_function func, unsigned int chan);
+
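To illustrate the locking rule spelled out above, a sketch of calling ptp_find_pin() from a driver's .enable() callback, where the core has already taken pincfg_mux; the example_* types and helper are hypothetical:

/* Sketch: resolve the pin backing a periodic-output request. */
static int example_ptp_enable(struct ptp_clock_info *info,
			      struct ptp_clock_request *rq, int on)
{
	struct example_clock *clk =
		container_of(info, struct example_clock, caps);
	int pin;

	if (rq->type != PTP_CLK_REQ_PEROUT)
		return -EOPNOTSUPP;

	/* pincfg_mux is held by the core here, so no wrapper is needed */
	pin = ptp_find_pin(clk->ptp, PTP_PF_PEROUT, rq->perout.index);
	if (pin < 0)
		return -EINVAL;

	return example_program_perout(clk, pin, &rq->perout, on);
}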
+/**
* ptp_schedule_worker() - schedule ptp auxiliary work
*
* @ptp: The clock obtained from ptp_clock_register().
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 2dd0a9ed5b36..733fad7dfbed 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -97,6 +97,11 @@ struct qed_chain_u32 {
u32 cons_idx;
};
+struct addr_tbl_entry {
+ void *virt_addr;
+ dma_addr_t dma_map;
+};
+
struct qed_chain {
/* fastpath portion of the chain - required for commands such
* as produce / consume.
@@ -107,10 +112,11 @@ struct qed_chain {
/* Fastpath portions of the PBL [if exists] */
struct {
- /* Table for keeping the virtual addresses of the chain pages,
- * respectively to the physical addresses in the pbl table.
+ /* Table for keeping the virtual and physical addresses of the
+ * chain pages, respectively to the physical addresses
+ * in the pbl table.
*/
- void **pp_virt_addr_tbl;
+ struct addr_tbl_entry *pp_addr_tbl;
union {
struct qed_chain_pbl_u16 u16;
@@ -287,7 +293,7 @@ qed_chain_advance_page(struct qed_chain *p_chain,
*(u32 *)page_to_inc = 0;
page_index = *(u32 *)page_to_inc;
}
- *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
+ *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr;
}
}
@@ -537,7 +543,7 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
p_chain->pbl_sp.p_phys_table = 0;
p_chain->pbl_sp.p_virt_table = NULL;
- p_chain->pbl.pp_virt_addr_tbl = NULL;
+ p_chain->pbl.pp_addr_tbl = NULL;
}
/**
@@ -575,11 +581,11 @@ static inline void qed_chain_init_mem(struct qed_chain *p_chain,
static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
void *p_virt_pbl,
dma_addr_t p_phys_pbl,
- void **pp_virt_addr_tbl)
+ struct addr_tbl_entry *pp_addr_tbl)
{
p_chain->pbl_sp.p_phys_table = p_phys_pbl;
p_chain->pbl_sp.p_virt_table = p_virt_pbl;
- p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
+ p_chain->pbl.pp_addr_tbl = pp_addr_tbl;
}
/**
@@ -644,7 +650,7 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
break;
case QED_CHAIN_MODE_PBL:
last_page_idx = p_chain->page_cnt - 1;
- p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
+ p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr;
break;
}
/* p_virt_addr points at this stage to the last page of the chain */
@@ -716,7 +722,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
page_cnt = qed_chain_get_page_cnt(p_chain);
for (i = 0; i < page_cnt; i++)
- memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
+ memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
QED_CHAIN_PAGE_SIZE);
}
diff --git a/include/linux/remoteproc/qcom_q6v5_ipa_notify.h b/include/linux/remoteproc/qcom_q6v5_ipa_notify.h
new file mode 100644
index 000000000000..0820edc0ab7d
--- /dev/null
+++ b/include/linux/remoteproc/qcom_q6v5_ipa_notify.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (C) 2019 Linaro Ltd. */
+
+#ifndef __QCOM_Q6V5_IPA_NOTIFY_H__
+#define __QCOM_Q6V5_IPA_NOTIFY_H__
+
+#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
+
+#include <linux/remoteproc.h>
+
+enum qcom_rproc_event {
+ MODEM_STARTING = 0, /* Modem is about to be started */
+ MODEM_RUNNING = 1, /* Startup complete; modem is operational */
+ MODEM_STOPPING = 2, /* Modem is about to shut down */
+ MODEM_CRASHED = 3, /* Modem has crashed (implies stopping) */
+ MODEM_OFFLINE = 4, /* Modem is now offline */
+ MODEM_REMOVING = 5, /* Modem is about to be removed */
+};
+
+typedef void (*qcom_ipa_notify_t)(void *data, enum qcom_rproc_event event);
+
+struct qcom_rproc_ipa_notify {
+ struct rproc_subdev subdev;
+
+ qcom_ipa_notify_t notify;
+ void *data;
+};
+
+/**
+ * qcom_add_ipa_notify_subdev() - Register IPA notification subdevice
+ * @rproc: rproc handle
+ * @ipa_notify: IPA notification subdevice handle
+ *
+ * Register the @ipa_notify subdevice with the @rproc so modem events
+ * can be sent to IPA when they occur.
+ *
+ * This is defined in "qcom_q6v5_ipa_notify.c".
+ */
+void qcom_add_ipa_notify_subdev(struct rproc *rproc,
+ struct qcom_rproc_ipa_notify *ipa_notify);
+
+/**
+ * qcom_remove_ipa_notify_subdev() - Remove IPA SSR subdevice
+ * @rproc: rproc handle
+ * @ipa_notify: IPA notification subdevice handle
+ *
+ * This is defined in "qcom_q6v5_ipa_notify.c".
+ */
+void qcom_remove_ipa_notify_subdev(struct rproc *rproc,
+ struct qcom_rproc_ipa_notify *ipa_notify);
+
+/**
+ * qcom_register_ipa_notify() - Register IPA notification function
+ * @rproc: Remote processor handle
+ * @notify: Non-null IPA notification callback function pointer
+ * @data: Data supplied to IPA notification callback function
+ *
+ * Return: 0 if successful, or a negative error code otherwise
+ *
+ * This is defined in "qcom_q6v5_mss.c".
+ */
+int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
+ void *data);
+/**
+ * qcom_deregister_ipa_notify() - Deregister IPA notification function
+ * @rproc: Remote processor handle
+ *
+ * This is defined in "qcom_q6v5_mss.c".
+ */
+void qcom_deregister_ipa_notify(struct rproc *rproc);
+
+#else /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
+
+struct qcom_rproc_ipa_notify { /* empty */ };
+
+#define qcom_add_ipa_notify_subdev(rproc, ipa_notify) /* no-op */
+#define qcom_remove_ipa_notify_subdev(rproc, ipa_notify) /* no-op */
+
+#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
+
+#endif /* !__QCOM_Q6V5_IPA_NOTIFY_H__ */
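A hedged sketch of the IPA driver's side of this interface; the callback body and all example_* names are hypothetical:

/* Sketch: react to modem lifecycle events delivered via the subdevice. */
static void example_ipa_notify(void *data, enum qcom_rproc_event event)
{
	struct example_ipa *ipa = data;

	switch (event) {
	case MODEM_RUNNING:
		example_ipa_modem_ready(ipa);
		break;
	case MODEM_STOPPING:
	case MODEM_CRASHED:
		example_ipa_modem_down(ipa);
		break;
	default:
		break;	/* remaining events need no action here */
	}
}

static int example_ipa_register(struct example_ipa *ipa, struct rproc *rproc)
{
	/* Returns 0 on success, negative errno otherwise */
	return qcom_register_ipa_notify(rproc, example_ipa_notify, ipa);
}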
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index e59620234415..28b1a2b4459e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3516,23 +3516,15 @@ int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
struct sk_buff_head *queue,
unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
struct sk_buff_head *queue,
- unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
- int *off, int *err,
+ unsigned int flags, int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk,
struct sk_buff_head *sk_queue,
- unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
- int *off, int *err);
+ unsigned int flags, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
__poll_t datagram_poll(struct file *file, struct socket *sock,
@@ -4397,8 +4389,8 @@ struct skb_gso_cb {
__wsum csum;
__u16 csum_start;
};
-#define SKB_SGO_CB_OFFSET 32
-#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
+#define SKB_GSO_CB_OFFSET 32
+#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
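The hunk above drops the per-skb destructor argument from the
__skb_*_recv_datagram family; a minimal sketch of the updated calling
convention (locals illustrative):

	int off = 0, err = 0;
	struct sk_buff *skb, *last;

	skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
				      &off, &err, &last);
	if (!skb)
		return err;	/* no packet ready; err holds the reason */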
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 14d61bba0b79..8a709f63c5e5 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -323,14 +323,6 @@ static inline void sk_psock_free_link(struct sk_psock_link *link)
}
struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);
-#if defined(CONFIG_BPF_STREAM_PARSER)
-void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link);
-#else
-static inline void sk_psock_unlink(struct sock *sk,
- struct sk_psock_link *link)
-{
-}
-#endif
void __sk_psock_purge_ingress_msg(struct sk_psock *psock);
@@ -347,33 +339,37 @@ static inline void sk_psock_update_proto(struct sock *sk,
struct sk_psock *psock,
struct proto *ops)
{
- psock->saved_unhash = sk->sk_prot->unhash;
- psock->saved_close = sk->sk_prot->close;
- psock->saved_write_space = sk->sk_write_space;
+ /* Initialize saved callbacks and original proto only once, since this
+ * function may be called multiple times for a psock, e.g. when
+ * psock->progs.msg_parser is updated.
+ *
+ * Since we've not installed the new proto, psock is not yet in use and
+ * we can initialize it without synchronization.
+ */
+ if (!psock->sk_proto) {
+ struct proto *orig = READ_ONCE(sk->sk_prot);
- psock->sk_proto = sk->sk_prot;
- sk->sk_prot = ops;
+ psock->saved_unhash = orig->unhash;
+ psock->saved_close = orig->close;
+ psock->saved_write_space = sk->sk_write_space;
+
+ psock->sk_proto = orig;
+ }
+
+ /* Pairs with lockless read in sk_clone_lock() */
+ WRITE_ONCE(sk->sk_prot, ops);
}
static inline void sk_psock_restore_proto(struct sock *sk,
struct sk_psock *psock)
{
sk->sk_prot->unhash = psock->saved_unhash;
-
- if (psock->sk_proto) {
- struct inet_connection_sock *icsk = inet_csk(sk);
- bool has_ulp = !!icsk->icsk_ulp_data;
-
- if (has_ulp) {
- tcp_update_ulp(sk, psock->sk_proto,
- psock->saved_write_space);
- } else {
- sk->sk_prot = psock->sk_proto;
- sk->sk_write_space = psock->saved_write_space;
- }
- psock->sk_proto = NULL;
+ if (inet_csk_has_ulp(sk)) {
+ tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
} else {
sk->sk_write_space = psock->saved_write_space;
+ /* Pairs with lockless read in sk_clone_lock() */
+ WRITE_ONCE(sk->sk_prot, psock->sk_proto);
}
}
@@ -395,26 +391,6 @@ static inline bool sk_psock_test_state(const struct sk_psock *psock,
return test_bit(bit, &psock->state);
}
-static inline struct sk_psock *sk_psock_get_checked(struct sock *sk)
-{
- struct sk_psock *psock;
-
- rcu_read_lock();
- psock = sk_psock(sk);
- if (psock) {
- if (sk->sk_prot->recvmsg != tcp_bpf_recvmsg) {
- psock = ERR_PTR(-EBUSY);
- goto out;
- }
-
- if (!refcount_inc_not_zero(&psock->refcnt))
- psock = ERR_PTR(-EBUSY);
- }
-out:
- rcu_read_unlock();
- return psock;
-}
-
static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
struct sk_psock *psock;
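The WRITE_ONCE()/READ_ONCE() comments above refer to the following
pairing: sk_clone_lock() may sample sk->sk_prot without the socket lock,
so both sides must use the ONCE accessors. Illustration only:

	/* writer (psock side), under its own synchronization */
	WRITE_ONCE(sk->sk_prot, ops);

	/* lockless reader (e.g. sk_clone_lock()) */
	const struct proto *prot = READ_ONCE(sk->sk_prot);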
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 19190c609282..fbafb353e9be 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -80,6 +80,7 @@
struct stmmac_mdio_bus_data {
unsigned int phy_mask;
+ unsigned int has_xpcs;
int *irqs;
int probed_phy_irq;
bool needs_reset;
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index fa7ee503fb76..4beb51009b62 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -310,6 +310,18 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn)
return kernfs_enable_ns(kn);
}
+int sysfs_file_change_owner(struct kobject *kobj, const char *name, kuid_t kuid,
+ kgid_t kgid);
+int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid);
+int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ,
+ const char *name, kuid_t kuid, kgid_t kgid);
+int sysfs_groups_change_owner(struct kobject *kobj,
+ const struct attribute_group **groups,
+ kuid_t kuid, kgid_t kgid);
+int sysfs_group_change_owner(struct kobject *kobj,
+ const struct attribute_group *groups, kuid_t kuid,
+ kgid_t kgid);
+
#else /* CONFIG_SYSFS */
static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
@@ -522,6 +534,40 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn)
{
}
+static inline int sysfs_file_change_owner(struct kobject *kobj,
+ const char *name, kuid_t kuid,
+ kgid_t kgid)
+{
+ return 0;
+}
+
+static inline int sysfs_link_change_owner(struct kobject *kobj,
+ struct kobject *targ,
+ const char *name, kuid_t kuid,
+ kgid_t kgid)
+{
+ return 0;
+}
+
+static inline int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid)
+{
+ return 0;
+}
+
+static inline int sysfs_groups_change_owner(struct kobject *kobj,
+ const struct attribute_group **groups,
+ kuid_t kuid, kgid_t kgid)
+{
+ return 0;
+}
+
+static inline int sysfs_group_change_owner(struct kobject *kobj,
+ const struct attribute_group *groups,
+ kuid_t kuid, kgid_t kgid)
+{
+ return 0;
+}
+
#endif /* CONFIG_SYSFS */
static inline int __must_check sysfs_create_file(struct kobject *kobj,
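A hedged sketch of the new ownership helpers, e.g. when retagging a
device's sysfs entries for a user namespace (uid/gid 1000 are made up;
assumes a valid struct device *dev):

	kuid_t kuid = make_kuid(&init_user_ns, 1000);
	kgid_t kgid = make_kgid(&init_user_ns, 1000);
	int ret;

	ret = sysfs_change_owner(&dev->kobj, kuid, kgid);
	if (!ret)
		ret = sysfs_groups_change_owner(&dev->kobj, dev->groups,
						kuid, kgid);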
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 3dc964010fef..421c99c12291 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -86,9 +86,19 @@ struct mptcp_options_received {
u64 data_seq;
u32 subflow_seq;
u16 data_len;
- u8 mp_capable : 1,
+ u16 mp_capable : 1,
mp_join : 1,
- dss : 1;
+ dss : 1,
+ add_addr : 1,
+ rm_addr : 1,
+ family : 4,
+ echo : 1,
+ backup : 1;
+ u32 token;
+ u32 nonce;
+ u64 thmac;
+ u8 hmac[20];
+ u8 join_id;
u8 use_map:1,
dsn64:1,
data_fin:1,
@@ -96,6 +106,16 @@ struct mptcp_options_received {
ack64:1,
mpc_map:1,
__unused:2;
+ u8 addr_id;
+ u8 rm_id;
+ union {
+ struct in_addr addr;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ struct in6_addr addr6;
+#endif
+ };
+ u64 ahmac;
+ u16 port;
};
#endif
@@ -131,6 +151,8 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
#if IS_ENABLED(CONFIG_MPTCP)
rx_opt->mptcp.mp_capable = 0;
rx_opt->mptcp.mp_join = 0;
+ rx_opt->mptcp.add_addr = 0;
+ rx_opt->mptcp.rm_addr = 0;
rx_opt->mptcp.dss = 0;
#endif
}
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index ea627d1ab7e3..498dbcedb451 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -86,4 +86,16 @@ int tnum_strn(char *str, size_t size, struct tnum a);
/* Format a tnum as tristate binary expansion */
int tnum_sbin(char *str, size_t size, struct tnum a);
+/* Returns the 32-bit subreg */
+struct tnum tnum_subreg(struct tnum a);
+/* Returns the tnum with the lower 32-bit subreg cleared */
+struct tnum tnum_clear_subreg(struct tnum a);
+/* Returns the tnum with the lower 32-bit subreg set to value */
+struct tnum tnum_const_subreg(struct tnum a, u32 value);
+/* Returns true if 32-bit subreg @a is a known constant */
+static inline bool tnum_subreg_is_const(struct tnum a)
+{
+ return !(tnum_subreg(a)).mask;
+}
+
#endif /* _LINUX_TNUM_H */
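A tnum is a (value, mask) pair in which set mask bits are unknown; the
new helpers work on the low 32 bits (the "subreg"). Rough illustration
(the constant is made up):

	struct tnum a = tnum_const(0x1234abcdULL);	/* all 64 bits known */

	if (tnum_subreg_is_const(a)) {
		/* the low 32 bits are the constant 0x1234abcd */
		u32 lo = (u32)tnum_subreg(a).value;
	}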
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 1646c06989df..0ce4377545f8 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -46,9 +46,12 @@
#define CDC_NCM_DATA_ALTSETTING_NCM 1
#define CDC_NCM_DATA_ALTSETTING_MBIM 2
-/* CDC NCM subclass 3.2.1 */
+/* CDC NCM subclass 3.3.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
+/* CDC NCM subclass 3.3.2 */
+#define USB_CDC_NCM_NDP32_LENGTH_MIN 0x20
+
/* Maximum NTB length */
#define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */
#define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */
@@ -84,7 +87,7 @@
/* Driver flags */
#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
#define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */
-#define CDC_NCM_FLAG_RESET_NTB16 0x08 /* set NDP16 one more time after altsetting switch */
+#define CDC_NCM_FLAG_PREFER_NTB32 0x08 /* prefer NDP32 over NDP16 */
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
(x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
@@ -113,7 +116,11 @@ struct cdc_ncm_ctx {
u32 timer_interval;
u32 max_ndp_size;
- struct usb_cdc_ncm_ndp16 *delayed_ndp16;
+ u8 is_ndp16;
+ union {
+ struct usb_cdc_ncm_ndp16 *delayed_ndp16;
+ struct usb_cdc_ncm_ndp32 *delayed_ndp32;
+ };
u32 tx_timer_pending;
u32 tx_curr_frame_num;
@@ -150,6 +157,8 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset);
+int cdc_ncm_rx_verify_nth32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
+int cdc_ncm_rx_verify_ndp32(struct sk_buff *skb_in, int ndpoffset);
struct sk_buff *
cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags);
int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in);
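A sketch of how a minidriver would opt into 32-bit NTBs via the renamed
flag (the bind callback shown is illustrative):

	static int my_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
	{
		return cdc_ncm_bind_common(dev, intf,
					   CDC_NCM_DATA_ALTSETTING_NCM,
					   CDC_NCM_FLAG_PREFER_NTB32);
	}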
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index a71378007e61..c80539be1542 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -138,7 +138,7 @@ struct lowpan_dev {
struct lowpan_iphc_ctx_table ctx;
/* must be last */
- u8 priv[0] __aligned(sizeof(void *));
+ u8 priv[] __aligned(sizeof(void *));
};
struct lowpan_802154_neigh {
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 71347a90a9d1..c24d7643548e 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -41,6 +41,9 @@ struct tc_action {
struct tc_cookie __rcu *act_cookie;
struct tcf_chain __rcu *goto_chain;
u32 tcfa_flags;
+ u8 hw_stats;
+ u8 used_hw_stats;
+ bool used_hw_stats_valid;
};
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
@@ -52,6 +55,9 @@ struct tc_action {
#define tcf_rate_est common.tcfa_rate_est
#define tcf_lock common.tcfa_lock
+#define TCA_ACT_HW_STATS_ANY (TCA_ACT_HW_STATS_IMMEDIATE | \
+ TCA_ACT_HW_STATS_DELAYED)
+
/* Update lastuse only if needed, to avoid dirtying a cache line.
* We use a temp variable to avoid fetching jiffies twice.
*/
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index a088349dd94f..e0eabe58aa8b 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -90,6 +90,9 @@ int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
#endif
+int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
+ unsigned char nsegs);
+
bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
const unsigned int prefix_len,
struct net_device *dev);
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 17e10fba2152..f42fdddecd41 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -27,7 +27,7 @@ struct unix_address {
refcount_t refcnt;
int len;
unsigned int hash;
- struct sockaddr_un name[0];
+ struct sockaddr_un name[];
};
struct unix_skb_parms {
@@ -42,7 +42,7 @@ struct unix_skb_parms {
} __randomize_layout;
struct scm_stat {
- u32 nr_fds;
+ atomic_t nr_fds;
};
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
diff --git a/include/net/bareudp.h b/include/net/bareudp.h
new file mode 100644
index 000000000000..cb03f6f15956
--- /dev/null
+++ b/include/net/bareudp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NET_BAREUDP_H
+#define __NET_BAREUDP_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+struct bareudp_conf {
+ __be16 ethertype;
+ __be16 port;
+ u16 sport_min;
+ bool multi_proto_mode;
+};
+
+struct net_device *bareudp_dev_create(struct net *net, const char *name,
+ u8 name_assign_type,
+ struct bareudp_conf *info);
+
+#endif
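A usage sketch for the new API (device name is illustrative; 6635 is the
IANA MPLS-over-UDP port; assumes bareudp_dev_create() reports failure
with ERR_PTR()):

	struct bareudp_conf conf = {
		.ethertype = htons(ETH_P_MPLS_UC),
		.port      = htons(6635),
	};
	struct net_device *dev;

	dev = bareudp_dev_create(net, "bareudp0", NET_NAME_USER, &conf);
	if (IS_ERR(dev))
		return PTR_ERR(dev);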
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e42bb8e03c09..1576353a2773 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -121,6 +121,23 @@ struct bt_voice {
#define BT_SNDMTU 12
#define BT_RCVMTU 13
+#define BT_PHY 14
+
+#define BT_PHY_BR_1M_1SLOT 0x00000001
+#define BT_PHY_BR_1M_3SLOT 0x00000002
+#define BT_PHY_BR_1M_5SLOT 0x00000004
+#define BT_PHY_EDR_2M_1SLOT 0x00000008
+#define BT_PHY_EDR_2M_3SLOT 0x00000010
+#define BT_PHY_EDR_2M_5SLOT 0x00000020
+#define BT_PHY_EDR_3M_1SLOT 0x00000040
+#define BT_PHY_EDR_3M_3SLOT 0x00000080
+#define BT_PHY_EDR_3M_5SLOT 0x00000100
+#define BT_PHY_LE_1M_TX 0x00000200
+#define BT_PHY_LE_1M_RX 0x00000400
+#define BT_PHY_LE_2M_TX 0x00000800
+#define BT_PHY_LE_2M_RX 0x00001000
+#define BT_PHY_LE_CODED_TX 0x00002000
+#define BT_PHY_LE_CODED_RX 0x00004000
__printf(1, 2)
void bt_info(const char *fmt, ...);
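From userspace the new option reads back a bitmask of the PHYs in use on
an open socket; a sketch (assumes BlueZ headers for SOL_BLUETOOTH and
the BT_PHY_* values):

	static int link_uses_le_2m_tx(int sk)
	{
		uint32_t phys = 0;
		socklen_t len = sizeof(phys);

		if (getsockopt(sk, SOL_BLUETOOTH, BT_PHY, &phys, &len) < 0)
			return 0;

		return !!(phys & BT_PHY_LE_2M_TX);
	}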
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 6293bdd7d862..5f60e135aeb6 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -115,7 +115,7 @@ enum {
* wrongly configured local features that will require forcing
* them to enable this mode. Getting RSSI information with the
* inquiry responses is preferred since it allows for a better
- * user expierence.
+ * user experience.
*
* This quirk must be set before hci_register_dev is called.
*/
@@ -142,7 +142,7 @@ enum {
/* When this quirk is set, an external configuration step
* is required and will be indicated with the controller
- * configuation.
+ * configuration.
*
* This quirk can be set before hci_register_dev is called or
* during the hdev->setup vendor callback.
@@ -205,6 +205,15 @@ enum {
*
*/
HCI_QUIRK_NON_PERSISTENT_SETUP,
+
+ /* When this quirk is set, wideband speech is supported by
+ * the driver. Since no reliable mechanism exists to report
+ * this from the hardware, a driver flag is used to convey
+ * this support.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
};
/* HCI device flags */
@@ -277,6 +286,7 @@ enum {
HCI_FAST_CONNECTABLE,
HCI_BREDR_ENABLED,
HCI_LE_SCAN_INTERRUPTED,
+ HCI_WIDEBAND_SPEECH_ENABLED,
HCI_DUT_MODE,
HCI_VENDOR_DIAG,
@@ -932,10 +942,14 @@ struct hci_cp_sniff_subrate {
#define HCI_OP_RESET 0x0c03
#define HCI_OP_SET_EVENT_FLT 0x0c05
-struct hci_cp_set_event_flt {
- __u8 flt_type;
- __u8 cond_type;
- __u8 condition[0];
+#define HCI_SET_EVENT_FLT_SIZE 9
+struct hci_cp_set_event_filter {
+ __u8 flt_type;
+ __u8 cond_type;
+ struct {
+ bdaddr_t bdaddr;
+ __u8 auto_accept;
+ } __packed addr_conn_flt;
} __packed;
/* Filter types */
@@ -949,8 +963,9 @@ struct hci_cp_set_event_flt {
#define HCI_CONN_SETUP_ALLOW_BDADDR 0x02
/* CONN_SETUP Conditions */
-#define HCI_CONN_SETUP_AUTO_OFF 0x01
-#define HCI_CONN_SETUP_AUTO_ON 0x02
+#define HCI_CONN_SETUP_AUTO_OFF 0x01
+#define HCI_CONN_SETUP_AUTO_ON 0x02
+#define HCI_CONN_SETUP_AUTO_ON_WITH_RS 0x03
#define HCI_OP_READ_STORED_LINK_KEY 0x0c0d
struct hci_cp_read_stored_link_key {
@@ -1086,6 +1101,19 @@ struct hci_rp_read_inq_rsp_tx_power {
__s8 tx_power;
} __packed;
+#define HCI_OP_READ_DEF_ERR_DATA_REPORTING 0x0c5a
+ #define ERR_DATA_REPORTING_DISABLED 0x00
+ #define ERR_DATA_REPORTING_ENABLED 0x01
+struct hci_rp_read_def_err_data_reporting {
+ __u8 status;
+ __u8 err_data_reporting;
+} __packed;
+
+#define HCI_OP_WRITE_DEF_ERR_DATA_REPORTING 0x0c5b
+struct hci_cp_write_def_err_data_reporting {
+ __u8 err_data_reporting;
+} __packed;
+
#define HCI_OP_SET_EVENT_MASK_PAGE_2 0x0c63
#define HCI_OP_READ_LOCATION_DATA 0x0c64
@@ -1335,7 +1363,7 @@ struct hci_rp_read_local_amp_assoc {
__u8 status;
__u8 phy_handle;
__le16 rem_len;
- __u8 frag[0];
+ __u8 frag[];
} __packed;
#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
@@ -1343,7 +1371,7 @@ struct hci_cp_write_remote_amp_assoc {
__u8 phy_handle;
__le16 len_so_far;
__le16 rem_len;
- __u8 frag[0];
+ __u8 frag[];
} __packed;
struct hci_rp_write_remote_amp_assoc {
__u8 status;
@@ -1613,7 +1641,7 @@ struct hci_cp_le_set_ext_scan_params {
__u8 own_addr_type;
__u8 filter_policy;
__u8 scanning_phys;
- __u8 data[0];
+ __u8 data[];
} __packed;
#define LE_SCAN_PHY_1M 0x01
@@ -1641,7 +1669,7 @@ struct hci_cp_le_ext_create_conn {
__u8 peer_addr_type;
bdaddr_t peer_addr;
__u8 phys;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct hci_cp_le_ext_conn_param {
@@ -1693,7 +1721,7 @@ struct hci_rp_le_set_ext_adv_params {
struct hci_cp_le_set_ext_adv_enable {
__u8 enable;
__u8 num_of_sets;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct hci_cp_ext_adv_set {
@@ -1724,6 +1752,8 @@ struct hci_cp_le_set_ext_scan_rsp_data {
#define LE_SET_ADV_DATA_NO_FRAG 0x01
+#define HCI_OP_LE_REMOVE_ADV_SET 0x203c
+
#define HCI_OP_LE_CLEAR_ADV_SETS 0x203d
#define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035
@@ -1775,14 +1805,14 @@ struct hci_cp_le_set_cig_params {
__le16 m_latency;
__le16 s_latency;
__u8 num_cis;
- struct hci_cis_params cis[0];
+ struct hci_cis_params cis[];
} __packed;
struct hci_rp_le_set_cig_params {
__u8 status;
__u8 cig_id;
__u8 num_handles;
- __le16 handle[0];
+ __le16 handle[];
} __packed;
#define HCI_OP_LE_CREATE_CIS 0x2064
@@ -1793,7 +1823,7 @@ struct hci_cis {
struct hci_cp_le_create_cis {
__u8 num_cis;
- struct hci_cis cis[0];
+ struct hci_cis cis[];
} __packed;
#define HCI_OP_LE_REMOVE_CIG 0x2065
@@ -1937,7 +1967,7 @@ struct hci_comp_pkts_info {
struct hci_ev_num_comp_pkts {
__u8 num_hndl;
- struct hci_comp_pkts_info handles[0];
+ struct hci_comp_pkts_info handles[];
} __packed;
#define HCI_EV_MODE_CHANGE 0x14
@@ -2170,7 +2200,7 @@ struct hci_comp_blocks_info {
struct hci_ev_num_comp_blocks {
__le16 num_blocks;
__u8 num_hndl;
- struct hci_comp_blocks_info handles[0];
+ struct hci_comp_blocks_info handles[];
} __packed;
#define HCI_EV_SYNC_TRAIN_COMPLETE 0x4F
@@ -2226,7 +2256,7 @@ struct hci_ev_le_advertising_info {
__u8 bdaddr_type;
bdaddr_t bdaddr;
__u8 length;
- __u8 data[0];
+ __u8 data[];
} __packed;
#define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03
@@ -2302,7 +2332,7 @@ struct hci_ev_le_ext_adv_report {
__u8 direct_addr_type;
bdaddr_t direct_addr;
__u8 length;
- __u8 data[0];
+ __u8 data[];
} __packed;
#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a
@@ -2362,7 +2392,7 @@ struct hci_evt_le_cis_req {
#define HCI_EV_STACK_INTERNAL 0xfd
struct hci_ev_stack_internal {
__u16 type;
- __u8 data[0];
+ __u8 data[];
} __packed;
#define HCI_EV_SI_DEVICE 0x01
@@ -2409,7 +2439,7 @@ struct hci_sco_hdr {
struct hci_iso_hdr {
__le16 handle;
__le16 dlen;
- __u8 data[0];
+ __u8 data[];
} __packed;
/* ISO data packet status flags */
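The many [0] -> [] changes in this file convert zero-length arrays to
C99 flexible array members; allocation sizing is unchanged, e.g. with
struct_size() from <linux/overflow.h>:

	struct hci_ev_num_comp_pkts *ev;

	ev = kzalloc(struct_size(ev, handles, num_hndl), GFP_KERNEL);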
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 89ecf0a80aa1..d4e28773d378 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -88,6 +88,31 @@ struct discovery_state {
unsigned long scan_duration;
};
+#define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+
+enum suspend_tasks {
+ SUSPEND_PAUSE_DISCOVERY,
+ SUSPEND_UNPAUSE_DISCOVERY,
+
+ SUSPEND_PAUSE_ADVERTISING,
+ SUSPEND_UNPAUSE_ADVERTISING,
+
+ SUSPEND_SCAN_DISABLE,
+ SUSPEND_SCAN_ENABLE,
+ SUSPEND_DISCONNECTING,
+
+ SUSPEND_POWERING_DOWN,
+
+ SUSPEND_PREPARE_NOTIFIER,
+ __SUSPEND_NUM_TASKS
+};
+
+enum suspended_state {
+ BT_RUNNING = 0,
+ BT_SUSPEND_DISCONNECT,
+ BT_SUSPEND_COMPLETE,
+};
+
struct hci_conn_hash {
struct list_head list;
unsigned int acl_num;
@@ -260,6 +285,7 @@ struct hci_dev {
__u8 stored_num_keys;
__u8 io_capability;
__s8 inq_tx_power;
+ __u8 err_data_reporting;
__u16 page_scan_interval;
__u16 page_scan_window;
__u8 page_scan_type;
@@ -389,11 +415,28 @@ struct hci_dev {
void *smp_bredr_data;
struct discovery_state discovery;
+
+ int discovery_old_state;
+ bool discovery_paused;
+ int advertising_old_state;
+ bool advertising_paused;
+
+ struct notifier_block suspend_notifier;
+ struct work_struct suspend_prepare;
+ enum suspended_state suspend_state_next;
+ enum suspended_state suspend_state;
+ bool scanning_paused;
+ bool suspended;
+
+ wait_queue_head_t suspend_wait_q;
+ DECLARE_BITMAP(suspend_tasks, __SUSPEND_NUM_TASKS);
+
struct hci_conn_hash conn_hash;
struct list_head mgmt_pending;
struct list_head blacklist;
struct list_head whitelist;
+ struct list_head wakeable;
struct list_head uuids;
struct list_head link_keys;
struct list_head long_term_keys;
@@ -575,6 +618,7 @@ struct hci_conn_params {
struct hci_conn *conn;
bool explicit_connect;
+ bool wakeable;
};
extern struct list_head hci_dev_list;
@@ -1477,6 +1521,8 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
+u32 hci_conn_get_phy(struct hci_conn *conn);
+
/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h
index 8e9138acdae1..9352bb1bf34c 100644
--- a/include/net/bluetooth/hci_sock.h
+++ b/include/net/bluetooth/hci_sock.h
@@ -144,19 +144,19 @@ struct hci_dev_req {
struct hci_dev_list_req {
__u16 dev_num;
- struct hci_dev_req dev_req[0]; /* hci_dev_req structures */
+ struct hci_dev_req dev_req[]; /* hci_dev_req structures */
};
struct hci_conn_list_req {
__u16 dev_id;
__u16 conn_num;
- struct hci_conn_info conn_info[0];
+ struct hci_conn_info conn_info[];
};
struct hci_conn_info_req {
bdaddr_t bdaddr;
__u8 type;
- struct hci_conn_info conn_info[0];
+ struct hci_conn_info conn_info[];
};
struct hci_auth_info_req {
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 093aedebdf0c..dada14d0622c 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -47,6 +47,7 @@
#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */
#define L2CAP_LE_MIN_MTU 23
+#define L2CAP_ECRED_CONN_SCID_MAX 5
#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100)
#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000)
@@ -119,6 +120,10 @@ struct l2cap_conninfo {
#define L2CAP_LE_CONN_REQ 0x14
#define L2CAP_LE_CONN_RSP 0x15
#define L2CAP_LE_CREDITS 0x16
+#define L2CAP_ECRED_CONN_REQ 0x17
+#define L2CAP_ECRED_CONN_RSP 0x18
+#define L2CAP_ECRED_RECONF_REQ 0x19
+#define L2CAP_ECRED_RECONF_RSP 0x1a
/* L2CAP extended feature mask */
#define L2CAP_FEAT_FLOWCTL 0x00000001
@@ -290,6 +295,8 @@ struct l2cap_conn_rsp {
#define L2CAP_CR_LE_ENCRYPTION 0x0008
#define L2CAP_CR_LE_INVALID_SCID 0x0009
#define L2CAP_CR_LE_SCID_IN_USE 0X000A
+#define L2CAP_CR_LE_UNACCEPT_PARAMS 0X000B
+#define L2CAP_CR_LE_INVALID_PARAMS 0X000C
/* connect/create channel status */
#define L2CAP_CS_NO_INFO 0x0000
@@ -299,14 +306,14 @@ struct l2cap_conn_rsp {
struct l2cap_conf_req {
__le16 dcid;
__le16 flags;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct l2cap_conf_rsp {
__le16 scid;
__le16 flags;
__le16 result;
- __u8 data[0];
+ __u8 data[];
} __packed;
#define L2CAP_CONF_SUCCESS 0x0000
@@ -322,7 +329,7 @@ struct l2cap_conf_rsp {
struct l2cap_conf_opt {
__u8 type;
__u8 len;
- __u8 val[0];
+ __u8 val[];
} __packed;
#define L2CAP_CONF_OPT_SIZE 2
@@ -359,6 +366,7 @@ struct l2cap_conf_rfc {
* ever be used in the BR/EDR configuration phase.
*/
#define L2CAP_MODE_LE_FLOWCTL 0x80
+#define L2CAP_MODE_EXT_FLOWCTL 0x81
struct l2cap_conf_efs {
__u8 id;
@@ -392,7 +400,7 @@ struct l2cap_info_req {
struct l2cap_info_rsp {
__le16 type;
__le16 result;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct l2cap_create_chan_req {
@@ -483,6 +491,39 @@ struct l2cap_le_credits {
__le16 credits;
} __packed;
+#define L2CAP_ECRED_MIN_MTU 64
+#define L2CAP_ECRED_MIN_MPS 64
+
+struct l2cap_ecred_conn_req {
+ __le16 psm;
+ __le16 mtu;
+ __le16 mps;
+ __le16 credits;
+ __le16 scid[0];
+} __packed;
+
+struct l2cap_ecred_conn_rsp {
+ __le16 mtu;
+ __le16 mps;
+ __le16 credits;
+ __le16 result;
+ __le16 dcid[0];
+};
+
+struct l2cap_ecred_reconf_req {
+ __le16 mtu;
+ __le16 mps;
+ __le16 scid[0];
+} __packed;
+
+#define L2CAP_RECONF_SUCCESS 0x0000
+#define L2CAP_RECONF_INVALID_MTU 0x0001
+#define L2CAP_RECONF_INVALID_MPS 0x0002
+
+struct l2cap_ecred_reconf_rsp {
+ __le16 result;
+} __packed;
+
/* ----- L2CAP channels and connections ----- */
struct l2cap_seq_list {
__u16 head;
@@ -620,6 +661,7 @@ struct l2cap_ops {
void (*suspend) (struct l2cap_chan *chan);
void (*set_shutdown) (struct l2cap_chan *chan);
long (*get_sndtimeo) (struct l2cap_chan *chan);
+ struct pid *(*get_peer_pid) (struct l2cap_chan *chan);
struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
unsigned long hdr_len,
unsigned long len, int nb);
@@ -724,6 +766,7 @@ enum {
FLAG_EFS_ENABLE,
FLAG_DEFER_SETUP,
FLAG_LE_CONN_REQ_SENT,
+ FLAG_ECRED_CONN_REQ_SENT,
FLAG_PENDING_SECURITY,
FLAG_HOLD_HCI_CONN,
};
@@ -917,12 +960,14 @@ static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
}
extern bool disable_ertm;
+extern bool enable_ecred;
int l2cap_init_sockets(void);
void l2cap_cleanup_sockets(void);
bool l2cap_is_socket(struct socket *sock);
void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan);
+void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan);
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan);
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm);
@@ -932,6 +977,7 @@ struct l2cap_chan *l2cap_chan_create(void);
void l2cap_chan_close(struct l2cap_chan *chan, int reason);
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
bdaddr_t *dst, u8 dst_type);
+int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu);
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator);
@@ -939,6 +985,9 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan);
int l2cap_ertm_init(struct l2cap_chan *chan);
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
+typedef void (*l2cap_chan_func_t)(struct l2cap_chan *chan, void *data);
+void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
+ void *data);
void l2cap_chan_del(struct l2cap_chan *chan, int err);
void l2cap_send_conn_req(struct l2cap_chan *chan);
void l2cap_move_start(struct l2cap_chan *chan);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index a90666af05bd..f41cd87550dc 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -101,7 +101,8 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_PRIVACY 0x00002000
#define MGMT_SETTING_CONFIGURATION 0x00004000
#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
-#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
+#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
+#define MGMT_SETTING_WIDEBAND_SPEECH 0x00020000
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
@@ -671,6 +672,8 @@ struct mgmt_cp_set_blocked_keys {
} __packed;
#define MGMT_OP_SET_BLOCKED_KEYS_SIZE 2
+#define MGMT_OP_SET_WIDEBAND_SPEECH 0x0047
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index da4acefe39c8..99d26879b02a 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -34,7 +34,6 @@
#define RFCOMM_DEFAULT_MTU 127
#define RFCOMM_DEFAULT_CREDITS 7
-#define RFCOMM_MAX_L2CAP_MTU 1013
#define RFCOMM_MAX_CREDITS 40
#define RFCOMM_SKB_HEAD_RESERVE 8
@@ -356,7 +355,7 @@ struct rfcomm_dev_info {
struct rfcomm_dev_list_req {
u16 dev_num;
- struct rfcomm_dev_info dev_info[0];
+ struct rfcomm_dev_info dev_info[];
};
int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 3d56b026bb9e..dc2ce31a1f52 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -183,7 +183,7 @@ struct slave {
struct bond_up_slave {
unsigned int count;
struct rcu_head rcu;
- struct slave *arr[0];
+ struct slave *arr[];
};
/*
diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h
index 8e4f831d2e52..5036c94c0503 100644
--- a/include/net/bpf_sk_storage.h
+++ b/include/net/bpf_sk_storage.h
@@ -10,14 +10,41 @@ void bpf_sk_storage_free(struct sock *sk);
extern const struct bpf_func_proto bpf_sk_storage_get_proto;
extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
+struct bpf_sk_storage_diag;
+struct sk_buff;
+struct nlattr;
+struct sock;
+
#ifdef CONFIG_BPF_SYSCALL
int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk);
+struct bpf_sk_storage_diag *
+bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs);
+void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag);
+int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
+ struct sock *sk, struct sk_buff *skb,
+ int stg_array_type,
+ unsigned int *res_diag_size);
#else
static inline int bpf_sk_storage_clone(const struct sock *sk,
struct sock *newsk)
{
return 0;
}
+static inline struct bpf_sk_storage_diag *
+bpf_sk_storage_diag_alloc(const struct nlattr *nla)
+{
+ return NULL;
+}
+static inline void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
+{
+}
+static inline int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
+ struct sock *sk, struct sk_buff *skb,
+ int stg_array_type,
+ unsigned int *res_diag_size)
+{
+ return 0;
+}
#endif
#endif /* _BPF_SK_STORAGE_H */
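Sketch of the intended call pattern for the new diag helpers (caller
context and error handling trimmed; nla_stgs and stg_array_type come
from the sock_diag request):

	struct bpf_sk_storage_diag *diag;
	unsigned int res_size = 0;

	diag = bpf_sk_storage_diag_alloc(nla_stgs);
	if (!IS_ERR_OR_NULL(diag)) {
		bpf_sk_storage_diag_put(diag, sk, skb, stg_array_type,
					&res_size);
		bpf_sk_storage_diag_free(diag);
	}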
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f22bd6c838a3..c78bd4ff9e33 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -7,7 +7,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#include <linux/netdevice.h>
@@ -72,12 +72,12 @@ struct wiphy;
*
* @IEEE80211_CHAN_DISABLED: This channel is disabled.
* @IEEE80211_CHAN_NO_IR: do not initiate radiation, this includes
- * sending probe requests or beaconing.
+ * sending probe requests or beaconing.
* @IEEE80211_CHAN_RADAR: Radar detection is required on this channel.
* @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel
- * is not permitted.
+ * is not permitted.
* @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
- * is not permitted.
+ * is not permitted.
* @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel.
* @IEEE80211_CHAN_NO_80MHZ: If the driver supports 80 MHz on the band,
* this flag indicates that an 80 MHz channel cannot use this
@@ -95,6 +95,7 @@ struct wiphy;
* on this channel.
* @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
* on this channel.
+ * @IEEE80211_CHAN_NO_HE: HE operation is not permitted on this channel.
*
*/
enum ieee80211_channel_flags {
@@ -111,6 +112,7 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_IR_CONCURRENT = 1<<10,
IEEE80211_CHAN_NO_20MHZ = 1<<11,
IEEE80211_CHAN_NO_10MHZ = 1<<12,
+ IEEE80211_CHAN_NO_HE = 1<<13,
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -260,6 +262,32 @@ struct ieee80211_he_obss_pd {
};
/**
+ * struct cfg80211_he_bss_color - AP settings for BSS coloring
+ *
+ * @color: the current color.
+ * @disabled: is the feature disabled.
+ * @partial: define the AID equation.
+ */
+struct cfg80211_he_bss_color {
+ u8 color;
+ bool disabled;
+ bool partial;
+};
+
+/**
+ * struct ieee80211_he_bss_color - AP settings for BSS coloring
+ *
+ * @color: the current color.
+ * @disabled: is the feature disabled.
+ * @partial: define the AID equation.
+ */
+struct ieee80211_he_bss_color {
+ u8 color;
+ bool disabled;
+ bool partial;
+};
+
+/**
* struct ieee80211_sta_ht_cap - STA's HT capabilities
*
* This structure describes most essential parameters needed
@@ -599,6 +627,41 @@ struct cfg80211_chan_def {
};
/**
+ * struct cfg80211_tid_cfg - TID specific configuration
+ * @config_override: Flag to notify driver to reset TID configuration
+ * of the peer.
+ * @tids: bitmap of TIDs to modify
+ * @mask: bitmap of attributes indicating which parameter changed,
+ * similar to &nl80211_tid_config_supp.
+ * @noack: noack configuration value for the TID
+ * @retry_long: retry count value
+ * @retry_short: retry count value
+ * @ampdu: Enable/Disable aggregation
+ * @rtscts: Enable/Disable RTS/CTS
+ */
+struct cfg80211_tid_cfg {
+ bool config_override;
+ u8 tids;
+ u32 mask;
+ enum nl80211_tid_config noack;
+ u8 retry_long, retry_short;
+ enum nl80211_tid_config ampdu;
+ enum nl80211_tid_config rtscts;
+};
+
+/**
+ * struct cfg80211_tid_config - TID configuration
+ * @peer: Station's MAC address
+ * @n_tid_conf: Number of TID specific configurations to be applied
+ * @tid_conf: Configuration change info
+ */
+struct cfg80211_tid_config {
+ const u8 *peer;
+ u32 n_tid_conf;
+ struct cfg80211_tid_cfg tid_conf[];
+};
+
+/**
* cfg80211_get_chandef_type - return old channel type from chandef
* @chandef: the channel definition
*
@@ -861,6 +924,7 @@ struct cfg80211_crypto_settings {
__be16 control_port_ethertype;
bool control_port_no_encrypt;
bool control_port_over_nl80211;
+ bool control_port_no_preauth;
struct key_params *wep_keys;
int wep_tx_key;
const u8 *psk;
@@ -990,6 +1054,8 @@ enum cfg80211_ap_settings_flags {
* @twt_responder: Enable Target Wake Time
* @flags: flags, as defined in enum cfg80211_ap_settings_flags
* @he_obss_pd: OBSS Packet Detection settings
+ * @he_bss_color: BSS Color settings
+ * @he_oper: HE operation IE (or %NULL if HE isn't enabled)
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1014,10 +1080,12 @@ struct cfg80211_ap_settings {
const struct ieee80211_ht_cap *ht_cap;
const struct ieee80211_vht_cap *vht_cap;
const struct ieee80211_he_cap_elem *he_cap;
+ const struct ieee80211_he_operation *he_oper;
bool ht_required, vht_required;
bool twt_responder;
u32 flags;
struct ieee80211_he_obss_pd he_obss_pd;
+ struct cfg80211_he_bss_color he_bss_color;
};
/**
@@ -1656,7 +1724,7 @@ struct mpath_info {
* @basic_rates_len: number of basic rates
* @ap_isolate: do not forward packets between connected stations
* @ht_opmode: HT Operation mode
- * (u16 = opmode, -1 = do not change)
+ * (u16 = opmode, -1 = do not change)
* @p2p_ctwindow: P2P CT Window (-1 = no change)
* @p2p_opp_ps: P2P opportunistic PS (-1 = no change)
*/
@@ -2032,8 +2100,8 @@ struct cfg80211_bss_select_adjust {
* @ie_len: length of ie in octets
* @flags: bit field of flags controlling operation
* @match_sets: sets of parameters to be matched for a scan result
- * entry to be considered valid and to be passed to the host
- * (others are filtered out).
+ * entry to be considered valid and to be passed to the host
+ * (others are filtered out).
* If omitted, all results are passed.
* @n_match_sets: number of match sets
* @report_results: indicates that results were reported for this request
@@ -2426,7 +2494,7 @@ struct cfg80211_disassoc_request {
* will be used in ht_capa. Un-supported values will be ignored.
* @ht_capa_mask: The bits of ht_capa which are to be used.
* @wep_keys: static WEP keys, if not NULL points to an array of
- * CFG80211_MAX_WEP_KEYS WEP keys
+ * CFG80211_MAX_WEP_KEYS WEP keys
* @wep_tx_key: key index (0..3) of the default TX static WEP key
*/
struct cfg80211_ibss_params {
@@ -2631,6 +2699,17 @@ enum wiphy_params_flags {
* @cache_id: 2-octet cache identifier advertized by a FILS AP identifying the
* scope of PMKSA. This is valid only if @ssid_len is non-zero (may be
* %NULL).
+ * @pmk_lifetime: Maximum lifetime for PMKSA in seconds
+ * (dot11RSNAConfigPMKLifetime) or 0 if not specified.
+ * The configured PMKSA must not be used for PMKSA caching after
+ * expiration and any keys derived from this PMK become invalid on
+ * expiration, i.e., the current association must be dropped if the PMK
+ * used for it expires.
+ * @pmk_reauth_threshold: Threshold time for reauthentication (percentage of
+ * PMK lifetime, dot11RSNAConfigPMKReauthThreshold) or 0 if not specified.
+ * Drivers are expected to trigger a full authentication instead of using
+ * this PMKSA for caching when reassociating to a new BSS after this
+ * threshold to generate a new PMK before the current one expires.
*/
struct cfg80211_pmksa {
const u8 *bssid;
@@ -2640,6 +2719,8 @@ struct cfg80211_pmksa {
const u8 *ssid;
size_t ssid_len;
const u8 *cache_id;
+ u32 pmk_lifetime;
+ u8 pmk_reauth_threshold;
};
/**
@@ -3204,6 +3285,12 @@ struct cfg80211_pmsr_result {
* @ftmr_retries: number of retries for FTM request
* @request_lci: request LCI information
* @request_civicloc: request civic location information
+ * @trigger_based: use trigger based ranging for the measurement.
+ * If neither @trigger_based nor @non_trigger_based is set,
+ * EDCA based ranging will be used.
+ * @non_trigger_based: use non trigger based ranging for the measurement.
+ * If neither @trigger_based nor @non_trigger_based is set,
+ * EDCA based ranging will be used.
*
* See also nl80211 for the respective attribute documentation.
*/
@@ -3213,7 +3300,9 @@ struct cfg80211_pmsr_ftm_request_peer {
u8 requested:1,
asap:1,
request_lci:1,
- request_civicloc:1;
+ request_civicloc:1,
+ trigger_based:1,
+ non_trigger_based:1;
u8 num_bursts_exp;
u8 burst_duration;
u8 ftms_per_burst;
@@ -3340,6 +3429,8 @@ struct cfg80211_update_owe_info {
*
* @set_default_mgmt_key: set the default management frame key on an interface
*
+ * @set_default_beacon_key: set the default Beacon frame key on an interface
+ *
* @set_rekey_data: give the data necessary for GTK rekeying to the driver
*
* @start_ap: Start acting in AP mode defined by the parameters.
@@ -3639,6 +3730,10 @@ struct cfg80211_update_owe_info {
*
* @probe_mesh_link: Probe direct Mesh peer's link quality by sending data frame
* and overrule HWMP path selection algorithm.
+ * @set_tid_config: TID specific configuration; this can be peer or BSS
+ * specific. This callback may sleep.
+ * @reset_tid_config: Reset TID specific configuration for the peer, for the
+ * given TIDs. This callback may sleep.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -3672,6 +3767,9 @@ struct cfg80211_ops {
int (*set_default_mgmt_key)(struct wiphy *wiphy,
struct net_device *netdev,
u8 key_index);
+ int (*set_default_beacon_key)(struct wiphy *wiphy,
+ struct net_device *netdev,
+ u8 key_index);
int (*start_ap)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *settings);
@@ -3959,6 +4057,10 @@ struct cfg80211_ops {
struct cfg80211_update_owe_info *owe_info);
int (*probe_mesh_link)(struct wiphy *wiphy, struct net_device *dev,
const u8 *buf, size_t len);
+ int (*set_tid_config)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_tid_config *tid_conf);
+ int (*reset_tid_config)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 tids);
};
/*
@@ -4356,6 +4458,8 @@ struct wiphy_iftype_ext_capab {
* forbid using the value 15 to let the responder pick)
* @ftm.max_ftms_per_burst: maximum FTMs per burst supported (set to 0 if
* not limited)
+ * @ftm.trigger_based: trigger based ranging measurement is supported
+ * @ftm.non_trigger_based: non trigger based ranging measurement is supported
*/
struct cfg80211_pmsr_capabilities {
unsigned int max_peers;
@@ -4371,24 +4475,49 @@ struct cfg80211_pmsr_capabilities {
asap:1,
non_asap:1,
request_lci:1,
- request_civicloc:1;
+ request_civicloc:1,
+ trigger_based:1,
+ non_trigger_based:1;
} ftm;
};
/**
+ * struct wiphy_iftype_akm_suites - This structure encapsulates supported AKM
+ * suites for interface types defined in @iftypes_mask. Each type in the
+ * @iftypes_mask must be unique across all instances of iftype_akm_suites.
+ *
+ * @iftypes_mask: bitmask of interface types
+ * @akm_suites: points to an array of supported AKM suites
+ * @n_akm_suites: number of supported AKM suites
+ */
+struct wiphy_iftype_akm_suites {
+ u16 iftypes_mask;
+ const u32 *akm_suites;
+ int n_akm_suites;
+};
+
+/**
* struct wiphy - wireless hardware description
* @reg_notifier: the driver's regulatory notification callback,
* note that if your driver uses wiphy_apply_custom_regulatory()
* the reg_notifier's request can be passed as NULL
* @regd: the driver's regulatory domain, if one was requested via
- * the regulatory_hint() API. This can be used by the driver
+ * the regulatory_hint() API. This can be used by the driver
* on the reg_notifier() if it chooses to ignore future
* regulatory domain changes caused by other drivers.
* @signal_type: signal type reported in &struct cfg80211_bss.
* @cipher_suites: supported cipher suites
* @n_cipher_suites: number of supported cipher suites
- * @akm_suites: supported AKM suites
+ * @akm_suites: supported AKM suites. These are the default AKMs supported if
+ * the supported AKMs are not advertised for a specific interface type
+ * in iftype_akm_suites.
* @n_akm_suites: number of supported AKM suites
+ * @iftype_akm_suites: array of supported AKM suites info per interface type.
+ * Note that the bits in @iftypes_mask inside this structure cannot
+ * overlap (i.e. only one occurrence of each type is allowed across all
+ * instances of iftype_akm_suites).
+ * @num_iftype_akm_suites: number of interface types for which supported AKM
+ * suites are specified separately.
* @retry_short: Retry limit for short frames (dot11ShortRetryLimit)
* @retry_long: Retry limit for long frames (dot11LongRetryLimit)
* @frag_threshold: Fragmentation threshold (dot11FragmentationThreshold);
@@ -4409,10 +4538,11 @@ struct cfg80211_pmsr_capabilities {
* the same number of arbitrary MAC addresses.
* @registered: protects ->resume and ->suspend sysfs callbacks against
* unregistering the hardware
- * @debugfsdir: debugfs directory used for this wiphy, will be renamed
- * automatically on wiphy renames
- * @dev: (virtual) struct device for this wiphy
- * @registered: helps synchronize suspend/resume with wiphy unregister
+ * @debugfsdir: debugfs directory used for this wiphy (ieee80211/<wiphyname>).
+ * It will be renamed automatically on wiphy renames
+ * @dev: (virtual) struct device for this wiphy. The item in
+ * /sys/class/ieee80211/ points to this. You need to use set_wiphy_dev()
+ * (see below).
* @wext: wireless extension handlers
* @priv: driver private data (sized according to wiphy_new() parameter)
* @interface_modes: bitmask of interfaces types valid for this wiphy,
@@ -4523,12 +4653,6 @@ struct cfg80211_pmsr_capabilities {
* and probe responses. This value should be set if the driver
* wishes to limit the number of csa counters. Default (0) means
* infinite.
- * @max_adj_channel_rssi_comp: max offset of between the channel on which the
- * frame was sent and the channel on which the frame was heard for which
- * the reported rssi is still valid. If a driver is able to compensate the
- * low rssi when a frame is heard on different channel, then it should set
- * this variable to the maximal offset for which it can compensate.
- * This value should be set in MHz.
* @bss_select_support: bitmask indicating the BSS selection criteria supported
* by the driver in the .connect() callback. The bit position maps to the
* attribute indices defined in &enum nl80211_bss_select_attr.
@@ -4548,11 +4672,19 @@ struct cfg80211_pmsr_capabilities {
* @support_mbssid must be set for this to have any effect.
*
* @pmsr_capa: peer measurement capabilities
+ *
+ * @tid_config_support: describes the per-TID config support that the
+ * device has
+ * @tid_config_support.vif: bitmap of attributes (configurations)
+ * supported by the driver for each vif
+ * @tid_config_support.peer: bitmap of attributes (configurations)
+ * supported by the driver for each peer
+ * @tid_config_support.max_retry: maximum supported retry count for
+ * long/short retry configuration
*/
struct wiphy {
/* assign these fields before you register the wiphy */
- /* permanent MAC address(es) */
u8 perm_addr[ETH_ALEN];
u8 addr_mask[ETH_ALEN];
@@ -4595,6 +4727,9 @@ struct wiphy {
int n_akm_suites;
const u32 *akm_suites;
+ const struct wiphy_iftype_akm_suites *iftype_akm_suites;
+ unsigned int num_iftype_akm_suites;
+
u8 retry_short;
u8 retry_long;
u32 frag_threshold;
@@ -4616,11 +4751,6 @@ struct wiphy {
u32 available_antennas_tx;
u32 available_antennas_rx;
- /*
- * Bitmap of supported protocols for probe response offloading
- * see &enum nl80211_probe_resp_offload_support_attr. Only valid
- * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set.
- */
u32 probe_resp_offload;
const u8 *extended_capabilities, *extended_capabilities_mask;
@@ -4629,16 +4759,10 @@ struct wiphy {
const struct wiphy_iftype_ext_capab *iftype_ext_capab;
unsigned int num_iftype_ext_capab;
- /* If multiple wiphys are registered and you're handed e.g.
- * a regular netdev with assigned ieee80211_ptr, you won't
- * know whether it points to a wiphy your driver has registered
- * or not. Assign this to something global to your driver to
- * help determine whether you own this wiphy or not. */
const void *privid;
struct ieee80211_supported_band *bands[NUM_NL80211_BANDS];
- /* Lets us get back the wiphy on the callback */
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request);
@@ -4646,14 +4770,10 @@ struct wiphy {
const struct ieee80211_regdomain __rcu *regd;
- /* the item in /sys/class/ieee80211/ points to this,
- * you need use set_wiphy_dev() (see below) */
struct device dev;
- /* protects ->resume, ->suspend sysfs callbacks against unregister hw */
bool registered;
- /* dir in debugfs: ieee80211/<wiphyname> */
struct dentry *debugfsdir;
const struct ieee80211_ht_cap *ht_capa_mod_mask;
@@ -4661,7 +4781,6 @@ struct wiphy {
struct list_head wdev_list;
- /* the network namespace this phy lives in currently */
possible_net_t _net;
#ifdef CONFIG_CFG80211_WEXT
@@ -4677,7 +4796,6 @@ struct wiphy {
u16 max_ap_assoc_sta;
u8 max_num_csa_counters;
- u8 max_adj_channel_rssi_comp;
u32 bss_select_support;
@@ -4687,11 +4805,20 @@ struct wiphy {
u32 txq_memory_limit;
u32 txq_quantum;
+ unsigned long tx_queue_len;
+
u8 support_mbssid:1,
support_only_he_mbssid:1;
const struct cfg80211_pmsr_capabilities *pmsr_capa;
+ struct {
+ u64 peer, vif;
+ u8 max_retry;
+ } tid_config_support;
+
+ u8 max_data_retry_count;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -5467,9 +5594,9 @@ void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr);
* @wiphy: the wireless device giving the hint (used only for reporting
* conflicts)
* @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory domain
- * should be in. If @rd is set this should be NULL. Note that if you
- * set this to NULL you should still set rd->alpha2 to some accepted
- * alpha2.
+ * should be in. If @rd is set this should be NULL. Note that if you
+ * set this to NULL you should still set rd->alpha2 to some accepted
+ * alpha2.
*
* Wireless drivers can use this function to hint to the wireless core
* what it believes should be the current regulatory domain by
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 6f86073a5d7d..6ed07844eb24 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -214,7 +214,7 @@ struct wpan_phy {
/* the network namespace this phy lives in currently */
possible_net_t _net;
- char priv[0] __aligned(NETDEV_ALIGN);
+ char priv[] __aligned(NETDEV_ALIGN);
};
static inline struct net *wpan_phy_net(struct wpan_phy *wpan_phy)
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index 4295de3e6a4b..7e78e7d6f015 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -45,9 +45,14 @@ static inline void sock_update_classid(struct sock_cgroup_data *skcd)
sock_cgroup_set_classid(skcd, classid);
}
+static inline u32 __task_get_classid(struct task_struct *task)
+{
+ return task_cls_state(task)->classid;
+}
+
static inline u32 task_get_classid(const struct sk_buff *skb)
{
- u32 classid = task_cls_state(current)->classid;
+ u32 classid = __task_get_classid(current);
/* Due to the nature of the classifier it is required to ignore all
* packets originating from softirq context as accessing `current'
diff --git a/include/net/devlink.h b/include/net/devlink.h
index ce5cea428fdc..8ffc1b5cd89b 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -16,7 +16,9 @@
#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <net/net_namespace.h>
+#include <net/flow_offload.h>
#include <uapi/linux/devlink.h>
+#include <linux/xarray.h>
struct devlink_ops;
@@ -28,13 +30,14 @@ struct devlink {
struct list_head resource_list;
struct list_head param_list;
struct list_head region_list;
- u32 snapshot_id;
struct list_head reporter_list;
struct mutex reporters_lock; /* protects reporter_list */
struct devlink_dpipe_headers *dpipe_headers;
struct list_head trap_list;
struct list_head trap_group_list;
+ struct list_head trap_policer_list;
const struct devlink_ops *ops;
+ struct xarray snapshot_ids;
struct device *dev;
possible_net_t _net;
struct mutex lock;
@@ -479,6 +482,8 @@ enum devlink_param_generic_id {
#define DEVLINK_INFO_VERSION_GENERIC_FW "fw"
/* Control processor FW version */
#define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT "fw.mgmt"
+/* FW interface specification version */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API "fw.mgmt.api"
/* Data path microcode controlling high-speed packet processing */
#define DEVLINK_INFO_VERSION_GENERIC_FW_APP "fw.app"
/* UNDI software version */
@@ -489,11 +494,27 @@ enum devlink_param_generic_id {
#define DEVLINK_INFO_VERSION_GENERIC_FW_PSID "fw.psid"
/* RoCE FW version */
#define DEVLINK_INFO_VERSION_GENERIC_FW_ROCE "fw.roce"
+/* Firmware bundle identifier */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id"
struct devlink_region;
struct devlink_info_req;
-typedef void devlink_snapshot_data_dest_t(const void *data);
+/**
+ * struct devlink_region_ops - Region operations
+ * @name: region name
+ * @destructor: callback used to free snapshot memory when deleting
+ * @snapshot: callback to request an immediate snapshot. On success,
+ * the data variable must be updated to point to the snapshot data.
+ * The function will be called while the devlink instance lock is
+ * held.
+ */
+struct devlink_region_ops {
+ const char *name;
+ void (*destructor)(const void *data);
+ int (*snapshot)(struct devlink *devlink, struct netlink_ext_ack *extack,
+ u8 **data);
+};
struct devlink_fmsg;
struct devlink_health_reporter;
@@ -526,10 +547,34 @@ struct devlink_health_reporter_ops {
};
/**
+ * struct devlink_trap_policer - Immutable packet trap policer attributes.
+ * @id: Policer identifier.
+ * @init_rate: Initial rate in packets / sec.
+ * @init_burst: Initial burst size in packets.
+ * @max_rate: Maximum rate.
+ * @min_rate: Minimum rate.
+ * @max_burst: Maximum burst size.
+ * @min_burst: Minimum burst size.
+ *
+ * Describes immutable attributes of packet trap policers that drivers register
+ * with devlink.
+ */
+struct devlink_trap_policer {
+ u32 id;
+ u64 init_rate;
+ u64 init_burst;
+ u64 max_rate;
+ u64 min_rate;
+ u64 max_burst;
+ u64 min_burst;
+};
+
+/**
* struct devlink_trap_group - Immutable packet trap group attributes.
* @name: Trap group name.
* @id: Trap group identifier.
* @generic: Whether the trap group is generic or not.
+ * @init_policer_id: Initial policer identifier.
*
* Describes immutable attributes of packet trap groups that drivers register
* with devlink.
@@ -538,9 +583,11 @@ struct devlink_trap_group {
const char *name;
u16 id;
bool generic;
+ u32 init_policer_id;
};
#define DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT BIT(0)
+#define DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE BIT(1)
/**
* struct devlink_trap - Immutable packet trap attributes.
@@ -549,7 +596,7 @@ struct devlink_trap_group {
* @generic: Whether the trap is generic or not.
* @id: Trap identifier.
* @name: Trap name.
- * @group: Immutable packet trap group attributes.
+ * @init_group_id: Initial group identifier.
* @metadata_cap: Metadata types that can be provided by the trap.
*
* Describes immutable attributes of packet traps that drivers register with
@@ -561,7 +608,7 @@ struct devlink_trap {
bool generic;
u16 id;
const char *name;
- struct devlink_trap_group group;
+ u16 init_group_id;
u32 metadata_cap;
};
@@ -596,6 +643,8 @@ enum devlink_trap_generic_id {
DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE,
DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC,
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_FLOW_ACTION_DROP,
+ DEVLINK_TRAP_GENERIC_ID_EGRESS_FLOW_ACTION_DROP,
/* Add new generic trap IDs above */
__DEVLINK_TRAP_GENERIC_ID_MAX,
@@ -610,6 +659,7 @@ enum devlink_trap_group_generic_id {
DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS,
DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS,
/* Add new generic trap group IDs above */
__DEVLINK_TRAP_GROUP_GENERIC_ID_MAX,
@@ -671,6 +721,10 @@ enum devlink_trap_group_generic_id {
"decap_error"
#define DEVLINK_TRAP_GENERIC_NAME_OVERLAY_SMAC_MC \
"overlay_smac_is_mc"
+#define DEVLINK_TRAP_GENERIC_NAME_INGRESS_FLOW_ACTION_DROP \
+ "ingress_flow_action_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_EGRESS_FLOW_ACTION_DROP \
+ "egress_flow_action_drop"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \
"l2_drops"
@@ -680,19 +734,22 @@ enum devlink_trap_group_generic_id {
"buffer_drops"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_TUNNEL_DROPS \
"tunnel_drops"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_ACL_DROPS \
+ "acl_drops"
-#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group, _metadata_cap) \
+#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group_id, \
+ _metadata_cap) \
{ \
.type = DEVLINK_TRAP_TYPE_##_type, \
.init_action = DEVLINK_TRAP_ACTION_##_init_action, \
.generic = true, \
.id = DEVLINK_TRAP_GENERIC_ID_##_id, \
.name = DEVLINK_TRAP_GENERIC_NAME_##_id, \
- .group = _group, \
+ .init_group_id = _group_id, \
.metadata_cap = _metadata_cap, \
}
-#define DEVLINK_TRAP_DRIVER(_type, _init_action, _id, _name, _group, \
+#define DEVLINK_TRAP_DRIVER(_type, _init_action, _id, _name, _group_id, \
_metadata_cap) \
{ \
.type = DEVLINK_TRAP_TYPE_##_type, \
@@ -700,15 +757,28 @@ enum devlink_trap_group_generic_id {
.generic = false, \
.id = _id, \
.name = _name, \
- .group = _group, \
+ .init_group_id = _group_id, \
.metadata_cap = _metadata_cap, \
}
-#define DEVLINK_TRAP_GROUP_GENERIC(_id) \
+#define DEVLINK_TRAP_GROUP_GENERIC(_id, _policer_id) \
{ \
.name = DEVLINK_TRAP_GROUP_GENERIC_NAME_##_id, \
.id = DEVLINK_TRAP_GROUP_GENERIC_ID_##_id, \
.generic = true, \
+ .init_policer_id = _policer_id, \
+ }
+
+#define DEVLINK_TRAP_POLICER(_id, _rate, _burst, _max_rate, _min_rate, \
+ _max_burst, _min_burst) \
+ { \
+ .id = _id, \
+ .init_rate = _rate, \
+ .init_burst = _burst, \
+ .max_rate = _max_rate, \
+ .min_rate = _min_rate, \
+ .max_burst = _max_burst, \
+ .min_burst = _min_burst, \
}
struct devlink_ops {
@@ -807,6 +877,47 @@ struct devlink_ops {
*/
int (*trap_group_init)(struct devlink *devlink,
const struct devlink_trap_group *group);
+ /**
+ * @trap_group_set: Trap group parameters set function.
+ *
+ * Note: @policer can be NULL when a policer is being unbound from
+ * @group.
+ */
+ int (*trap_group_set)(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer);
+ /**
+ * @trap_policer_init: Trap policer initialization function.
+ *
+ * Should be used by device drivers to initialize the trap policer in
+ * the underlying device.
+ */
+ int (*trap_policer_init)(struct devlink *devlink,
+ const struct devlink_trap_policer *policer);
+ /**
+ * @trap_policer_fini: Trap policer de-initialization function.
+ *
+ * Should be used by device drivers to de-initialize the trap policer
+ * in the underlying device.
+ */
+ void (*trap_policer_fini)(struct devlink *devlink,
+ const struct devlink_trap_policer *policer);
+ /**
+ * @trap_policer_set: Trap policer parameters set function.
+ */
+ int (*trap_policer_set)(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack);
+ /**
+ * @trap_policer_counter_get: Trap policer counter get function.
+ *
+ * Should be used by device drivers to report number of packets dropped
+ * by the policer.
+ */
+ int (*trap_policer_counter_get)(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops);
};
static inline void *devlink_priv(struct devlink *devlink)
@@ -949,15 +1060,15 @@ void devlink_port_param_value_changed(struct devlink_port *devlink_port,
u32 param_id);
void devlink_param_value_str_fill(union devlink_param_value *dst_val,
const char *src);
-struct devlink_region *devlink_region_create(struct devlink *devlink,
- const char *region_name,
- u32 region_max_snapshots,
- u64 region_size);
+struct devlink_region *
+devlink_region_create(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
void devlink_region_destroy(struct devlink_region *region);
-u32 devlink_region_snapshot_id_get(struct devlink *devlink);
+int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id);
+void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id);
int devlink_region_snapshot_create(struct devlink_region *region,
- u8 *data, u32 snapshot_id,
- devlink_snapshot_data_dest_t *data_destructor);
+ u8 *data, u32 snapshot_id);
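
Snapshot IDs are now reference-counted and obtained through an output parameter. A sketch of the updated calling convention (error handling trimmed; region and data assumed to exist):

	u32 snapshot_id;
	int err;

	err = devlink_region_snapshot_id_get(devlink, &snapshot_id);
	if (err)
		return err;

	err = devlink_region_snapshot_create(region, data, snapshot_id);

	/* Drop our reference; a successfully created snapshot holds its own. */
	devlink_region_snapshot_id_put(devlink, snapshot_id);
	return err;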
int devlink_info_serial_number_put(struct devlink_info_req *req,
const char *sn);
int devlink_info_driver_name_put(struct devlink_info_req *req,
@@ -981,12 +1092,17 @@ int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg);
int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
const char *name);
int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg);
+int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name);
+int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg);
int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value);
int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value);
int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value);
int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value);
int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value);
+int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
+ u16 value_len);
int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
bool value);
@@ -1004,8 +1120,7 @@ int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
struct devlink_health_reporter *
devlink_health_reporter_create(struct devlink *devlink,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, bool auto_recover,
- void *priv);
+ u64 graceful_period, void *priv);
void
devlink_health_reporter_destroy(struct devlink_health_reporter *reporter);
@@ -1035,10 +1150,24 @@ int devlink_traps_register(struct devlink *devlink,
void devlink_traps_unregister(struct devlink *devlink,
const struct devlink_trap *traps,
size_t traps_count);
-void devlink_trap_report(struct devlink *devlink,
- struct sk_buff *skb, void *trap_ctx,
- struct devlink_port *in_devlink_port);
+void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb,
+ void *trap_ctx, struct devlink_port *in_devlink_port,
+ const struct flow_action_cookie *fa_cookie);
void *devlink_trap_ctx_priv(void *trap_ctx);
+int devlink_trap_groups_register(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count);
+void devlink_trap_groups_unregister(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count);
+int
+devlink_trap_policers_register(struct devlink *devlink,
+ const struct devlink_trap_policer *policers,
+ size_t policers_count);
+void
+devlink_trap_policers_unregister(struct devlink *devlink,
+ const struct devlink_trap_policer *policers,
+ size_t policers_count);
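
Tying the new registration helpers together, a probe path would register policers before the groups that reference them; a condensed sketch with illustrative array names:

	err = devlink_trap_policers_register(devlink, foo_trap_policers_arr,
					     ARRAY_SIZE(foo_trap_policers_arr));
	if (err)
		return err;

	err = devlink_trap_groups_register(devlink, foo_trap_groups_arr,
					   ARRAY_SIZE(foo_trap_groups_arr));
	if (err)
		devlink_trap_policers_unregister(devlink, foo_trap_policers_arr,
						 ARRAY_SIZE(foo_trap_policers_arr));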
#if IS_ENABLED(CONFIG_NET_DEVLINK)
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
index 6dd2213c5eb2..ccc6e9df178b 100644
--- a/include/net/dn_fib.h
+++ b/include/net/dn_fib.h
@@ -90,7 +90,7 @@ struct dn_fib_table {
int (*flush)(struct dn_fib_table *t);
int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb);
- unsigned char data[0];
+ unsigned char data[];
};
#ifdef CONFIG_DECNET_ROUTER
diff --git a/include/net/drop_monitor.h b/include/net/drop_monitor.h
index f68bc373544a..3f5b6ddb3179 100644
--- a/include/net/drop_monitor.h
+++ b/include/net/drop_monitor.h
@@ -6,17 +6,20 @@
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
+#include <net/flow_offload.h>
/**
* struct net_dm_hw_metadata - Hardware-supplied packet metadata.
* @trap_group_name: Hardware trap group name.
* @trap_name: Hardware trap name.
* @input_dev: Input netdevice.
+ * @fa_cookie: Flow action user cookie.
*/
struct net_dm_hw_metadata {
const char *trap_group_name;
const char *trap_name;
struct net_device *input_dev;
+ const struct flow_action_cookie *fa_cookie;
};
#if IS_REACHABLE(CONFIG_NET_DROP_MONITOR)
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 63495e3443ac..fb3f9222f2a1 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -130,9 +130,10 @@ struct dsa_switch_tree {
struct list_head rtable;
};
-/* TC matchall action types, only mirroring for now */
+/* TC matchall action types */
enum dsa_port_mall_action_type {
DSA_PORT_MALL_MIRROR,
+ DSA_PORT_MALL_POLICER,
};
/* TC mirroring entry */
@@ -141,6 +142,12 @@ struct dsa_mall_mirror_tc_entry {
bool ingress;
};
+/* TC port policer entry */
+struct dsa_mall_policer_tc_entry {
+ s64 burst;
+ u64 rate_bytes_per_sec;
+};
+
/* TC matchall entry */
struct dsa_mall_tc_entry {
struct list_head list;
@@ -148,6 +155,7 @@ struct dsa_mall_tc_entry {
enum dsa_port_mall_action_type type;
union {
struct dsa_mall_mirror_tc_entry mirror;
+ struct dsa_mall_policer_tc_entry policer;
};
};
@@ -284,6 +292,12 @@ struct dsa_switch {
*/
bool pcs_poll;
+ /* Set on switches where only the MRU is configurable. To ensure the
+ * configured MTU is not exceeded, the MRU must be normalized across
+ * all bridged interfaces.
+ */
+ bool mtu_enforcement_ingress;
+
size_t num_ports;
};
@@ -420,7 +434,9 @@ struct dsa_switch_ops {
void (*phylink_mac_link_up)(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev);
+ struct phy_device *phydev,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause);
void (*phylink_fixed_state)(struct dsa_switch *ds, int port,
struct phylink_link_state *state);
/*
@@ -538,11 +554,20 @@ struct dsa_switch_ops {
/*
* TC integration
*/
+ int (*cls_flower_add)(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+ int (*cls_flower_del)(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
+ int (*cls_flower_stats)(struct dsa_switch *ds, int port,
+ struct flow_cls_offload *cls, bool ingress);
int (*port_mirror_add)(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress);
void (*port_mirror_del)(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
+ int (*port_policer_add)(struct dsa_switch *ds, int port,
+ struct dsa_mall_policer_tc_entry *policer);
+ void (*port_policer_del)(struct dsa_switch *ds, int port);
int (*port_setup_tc)(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data);
@@ -571,6 +596,16 @@ struct dsa_switch_ops {
struct devlink_param_gset_ctx *ctx);
int (*devlink_param_set)(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
+
+ /*
+ * MTU change functionality. Switches can also adjust their MRU through
+ * this method. Here, MTU means the SDU (L2 payload) length. If the
+ * switch needs to account for the DSA tag on the CPU port, this method
+ * needs to do so privately.
+ */
+ int (*port_change_mtu)(struct dsa_switch *ds, int port,
+ int new_mtu);
+ int (*port_max_mtu)(struct dsa_switch *ds, int port);
};
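
An illustrative implementation of the two new MTU ops for a hypothetical switch whose tagger adds FOO_TAG_LEN bytes on the CPU port; all foo_* names are assumptions:

	static int foo_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
	{
		int frame_len = new_mtu + ETH_HLEN + ETH_FCS_LEN;

		/* Account for the DSA tag privately on the CPU port. */
		if (dsa_is_cpu_port(ds, port))
			frame_len += FOO_TAG_LEN;

		return foo_hw_set_max_frame_len(ds->priv, port, frame_len);
	}

	static int foo_port_max_mtu(struct dsa_switch *ds, int port)
	{
		return FOO_MAX_FRAME_LEN - ETH_HLEN - ETH_FCS_LEN - FOO_TAG_LEN;
	}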
#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \
diff --git a/include/net/dst.h b/include/net/dst.h
index 3448cf865ede..07adfacd8088 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -35,7 +35,6 @@ struct dst_entry {
int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
unsigned short flags;
-#define DST_HOST 0x0001
#define DST_NOXFRM 0x0002
#define DST_NOPOLICY 0x0004
#define DST_NOCOUNT 0x0008
diff --git a/include/net/esp.h b/include/net/esp.h
index 117652eb6ea3..9c5637d41d95 100644
--- a/include/net/esp.h
+++ b/include/net/esp.h
@@ -11,6 +11,22 @@ static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
return (struct ip_esp_hdr *)skb_transport_header(skb);
}
+static inline void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
+{
+ /* Fill padding... */
+ if (tfclen) {
+ memset(tail, 0, tfclen);
+ tail += tfclen;
+ }
+ /* Default ESP trailer padding: monotonically increasing bytes 1, 2, 3, ... */
+ do {
+ int i;
+ for (i = 0; i < plen - 2; i++)
+ tail[i] = i + 1;
+ } while (0);
+ tail[plen - 2] = plen - 2;
+ tail[plen - 1] = proto;
+}
+
struct esp_info {
struct ip_esp_hdr *esph;
__be64 seqno;
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index c6f7bd22db60..3619c6acf60f 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -3,6 +3,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/netlink.h>
#include <net/flow_dissector.h>
#include <linux/rhashtable.h>
@@ -68,6 +69,10 @@ struct flow_match_enc_opts {
struct flow_dissector_key_enc_opts *key, *mask;
};
+struct flow_match_ct {
+ struct flow_dissector_key_ct *key, *mask;
+};
+
struct flow_rule;
void flow_rule_match_meta(const struct flow_rule *rule,
@@ -110,6 +115,8 @@ void flow_rule_match_enc_keyid(const struct flow_rule *rule,
struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
struct flow_match_enc_opts *out);
+void flow_rule_match_ct(const struct flow_rule *rule,
+ struct flow_match_ct *out);
enum flow_action_id {
FLOW_ACTION_ACCEPT = 0,
@@ -130,11 +137,13 @@ enum flow_action_id {
FLOW_ACTION_CSUM,
FLOW_ACTION_MARK,
FLOW_ACTION_PTYPE,
+ FLOW_ACTION_PRIORITY,
FLOW_ACTION_WAKE,
FLOW_ACTION_QUEUE,
FLOW_ACTION_SAMPLE,
FLOW_ACTION_POLICE,
FLOW_ACTION_CT,
+ FLOW_ACTION_CT_METADATA,
FLOW_ACTION_MPLS_PUSH,
FLOW_ACTION_MPLS_POP,
FLOW_ACTION_MPLS_MANGLE,
@@ -154,10 +163,35 @@ enum flow_action_mangle_base {
FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};
+enum flow_action_hw_stats_bit {
+ FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
+ FLOW_ACTION_HW_STATS_DELAYED_BIT,
+};
+
+enum flow_action_hw_stats {
+ FLOW_ACTION_HW_STATS_DISABLED = 0,
+ FLOW_ACTION_HW_STATS_IMMEDIATE =
+ BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
+ FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
+ FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
+ FLOW_ACTION_HW_STATS_DELAYED,
+};
+
typedef void (*action_destr)(void *priv);
+struct flow_action_cookie {
+ u32 cookie_len;
+ u8 cookie[];
+};
+
+struct flow_action_cookie *flow_action_cookie_create(void *data,
+ unsigned int len,
+ gfp_t gfp);
+void flow_action_cookie_destroy(struct flow_action_cookie *cookie);
+
struct flow_action_entry {
enum flow_action_id id;
+ enum flow_action_hw_stats hw_stats;
action_destr destructor;
void *destructor_priv;
union {
@@ -168,7 +202,8 @@ struct flow_action_entry {
__be16 proto;
u8 prio;
} vlan;
- struct { /* FLOW_ACTION_PACKET_EDIT */
+ struct { /* FLOW_ACTION_MANGLE */
+ /* FLOW_ACTION_ADD */
enum flow_action_mangle_base htype;
u32 offset;
u32 mask;
@@ -178,6 +213,7 @@ struct flow_action_entry {
u32 csum_flags; /* FLOW_ACTION_CSUM */
u32 mark; /* FLOW_ACTION_MARK */
u16 ptype; /* FLOW_ACTION_PTYPE */
+ u32 priority; /* FLOW_ACTION_PRIORITY */
struct { /* FLOW_ACTION_QUEUE */
u32 ctx;
u32 index;
@@ -196,7 +232,13 @@ struct flow_action_entry {
struct { /* FLOW_ACTION_CT */
int action;
u16 zone;
+ struct nf_flowtable *flow_table;
} ct;
+ struct {
+ unsigned long cookie;
+ u32 mark;
+ u32 labels[4];
+ } ct_metadata;
struct { /* FLOW_ACTION_MPLS_PUSH */
u32 label;
__be16 proto;
@@ -214,11 +256,12 @@ struct flow_action_entry {
u8 ttl;
} mpls_mangle;
};
+ struct flow_action_cookie *cookie; /* user defined action cookie */
};
struct flow_action {
unsigned int num_entries;
- struct flow_action_entry entries[0];
+ struct flow_action_entry entries[];
};
static inline bool flow_action_has_entries(const struct flow_action *action)
@@ -238,7 +281,77 @@ static inline bool flow_offload_has_one_action(const struct flow_action *action)
}
#define flow_action_for_each(__i, __act, __actions) \
- for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[++__i])
+ for (__i = 0, __act = &(__actions)->entries[0]; \
+ __i < (__actions)->num_entries; \
+ __act = &(__actions)->entries[++__i])
+
+static inline bool
+flow_action_mixed_hw_stats_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack)
+{
+ const struct flow_action_entry *action_entry;
+ u8 uninitialized_var(last_hw_stats);
+ int i;
+
+ if (flow_offload_has_one_action(action))
+ return true;
+
+ flow_action_for_each(i, action_entry, action) {
+ if (i && action_entry->hw_stats != last_hw_stats) {
+ NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
+ return false;
+ }
+ last_hw_stats = action_entry->hw_stats;
+ }
+ return true;
+}
+
+static inline const struct flow_action_entry *
+flow_action_first_entry_get(const struct flow_action *action)
+{
+ WARN_ON(!flow_action_has_entries(action));
+ return &action->entries[0];
+}
+
+static inline bool
+__flow_action_hw_stats_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack,
+ bool check_allow_bit,
+ enum flow_action_hw_stats_bit allow_bit)
+{
+ const struct flow_action_entry *action_entry;
+
+ if (!flow_action_has_entries(action))
+ return true;
+ if (!flow_action_mixed_hw_stats_check(action, extack))
+ return false;
+ action_entry = flow_action_first_entry_get(action);
+ if (!check_allow_bit &&
+ action_entry->hw_stats != FLOW_ACTION_HW_STATS_ANY) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
+ return false;
+ } else if (check_allow_bit &&
+ !(action_entry->hw_stats & BIT(allow_bit))) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type");
+ return false;
+ }
+ return true;
+}
+
+static inline bool
+flow_action_hw_stats_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack,
+ enum flow_action_hw_stats_bit allow_bit)
+{
+ return __flow_action_hw_stats_check(action, extack, true, allow_bit);
+}
+
+static inline bool
+flow_action_basic_hw_stats_check(const struct flow_action *action,
+ struct netlink_ext_ack *extack)
+{
+ return __flow_action_hw_stats_check(action, extack, false, 0);
+}
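
From a driver's flower offload path, the checks slot in before programming the rule; only the helper call is from this patch, the surrounding function is a sketch:

	static int foo_flower_replace(struct foo_priv *priv,
				      struct flow_cls_offload *f)
	{
		struct flow_rule *rule = flow_cls_offload_flow_rule(f);

		/* This device only implements delayed (polled) counters. */
		if (!flow_action_hw_stats_check(&rule->action, f->common.extack,
						FLOW_ACTION_HW_STATS_DELAYED_BIT))
			return -EOPNOTSUPP;

		return foo_program_rule(priv, rule);
	}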
struct flow_rule {
struct flow_match match;
@@ -257,14 +370,24 @@ struct flow_stats {
u64 pkts;
u64 bytes;
u64 lastused;
+ enum flow_action_hw_stats used_hw_stats;
+ bool used_hw_stats_valid;
};
static inline void flow_stats_update(struct flow_stats *flow_stats,
- u64 bytes, u64 pkts, u64 lastused)
+ u64 bytes, u64 pkts, u64 lastused,
+ enum flow_action_hw_stats used_hw_stats)
{
flow_stats->pkts += pkts;
flow_stats->bytes += bytes;
flow_stats->lastused = max_t(u64, flow_stats->lastused, lastused);
+
+ /* The driver should pass a value with at most one bit set.
+ * Passing FLOW_ACTION_HW_STATS_ANY is invalid.
+ */
+ WARN_ON(used_hw_stats == FLOW_ACTION_HW_STATS_ANY);
+ flow_stats->used_hw_stats |= used_hw_stats;
+ flow_stats->used_hw_stats_valid = true;
}
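
Callers must now state which stats type the hardware actually used; e.g. a driver reading polled counters might report them like this (foo_hw_read_rule_counters is a stand-in, f is the driver's struct flow_cls_offload pointer):

	u64 packets, bytes, lastused;

	foo_hw_read_rule_counters(priv, rule_id, &packets, &bytes, &lastused);

	/* Exactly one bit set; FLOW_ACTION_HW_STATS_ANY trips the WARN_ON above. */
	flow_stats_update(&f->stats, bytes, packets, lastused,
			  FLOW_ACTION_HW_STATS_DELAYED);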
enum flow_block_command {
@@ -410,6 +533,7 @@ void flow_indr_block_cb_unregister(struct net_device *dev,
void flow_indr_block_call(struct net_device *dev,
struct flow_block_offload *bo,
- enum flow_block_command command);
+ enum flow_block_command command,
+ enum tc_setup_type type);
#endif /* _NET_FLOW_OFFLOAD_H */
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index fe96bf247aac..81b965953036 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -85,9 +85,8 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
int iif, int sdif,
bool *refcounted)
{
- struct sock *sk = skb_steal_sock(skb);
+ struct sock *sk = skb_steal_sock(skb, refcounted);
- *refcounted = true;
if (sk)
return sk;
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 895546058a20..a3f076befa4f 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -335,4 +335,10 @@ static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
if (icsk->icsk_ack.pingpong < U8_MAX)
icsk->icsk_ack.pingpong++;
}
+
+static inline bool inet_csk_has_ulp(struct sock *sk)
+{
+ return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
+}
+
#endif /* _INET_CONNECTION_SOCK_H */
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index d0019d3395cf..ad64ba6a057f 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -379,10 +379,9 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
const int sdif,
bool *refcounted)
{
- struct sock *sk = skb_steal_sock(skb);
+ struct sock *sk = skb_steal_sock(skb, refcounted);
const struct iphdr *iph = ip_hdr(skb);
- *refcounted = true;
if (sk)
return sk;
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 34c4436fd18f..a7ce00af6c44 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -52,7 +52,7 @@ struct ip_options {
unsigned char router_alert;
unsigned char cipso;
unsigned char __pad2;
- unsigned char __data[0];
+ unsigned char __data[];
};
struct ip_options_rcu {
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index 7bec95df4f80..27ec612cd4a4 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -76,6 +76,15 @@ static inline void __tcp_v6_send_check(struct sk_buff *skb,
}
}
+static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct tcphdr *th = tcp_hdr(skb);
+
+ ipv6h->payload_len = 0;
+ th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
+}
+
#if IS_ENABLED(CONFIG_IPV6)
static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index fd60a8ac02ee..80262d2980f5 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -190,15 +190,14 @@ struct fib6_info {
u8 should_flush:1,
dst_nocount:1,
dst_nopolicy:1,
- dst_host:1,
fib6_destroying:1,
offload:1,
trap:1,
- unused:1;
+ unused:2;
struct rcu_head rcu;
struct nexthop *nh;
- struct fib6_nh fib6_nh[0];
+ struct fib6_nh fib6_nh[];
};
struct rt6_info {
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index b69c16cbbf71..f7543c095b33 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -16,7 +16,7 @@ struct route_info {
reserved_h:3;
#endif
__be32 lifetime;
- __u8 prefix[0]; /* 0,8 or 16 */
+ __u8 prefix[]; /* 0,8 or 16 */
};
#include <net/addrconf.h>
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 6a1ae49809de..59e0d4e99f94 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -153,7 +153,7 @@ struct fib_info {
bool nh_updated;
struct nexthop *nh;
struct rcu_head rcu;
- struct fib_nh fib_nh[0];
+ struct fib_nh fib_nh[];
};
@@ -250,7 +250,7 @@ struct fib_table {
int tb_num_default;
struct rcu_head rcu;
unsigned long *tb_data;
- unsigned long __data[0];
+ unsigned long __data[];
};
struct fib_dump_filter {
@@ -470,8 +470,9 @@ int fib_nh_init(struct net *net, struct fib_nh *fib_nh,
struct fib_config *cfg, int nh_weight,
struct netlink_ext_ack *extack);
void fib_nh_release(struct net *net, struct fib_nh *fib_nh);
-int fib_nh_common_init(struct fib_nh_common *nhc, struct nlattr *fc_encap,
- u16 fc_encap_type, void *cfg, gfp_t gfp_flags,
+int fib_nh_common_init(struct net *net, struct fib_nh_common *nhc,
+ struct nlattr *fc_encap, u16 fc_encap_type,
+ void *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void fib_nh_common_release(struct fib_nh_common *nhc);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index cec1a54401f2..1bf8065fe871 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1027,6 +1027,12 @@ struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, st
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst,
bool connected);
+struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net *net, struct socket *sock,
+ struct in6_addr *saddr,
+ const struct ip_tunnel_info *info,
+ u8 protocol, bool use_cache);
struct dst_entry *ip6_blackhole_route(struct net *net,
struct dst_entry *orig_dst);
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index 5d6c5b1fc695..05cfd6ff6528 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -30,11 +30,11 @@ struct lwtunnel_state {
int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
int (*orig_input)(struct sk_buff *);
struct rcu_head rcu;
- __u8 data[0];
+ __u8 data[];
};
struct lwtunnel_encap_ops {
- int (*build_state)(struct nlattr *encap,
+ int (*build_state)(struct net *net, struct nlattr *encap,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack);
@@ -113,7 +113,7 @@ int lwtunnel_valid_encap_type(u16 encap_type,
struct netlink_ext_ack *extack);
int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len,
struct netlink_ext_ack *extack);
-int lwtunnel_build_state(u16 encap_type,
+int lwtunnel_build_state(struct net *net, u16 encap_type,
struct nlattr *encap,
unsigned int family, const void *cfg,
struct lwtunnel_state **lws,
@@ -209,7 +209,7 @@ static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len,
return 0;
}
-static inline int lwtunnel_build_state(u16 encap_type,
+static inline int lwtunnel_build_state(struct net *net, u16 encap_type,
struct nlattr *encap,
unsigned int family, const void *cfg,
struct lwtunnel_state **lws,
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 77e6b5a83b06..b6b4de0e4b5e 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -316,6 +316,7 @@ struct ieee80211_vif_chanctx_switch {
* functionality changed for this BSS (AP mode).
* @BSS_CHANGED_TWT: TWT status changed
* @BSS_CHANGED_HE_OBSS_PD: OBSS Packet Detection status changed.
+ * @BSS_CHANGED_HE_BSS_COLOR: BSS Color has changed
*
*/
enum ieee80211_bss_change {
@@ -348,6 +349,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_FTM_RESPONDER = 1<<26,
BSS_CHANGED_TWT = 1<<27,
BSS_CHANGED_HE_OBSS_PD = 1<<28,
+ BSS_CHANGED_HE_BSS_COLOR = 1<<29,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -494,7 +496,6 @@ struct ieee80211_ftm_responder_params {
* This structure keeps information about a BSS (and an association
* to that BSS) that can change during the lifetime of the BSS.
*
- * @bss_color: 6-bit value to mark inter-BSS frame, if BSS supports HE
* @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
* @multi_sta_back_32bit: supports BA bitmap of 32-bits in Multi-STA BACK
* @uora_exists: is the UORA element advertised by AP
@@ -573,7 +574,7 @@ struct ieee80211_ftm_responder_params {
* @ssid: The SSID of the current vif. Valid in AP and IBSS mode.
* @ssid_len: Length of SSID given in @ssid.
* @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode.
- * @txpower: TX power in dBm
+ * @txpower: TX power in dBm. INT_MIN means not configured.
* @txpower_type: TX power adjustment used to control per packet Transmit
* Power Control (TPC) in lower driver for the current vif. In particular
* TPC is enabled if value passed in %txpower_type is
@@ -604,10 +605,10 @@ struct ieee80211_ftm_responder_params {
* in order to discover all the nontransmitted BSSIDs in the set.
* @he_operation: HE operation information of the AP we are connected to
* @he_obss_pd: OBSS Packet Detection parameters.
+ * @he_bss_color: BSS coloring settings, if BSS supports HE
*/
struct ieee80211_bss_conf {
const u8 *bssid;
- u8 bss_color;
u8 htc_trig_based_pkt_ext;
bool multi_sta_back_32bit;
bool uora_exists;
@@ -667,6 +668,7 @@ struct ieee80211_bss_conf {
u8 profile_periodicity;
struct ieee80211_he_operation he_operation;
struct ieee80211_he_obss_pd he_obss_pd;
+ struct cfg80211_he_bss_color he_bss_color;
};
/**
@@ -826,6 +828,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_AMSDU = BIT(3),
IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP = BIT(5),
+ IEEE80211_TX_CTRL_HW_80211_ENCAP = BIT(6),
};
/*
@@ -1984,6 +1987,7 @@ struct ieee80211_sta_txpwr {
* @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not.
* @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
* @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID
+ * @txpwr: the station tx power configuration
* @txq: per-TID data TX queues (if driver uses the TXQ abstraction); note that
* the last entry (%IEEE80211_NUM_TIDS) is used for non-data frames
*/
@@ -3448,6 +3452,10 @@ enum ieee80211_reconfig_type {
* in AP mode, this callback will not be called when the flag
* %IEEE80211_HW_AP_LINK_PS is set. Must be atomic.
*
+ * @sta_set_txpwr: Configure the station tx power. This callback sets the tx
+ * power for the station.
+ * This callback can sleep.
+ *
* @sta_state: Notifies low level driver about state transition of a
* station (which can be the AP, a client, IBSS/WDS/mesh peer etc.)
* This callback is mutually exclusive with @sta_add/@sta_remove.
@@ -3773,6 +3781,9 @@ enum ieee80211_reconfig_type {
*
* @start_pmsr: start peer measurement (e.g. FTM) (this call can sleep)
* @abort_pmsr: abort peer measurement (this call can sleep)
+ * @set_tid_config: Apply TID specific configurations. This callback may sleep.
+ * @reset_tid_config: Reset TID specific configuration for the peer.
+ * This callback may sleep.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -4077,6 +4088,13 @@ struct ieee80211_ops {
struct cfg80211_pmsr_request *request);
void (*abort_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *request);
+ int (*set_tid_config)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_conf);
+ int (*reset_tid_config)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u8 tids);
};
/**
@@ -4660,6 +4678,26 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
struct sk_buff *skb);
/**
+ * ieee80211_tx_status_8023 - transmit status callback for 802.3 frame format
+ *
+ * Call this function for all transmitted data frames after their transmit
+ * completion. This callback should only be called for data frames which
+ * are using the driver's (or hardware's) offload capability of encap/decap
+ * 802.11 frames.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other and all
+ * calls in the same tx status family.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @vif: the interface for which the frame was transmitted
+ * @skb: the frame that was transmitted, owned by mac80211 after this call
+ */
+void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb);
+
+/**
* ieee80211_report_low_ack - report non-responding station
*
* When operating in AP-mode, call this function to report a non-responding
@@ -6479,5 +6517,16 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info,
int len);
+/**
+ * ieee80211_set_hw_80211_encap - enable hardware encapsulation offloading.
+ *
+ * This function is used to notify mac80211 that a vif can be passed raw 802.3
+ * frames. The driver then needs to handle the 802.11 encapsulation inside the
+ * hardware or firmware.
+ *
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @enable: indicate if the feature should be turned on or off
+ */
+bool ieee80211_set_hw_80211_encap(struct ieee80211_vif *vif, bool enable);
#endif /* MAC80211_H */
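
A hedged sketch of how a driver pairs the two new encap-offload entry points; the surrounding context (vif setup, TX completion) is illustrative:

	/* On interface setup, ask mac80211 to hand the vif raw 802.3 frames;
	 * the hardware or firmware then performs 802.11 encapsulation.
	 */
	bool encap = ieee80211_set_hw_80211_encap(vif, true);

	/* Later, on TX completion of such a frame (not in IRQ context): */
	if (encap)
		ieee80211_tx_status_8023(hw, vif, skb);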
diff --git a/include/net/macsec.h b/include/net/macsec.h
index 92e43db8b566..52874cdfe226 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -11,18 +11,48 @@
#include <uapi/linux/if_link.h>
#include <uapi/linux/if_macsec.h>
-typedef u64 __bitwise sci_t;
+#define MACSEC_DEFAULT_PN_LEN 4
+#define MACSEC_XPN_PN_LEN 8
+#define MACSEC_SALT_LEN 12
#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
+typedef u64 __bitwise sci_t;
+typedef u32 __bitwise ssci_t;
+
+typedef union salt {
+ struct {
+ u32 ssci;
+ u64 pn;
+ } __packed;
+ u8 bytes[MACSEC_SALT_LEN];
+} __packed salt_t;
+
+typedef union pn {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 lower;
+ u32 upper;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 upper;
+ u32 lower;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ };
+ u64 full64;
+} pn_t;
+
/**
* struct macsec_key - SA key
* @id: user-provided key identifier
* @tfm: crypto struct, key storage
+ * @salt: salt used to generate IV in XPN cipher suites
*/
struct macsec_key {
u8 id[MACSEC_KEYID_LEN];
struct crypto_aead *tfm;
+ salt_t salt;
};
struct macsec_rx_sc_stats {
@@ -58,18 +88,34 @@ struct macsec_tx_sc_stats {
__u64 OutOctetsEncrypted;
};
+struct macsec_dev_stats {
+ __u64 OutPktsUntagged;
+ __u64 InPktsUntagged;
+ __u64 OutPktsTooLong;
+ __u64 InPktsNoTag;
+ __u64 InPktsBadTag;
+ __u64 InPktsUnknownSCI;
+ __u64 InPktsNoSCI;
+ __u64 InPktsOverrun;
+};
+
/**
* struct macsec_rx_sa - receive secure association
* @active:
* @next_pn: packet number expected for the next packet
* @lock: protects next_pn manipulations
* @key: key structure
+ * @ssci: short secure channel identifier
* @stats: per-SA stats
*/
struct macsec_rx_sa {
struct macsec_key key;
+ ssci_t ssci;
spinlock_t lock;
- u32 next_pn;
+ union {
+ pn_t next_pn_halves;
+ u64 next_pn;
+ };
refcount_t refcnt;
bool active;
struct macsec_rx_sa_stats __percpu *stats;
@@ -110,12 +156,17 @@ struct macsec_rx_sc {
* @next_pn: packet number to use for the next packet
* @lock: protects next_pn manipulations
* @key: key structure
+ * @ssci: short secure channel identifier
* @stats: per-SA stats
*/
struct macsec_tx_sa {
struct macsec_key key;
+ ssci_t ssci;
spinlock_t lock;
- u32 next_pn;
+ union {
+ pn_t next_pn_halves;
+ u64 next_pn;
+ };
refcount_t refcnt;
bool active;
struct macsec_tx_sa_stats __percpu *stats;
@@ -152,6 +203,7 @@ struct macsec_tx_sc {
* @key_len: length of keys used by the cipher suite
* @icv_len: length of ICV used by the cipher suite
* @validate_frames: validation mode
+ * @xpn: enable XPN for this SecY
* @operational: MAC_Operational flag
* @protect_frames: enable protection for this SecY
* @replay_protect: enable packet number checks on receive
@@ -166,6 +218,7 @@ struct macsec_secy {
u16 key_len;
u16 icv_len;
enum macsec_validation_type validate_frames;
+ bool xpn;
bool operational;
bool protect_frames;
bool replay_protect;
@@ -178,7 +231,10 @@ struct macsec_secy {
* struct macsec_context - MACsec context for hardware offloading
*/
struct macsec_context {
- struct phy_device *phydev;
+ union {
+ struct net_device *netdev;
+ struct phy_device *phydev;
+ };
enum macsec_offload offload;
struct macsec_secy *secy;
@@ -191,6 +247,13 @@ struct macsec_context {
struct macsec_tx_sa *tx_sa;
};
} sa;
+ union {
+ struct macsec_tx_sc_stats *tx_sc_stats;
+ struct macsec_tx_sa_stats *tx_sa_stats;
+ struct macsec_rx_sc_stats *rx_sc_stats;
+ struct macsec_rx_sa_stats *rx_sa_stats;
+ struct macsec_dev_stats *dev_stats;
+ } stats;
u8 prepare:1;
};
@@ -217,6 +280,12 @@ struct macsec_ops {
int (*mdo_add_txsa)(struct macsec_context *ctx);
int (*mdo_upd_txsa)(struct macsec_context *ctx);
int (*mdo_del_txsa)(struct macsec_context *ctx);
+ /* Statistics */
+ int (*mdo_get_dev_stats)(struct macsec_context *ctx);
+ int (*mdo_get_tx_sc_stats)(struct macsec_context *ctx);
+ int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
+ int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
+ int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
};
void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
diff --git a/include/net/mip6.h b/include/net/mip6.h
index f1c28971c362..67cd7e50804c 100644
--- a/include/net/mip6.h
+++ b/include/net/mip6.h
@@ -25,7 +25,7 @@ struct ip6_mh {
__u8 ip6mh_reserved;
__u16 ip6mh_cksum;
/* Followed by type specific messages */
- __u8 data[0];
+ __u8 data[];
} __packed;
#define IP6_MH_TYPE_BRR 0 /* Binding Refresh Request */
diff --git a/include/net/mld.h b/include/net/mld.h
index b0f5b3105ef0..496bddb59942 100644
--- a/include/net/mld.h
+++ b/include/net/mld.h
@@ -24,12 +24,12 @@ struct mld2_grec {
__u8 grec_auxwords;
__be16 grec_nsrcs;
struct in6_addr grec_mca;
- struct in6_addr grec_src[0];
+ struct in6_addr grec_src[];
};
struct mld2_report {
struct icmp6hdr mld2r_hdr;
- struct mld2_grec mld2r_grec[0];
+ struct mld2_grec mld2r_grec[];
};
#define mld2r_type mld2r_hdr.icmp6_type
@@ -55,7 +55,7 @@ struct mld2_query {
#endif
__u8 mld2q_qqic;
__be16 mld2q_nsrcs;
- struct in6_addr mld2q_srcs[0];
+ struct in6_addr mld2q_srcs[];
};
#define mld2q_type mld2q_hdr.icmp6_type
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
index 6b4759eae158..9deb3a3735da 100644
--- a/include/net/mpls_iptunnel.h
+++ b/include/net/mpls_iptunnel.h
@@ -11,7 +11,7 @@ struct mpls_iptunnel_encap {
u8 ttl_propagate;
u8 default_ttl;
u8 reserved1;
- u32 label[0];
+ u32 label[];
};
static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index c971d25431ea..0e7c5471010b 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -12,6 +12,8 @@
#include <linux/tcp.h>
#include <linux/types.h>
+struct seq_file;
+
/* MPTCP sk_buff extension data */
struct mptcp_ext {
u64 data_ack;
@@ -33,6 +35,21 @@ struct mptcp_out_options {
u16 suboptions;
u64 sndr_key;
u64 rcvr_key;
+ union {
+ struct in_addr addr;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ struct in6_addr addr6;
+#endif
+ };
+ u8 addr_id;
+ u64 ahmac;
+ u8 rm_id;
+ u8 join_id;
+ u8 backup;
+ u32 nonce;
+ u64 thmac;
+ u32 token;
+ u8 hmac[20];
struct mptcp_ext ext_copy;
#endif
};
@@ -106,6 +123,9 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
skb_ext_find(from, SKB_EXT_MPTCP));
}
+bool mptcp_sk_is_subflow(const struct sock *sk);
+
+void mptcp_seq_show(struct seq_file *seq);
#else
static inline void mptcp_init(void)
@@ -172,6 +192,12 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
return true;
}
+static inline bool mptcp_sk_is_subflow(const struct sock *sk)
+{
+ return false;
+}
+
+static inline void mptcp_seq_show(struct seq_file *seq) { }
#endif /* CONFIG_MPTCP */
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index b5ebeb3b0de0..7d107113f988 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -41,6 +41,7 @@ enum {
ND_OPT_DNSSL = 31, /* RFC6106 */
ND_OPT_6CO = 34, /* RFC6775 */
ND_OPT_CAPTIVE_PORTAL = 37, /* RFC7710 */
+ ND_OPT_PREF64 = 38, /* RFC-ietf-6man-ra-pref64-09 */
__ND_OPT_MAX
};
@@ -80,12 +81,12 @@ extern struct neigh_table nd_tbl;
struct nd_msg {
struct icmp6hdr icmph;
struct in6_addr target;
- __u8 opt[0];
+ __u8 opt[];
};
struct rs_msg {
struct icmp6hdr icmph;
- __u8 opt[0];
+ __u8 opt[];
};
struct ra_msg {
@@ -98,7 +99,7 @@ struct rd_msg {
struct icmp6hdr icmph;
struct in6_addr target;
struct in6_addr dest;
- __u8 opt[0];
+ __u8 opt[];
};
struct nd_opt_hdr {
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 8ec77bfdc1a4..e1476775769c 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -174,7 +174,7 @@ struct pneigh_entry {
struct net_device *dev;
u8 flags;
u8 protocol;
- u8 key[0];
+ u8 key[];
};
/*
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 854d39ef1ca3..ab96fb59131c 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -168,6 +168,9 @@ struct net {
#ifdef CONFIG_XFRM
struct netns_xfrm xfrm;
#endif
+
+ atomic64_t net_cookie; /* written once */
+
#if IS_ENABLED(CONFIG_IP_VS)
struct netns_ipvs *ipvs;
#endif
@@ -225,6 +228,8 @@ extern struct list_head net_namespace_list;
struct net *get_net_ns_by_pid(pid_t pid);
struct net *get_net_ns_by_fd(int fd);
+u64 net_gen_cookie(struct net *net);
+
#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
void ipx_unregister_sysctl(void);
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index f7a060c6eb28..7f44a771530e 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -65,6 +65,17 @@ static inline void nf_ct_set_acct(struct net *net, bool enable)
#endif
}
+void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
+ unsigned int bytes);
+
+static inline void nf_ct_acct_update(struct nf_conn *ct, u32 dir,
+ unsigned int bytes)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ nf_ct_acct_add(ct, dir, 1, bytes);
+#endif
+}
+
void nf_conntrack_acct_pernet_init(struct net *net);
int nf_conntrack_acct_init(void);
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 5ae5295aa46d..e1e588387103 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -45,7 +45,7 @@ enum nf_ct_ext_id {
struct nf_ct_ext {
u8 offset[NF_CT_EXT_NUM];
u8 len;
- char data[0];
+ char data[];
};
static inline bool __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id)
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 6dd72396f534..659b0ea25b4d 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -14,7 +14,7 @@
struct nf_ct_timeout {
__u16 l3num;
const struct nf_conntrack_l4proto *l4proto;
- char data[0];
+ char data[];
};
struct ctnl_timeout {
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index e0f709d9d547..6bf69652f57d 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -16,6 +16,35 @@ struct nf_flow_rule;
struct flow_offload;
enum flow_offload_tuple_dir;
+struct nf_flow_key {
+ struct flow_dissector_key_meta meta;
+ struct flow_dissector_key_control control;
+ struct flow_dissector_key_control enc_control;
+ struct flow_dissector_key_basic basic;
+ union {
+ struct flow_dissector_key_ipv4_addrs ipv4;
+ struct flow_dissector_key_ipv6_addrs ipv6;
+ };
+ struct flow_dissector_key_keyid enc_key_id;
+ union {
+ struct flow_dissector_key_ipv4_addrs enc_ipv4;
+ struct flow_dissector_key_ipv6_addrs enc_ipv6;
+ };
+ struct flow_dissector_key_tcp tcp;
+ struct flow_dissector_key_ports tp;
+} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+
+struct nf_flow_match {
+ struct flow_dissector dissector;
+ struct nf_flow_key key;
+ struct nf_flow_key mask;
+};
+
+struct nf_flow_rule {
+ struct nf_flow_match match;
+ struct flow_rule *rule;
+};
+
struct nf_flowtable_type {
struct list_head list;
int family;
@@ -33,7 +62,8 @@ struct nf_flowtable_type {
};
enum nf_flowtable_flags {
- NF_FLOWTABLE_HW_OFFLOAD = 0x1,
+ NF_FLOWTABLE_HW_OFFLOAD = 0x1, /* NFT_FLOWTABLE_HW_OFFLOAD */
+ NF_FLOWTABLE_COUNTER = 0x2, /* NFT_FLOWTABLE_COUNTER */
};
struct nf_flowtable {
@@ -44,6 +74,7 @@ struct nf_flowtable {
struct delayed_work gc_work;
unsigned int flags;
struct flow_block flow_block;
+ struct rw_semaphore flow_block_lock; /* Guards flow_block */
possible_net_t net;
};
@@ -129,10 +160,18 @@ struct nf_flow_route {
struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
void flow_offload_free(struct flow_offload *flow);
+int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
+ flow_setup_cb_t *cb, void *cb_priv);
+void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
+ flow_setup_cb_t *cb, void *cb_priv);
+
int flow_offload_route_init(struct flow_offload *flow,
const struct nf_flow_route *route);
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
+void flow_offload_refresh(struct nf_flowtable *flow_table,
+ struct flow_offload *flow);
+
struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload_tuple *tuple);
void nf_flow_table_cleanup(struct net_device *dev);
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 47088083667b..e770bba00066 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -14,7 +14,10 @@ struct nf_queue_entry {
struct sk_buff *skb;
unsigned int id;
unsigned int hook_index; /* index in hook_entries->hook[] */
-
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ struct net_device *physin;
+ struct net_device *physout;
+#endif
struct nf_hook_state state;
u16 size; /* sizeof(entry) + saved route keys */
@@ -35,7 +38,7 @@ void nf_unregister_queue_handler(struct net *net);
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
-void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
+void nf_queue_entry_free(struct nf_queue_entry *entry);
static inline void init_hashrandom(u32 *jhash_initval)
{
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 4170c033d461..6eb627b3c99b 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -224,7 +224,7 @@ int nft_validate_register_store(const struct nft_ctx *ctx,
*/
struct nft_userdata {
u8 len;
- unsigned char data[0];
+ unsigned char data[];
};
/**
@@ -266,6 +266,7 @@ struct nft_set_iter {
* @size: number of set elements
* @field_len: length of each field in concatenation, bytes
* @field_count: number of concatenated fields in element
+ * @expr: set must support expressions
*/
struct nft_set_desc {
unsigned int klen;
@@ -273,6 +274,7 @@ struct nft_set_desc {
unsigned int size;
u8 field_len[NFT_REG32_COUNT];
u8 field_count;
+ bool expr;
};
/**
@@ -385,21 +387,14 @@ struct nft_set_ops {
* struct nft_set_type - nf_tables set type
*
* @ops: set ops for this type
- * @list: used internally
- * @owner: module reference
* @features: features supported by the implementation
*/
struct nft_set_type {
const struct nft_set_ops ops;
- struct list_head list;
- struct module *owner;
u32 features;
};
#define to_set_type(o) container_of(o, struct nft_set_type, ops)
-int nft_register_set(struct nft_set_type *type);
-void nft_unregister_set(struct nft_set_type *type);
-
/**
* struct nft_set - nf_tables set instance
*
@@ -423,6 +418,7 @@ void nft_unregister_set(struct nft_set_type *type);
* @policy: set parameterization (see enum nft_set_policies)
* @udlen: user data length
* @udata: user data
+ * @expr: stateful expression
* @ops: set ops
* @flags: set flags
* @genmask: generation mask
@@ -451,6 +447,7 @@ struct nft_set {
u16 policy;
u16 udlen;
unsigned char *udata;
+ struct nft_expr *expr;
/* runtime data below here */
const struct nft_set_ops *ops ____cacheline_aligned;
u16 flags:14,
@@ -572,7 +569,7 @@ struct nft_set_ext_tmpl {
struct nft_set_ext {
u8 genmask;
u8 offset[NFT_SET_EXT_NUM];
- char data[0];
+ char data[];
};
static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
@@ -673,6 +670,10 @@ static inline struct nft_object **nft_set_ext_obj(const struct nft_set_ext *ext)
return nft_set_ext(ext, NFT_SET_EXT_OBJREF);
}
+struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ const struct nlattr *attr);
+
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl,
const u32 *key, const u32 *key_end, const u32 *data,
@@ -849,8 +850,7 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
return (void *)expr->data;
}
-struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
- const struct nlattr *nla);
+int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src);
void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
const struct nft_expr *expr);
@@ -895,6 +895,18 @@ static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
return (void *)&rule->data[rule->dlen];
}
+static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_expr *expr;
+
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) {
+ expr = nft_set_ext_expr(ext);
+ expr->ops->eval(expr, regs, pkt);
+ }
+}
+
/*
* The last pointer isn't really necessary, but the compiler isn't able to
* determine that the result of nft_expr_last() is always the same since it
@@ -1253,9 +1265,6 @@ void nft_trace_notify(struct nft_traceinfo *info);
#define MODULE_ALIAS_NFT_EXPR(name) \
MODULE_ALIAS("nft-expr-" name)
-#define MODULE_ALIAS_NFT_SET() \
- MODULE_ALIAS("nft-set")
-
#define MODULE_ALIAS_NFT_OBJ(type) \
MODULE_ALIAS("nft-obj-" __stringify(type))
@@ -1385,7 +1394,7 @@ struct nft_trans {
int msg_type;
bool put_net;
struct nft_ctx ctx;
- char data[0];
+ char data[];
};
struct nft_trans_rule {
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 29e7e1021267..78516de14d31 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -69,12 +69,13 @@ extern const struct nft_expr_ops nft_payload_fast_ops;
extern struct static_key_false nft_counters_enabled;
extern struct static_key_false nft_trace_enabled;
-extern struct nft_set_type nft_set_rhash_type;
-extern struct nft_set_type nft_set_hash_type;
-extern struct nft_set_type nft_set_hash_fast_type;
-extern struct nft_set_type nft_set_rbtree_type;
-extern struct nft_set_type nft_set_bitmap_type;
-extern struct nft_set_type nft_set_pipapo_type;
+extern const struct nft_set_type nft_set_rhash_type;
+extern const struct nft_set_type nft_set_hash_type;
+extern const struct nft_set_type nft_set_hash_fast_type;
+extern const struct nft_set_type nft_set_rbtree_type;
+extern const struct nft_set_type nft_set_bitmap_type;
+extern const struct nft_set_type nft_set_pipapo_type;
+extern const struct nft_set_type nft_set_pipapo_avx2_type;
struct nft_expr;
struct nft_regs;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 56c365dc6dc7..67c57d6942e3 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -1466,6 +1466,21 @@ static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype,
}
/**
+ * nla_put_bitfield32 - Add a bitfield32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: value carrying bits
+ * @selector: selector of valid bits
+ */
+static inline int nla_put_bitfield32(struct sk_buff *skb, int attrtype,
+ __u32 value, __u32 selector)
+{
+ struct nla_bitfield32 tmp = { value, selector, };
+
+ return nla_put(skb, attrtype, sizeof(tmp), &tmp);
+}
+
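
A typical dump-path use, advertising which flag bits are valid alongside their values; TCA_FOO_FLAGS is a placeholder attribute, the TC_RED_* bits exist in this tree:

	if (nla_put_bitfield32(skb, TCA_FOO_FLAGS,
			       flags & (TC_RED_ECN | TC_RED_NODROP),
			       TC_RED_ECN | TC_RED_NODROP))
		goto nla_put_failure;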
+/**
* nla_get_u32 - return payload of u32 attribute
* @nla: u32 netlink attribute
*/
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 08b98414d94e..154b8f01499b 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -101,6 +101,7 @@ struct netns_ipv4 {
int sysctl_ip_fwd_use_pmtu;
int sysctl_ip_fwd_update_priority;
int sysctl_ip_nonlocal_bind;
+ int sysctl_ip_autobind_reuse;
/* Shall we try to damage output packets if routing dev changes? */
int sysctl_ip_dynaddr;
int sysctl_ip_early_demux;
diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h
index b5fdb108d602..59b2c3a3db42 100644
--- a/include/net/netns/mib.h
+++ b/include/net/netns/mib.h
@@ -27,6 +27,9 @@ struct netns_mib {
#if IS_ENABLED(CONFIG_TLS)
DEFINE_SNMP_STAT(struct linux_tls_mib, tls_statistics);
#endif
+#ifdef CONFIG_MPTCP
+ DEFINE_SNMP_STAT(struct mptcp_mib, mptcp_statistics);
+#endif
};
#endif
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 331ebbc94fe7..c440ccc861fc 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -73,7 +73,7 @@ struct nh_group {
u16 num_nh;
bool mpath;
bool has_v4;
- struct nh_grp_entry nh_entries[0];
+ struct nh_grp_entry nh_entries[];
};
struct nexthop {
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index 6ab5a83f597c..0550e0380b8d 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -244,13 +244,13 @@ struct dest_spec_params {
struct core_conn_create_dest_spec_params {
__u8 type;
__u8 length;
- __u8 value[0];
+ __u8 value[];
} __packed;
struct nci_core_conn_create_cmd {
__u8 destination_type;
__u8 number_destination_params;
- struct core_conn_create_dest_spec_params params[0];
+ struct core_conn_create_dest_spec_params params[];
} __packed;
#define NCI_OP_CORE_CONN_CLOSE_CMD nci_opcode_pack(NCI_GID_CORE, 0x05)
@@ -321,7 +321,7 @@ struct nci_core_init_rsp_1 {
__u8 status;
__le32 nfcc_features;
__u8 num_supported_rf_interfaces;
- __u8 supported_rf_interfaces[0]; /* variable size array */
+ __u8 supported_rf_interfaces[]; /* variable size array */
/* continued in nci_core_init_rsp_2 */
} __packed;
@@ -338,7 +338,7 @@ struct nci_core_init_rsp_2 {
struct nci_core_set_config_rsp {
__u8 status;
__u8 num_params;
- __u8 params_id[0]; /* variable size array */
+ __u8 params_id[]; /* variable size array */
} __packed;
#define NCI_OP_CORE_CONN_CREATE_RSP nci_opcode_pack(NCI_GID_CORE, 0x04)
@@ -501,18 +501,18 @@ struct nci_rf_nfcee_action_ntf {
__u8 nfcee_id;
__u8 trigger;
__u8 supported_data_length;
- __u8 supported_data[0];
+ __u8 supported_data[];
} __packed;
#define NCI_OP_NFCEE_DISCOVER_NTF nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00)
struct nci_nfcee_supported_protocol {
__u8 num_protocol;
- __u8 supported_protocol[0];
+ __u8 supported_protocol[];
} __packed;
struct nci_nfcee_information_tlv {
__u8 num_tlv;
- __u8 information_tlv[0];
+ __u8 information_tlv[];
} __packed;
struct nci_nfcee_discover_ntf {
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 5d277d68fd8d..2cd3a261bcbc 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -146,7 +146,7 @@ struct nfc_evt_transaction {
u32 aid_len;
u8 aid[NFC_MAX_AID_LENGTH];
u8 params_len;
- u8 params[0];
+ u8 params[];
} __packed;
struct nfc_genl_data {
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index cfbed00ba7ee..81d7773f96cd 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -151,6 +151,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params);
#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
+void page_pool_release_page(struct page_pool *pool, struct page *page);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
@@ -160,41 +161,32 @@ static inline void page_pool_use_xdp_mem(struct page_pool *pool,
void (*disconnect)(void *))
{
}
+static inline void page_pool_release_page(struct page_pool *pool,
+ struct page *page)
+{
+}
#endif
-/* Never call this directly, use helpers below */
-void __page_pool_put_page(struct page_pool *pool, struct page *page,
- unsigned int dma_sync_size, bool allow_direct);
+void page_pool_put_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size, bool allow_direct);
-static inline void page_pool_put_page(struct page_pool *pool,
- struct page *page, bool allow_direct)
+/* Same as above, but will try to sync the entire pool->max_len area */
+static inline void page_pool_put_full_page(struct page_pool *pool,
+ struct page *page, bool allow_direct)
{
/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
* allow registering MEM_TYPE_PAGE_POOL, but shield linker.
*/
#ifdef CONFIG_PAGE_POOL
- __page_pool_put_page(pool, page, -1, allow_direct);
+ page_pool_put_page(pool, page, -1, allow_direct);
#endif
}
-/* Very limited use-cases allow recycle direct */
+
+/* Same as above, but the caller must guarantee a safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
struct page *page)
{
- __page_pool_put_page(pool, page, -1, true);
-}
-
-/* Disconnects a page (from a page_pool). API users can have a need
- * to disconnect a page (from a page_pool), to allow it to be used as
- * a regular page (that will eventually be returned to the normal
- * page-allocator via put_page).
- */
-void page_pool_unmap_page(struct page_pool *pool, struct page *page);
-static inline void page_pool_release_page(struct page_pool *pool,
- struct page *page)
-{
-#ifdef CONFIG_PAGE_POOL
- page_pool_unmap_page(pool, page);
-#endif
+ page_pool_put_full_page(pool, page, true);
}
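
The reshuffled helpers in a typical driver RX path, as a sketch (rxq, offset and len are assumptions):

	/* In NAPI context, recycle straight into the lockless cache. */
	page_pool_recycle_direct(rxq->page_pool, page);

	/* Outside NAPI, or when the calling context is uncertain. */
	page_pool_put_full_page(rxq->page_pool, page, false);

	/* Handing the page to the stack: release it from the pool first. */
	page_pool_release_page(rxq->page_pool, page);
	skb_add_rx_frag(skb, 0, page, offset, len, PAGE_SIZE);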
static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
diff --git a/include/net/pie.h b/include/net/pie.h
index fd5a37cb7993..3fe2361e03b4 100644
--- a/include/net/pie.h
+++ b/include/net/pie.h
@@ -8,7 +8,7 @@
#include <net/inet_ecn.h>
#include <net/pkt_sched.h>
-#define MAX_PROB U64_MAX
+#define MAX_PROB (U64_MAX >> BITS_PER_BYTE)
#define DTIME_INVALID U64_MAX
#define QUEUE_THRESHOLD 16384
#define DQCOUNT_INVALID -1
@@ -38,16 +38,15 @@ struct pie_params {
/**
* struct pie_vars - contains pie variables
- * @qdelay: current queue delay
- * @qdelay_old: queue delay in previous qdelay calculation
- * @burst_time: burst time allowance
- * @dq_tstamp: timestamp at which dq rate was last calculated
- * @prob: drop probability
- * @accu_prob: accumulated drop probability
- * @dq_count: number of bytes dequeued in a measurement cycle
- * @avg_dq_rate: calculated average dq rate
- * @qlen_old: queue length during previous qdelay calculation
- * @accu_prob_overflows: number of times accu_prob overflows
+ * @qdelay: current queue delay
+ * @qdelay_old: queue delay in previous qdelay calculation
+ * @burst_time: burst time allowance
+ * @dq_tstamp: timestamp at which dq rate was last calculated
+ * @prob: drop probability
+ * @accu_prob: accumulated drop probability
+ * @dq_count: number of bytes dequeued in a measurement cycle
+ * @avg_dq_rate: calculated average dq rate
+ * @backlog_old: queue backlog during previous qdelay calculation
*/
struct pie_vars {
psched_time_t qdelay;
@@ -58,8 +57,7 @@ struct pie_vars {
u64 accu_prob;
u64 dq_count;
u32 avg_dq_rate;
- u32 qlen_old;
- u8 accu_prob_overflows;
+ u32 backlog_old;
};
/**
@@ -107,7 +105,6 @@ static inline void pie_vars_init(struct pie_vars *vars)
vars->accu_prob = 0;
vars->dq_count = DQCOUNT_INVALID;
vars->avg_dq_rate = 0;
- vars->accu_prob_overflows = 0;
}
static inline struct pie_skb_cb *get_pie_cb(const struct sk_buff *skb)
@@ -127,12 +124,12 @@ static inline void pie_set_enqueue_time(struct sk_buff *skb)
}
bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
- struct pie_vars *vars, u32 qlen, u32 packet_size);
+ struct pie_vars *vars, u32 backlog, u32 packet_size);
void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
- struct pie_vars *vars, u32 qlen);
+ struct pie_vars *vars, u32 backlog);
void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
- u32 qlen);
+ u32 backlog);
#endif
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index a972244ab193..04aa0649f3b0 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -72,6 +72,10 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode);
+int tcf_classify_ingress(struct sk_buff *skb,
+ const struct tcf_block *ingress_block,
+ const struct tcf_proto *tp, struct tcf_result *res,
+ bool compat_mode);
#else
static inline bool tcf_block_shared(struct tcf_block *block)
@@ -133,6 +137,15 @@ static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
{
return TC_ACT_UNSPEC;
}
+
+static inline int tcf_classify_ingress(struct sk_buff *skb,
+ const struct tcf_block *ingress_block,
+ const struct tcf_proto *tp,
+ struct tcf_result *res, bool compat_mode)
+{
+ return TC_ACT_UNSPEC;
+}
+
#endif
static inline unsigned long
@@ -249,7 +262,8 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
- u64 bytes, u64 packets, u64 lastuse)
+ u64 bytes, u64 packets, u64 lastuse,
+ u8 used_hw_stats, bool used_hw_stats_valid)
{
#ifdef CONFIG_NET_CLS_ACT
int i;
@@ -260,6 +274,8 @@ tcf_exts_stats_update(const struct tcf_exts *exts,
struct tc_action *a = exts->actions[i];
tcf_action_stats_update(a, bytes, packets, lastuse, true);
+ a->used_hw_stats = used_hw_stats;
+ a->used_hw_stats_valid = used_hw_stats_valid;
}
preempt_enable();
@@ -489,12 +505,16 @@ tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
struct net_device *dev;
if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
- NL_SET_ERR_MSG(extack, "Interface name too long");
+ NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
+ "Interface name too long");
return -EINVAL;
}
dev = __dev_get_by_name(net, indev);
- if (!dev)
+ if (!dev) {
+ NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
+ "Network device not found");
return -ENODEV;
+ }
return dev->ifindex;
}
@@ -509,7 +529,7 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
}
int tc_setup_flow_action(struct flow_action *flow_action,
- const struct tcf_exts *exts, bool rtnl_held);
+ const struct tcf_exts *exts);
void tc_cleanup_flow_action(struct flow_action *flow_action);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
@@ -727,6 +747,7 @@ struct tc_red_qopt_offload_params {
u32 limit;
bool is_ecn;
bool is_harddrop;
+ bool is_nodrop;
struct gnet_stats_queue *qstats;
};
@@ -881,4 +902,19 @@ struct tc_tbf_qopt_offload {
};
};
+enum tc_fifo_command {
+ TC_FIFO_REPLACE,
+ TC_FIFO_DESTROY,
+ TC_FIFO_STATS,
+};
+
+struct tc_fifo_qopt_offload {
+ enum tc_fifo_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_qopt_offload_stats stats;
+ };
+};
+
#endif
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 6a70845bd9ab..9092e697059e 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -75,7 +75,15 @@ struct qdisc_watchdog {
void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
+
+void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
+ u64 delta_ns);
+
+static inline void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd,
+ u64 expires)
+{
+ return qdisc_watchdog_schedule_range_ns(wd, expires, 0ULL);
+}
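
For illustration, a minimal sketch of why the range variant exists: a qdisc that can tolerate some firing slack may pass a non-zero delta so the underlying hrtimer can coalesce with neighbouring timers. The 100us slack value below is purely illustrative, not taken from any in-tree qdisc.

	/* Hedged sketch: arm the watchdog with 100us of slack so the
	 * hrtimer may coalesce with other timers expiring nearby.
	 */
	static void example_schedule(struct qdisc_watchdog *wd, u64 next_ns)
	{
		qdisc_watchdog_schedule_range_ns(wd, next_ns,
						 100 * NSEC_PER_USEC);
	}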
static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
psched_time_t expires)
@@ -181,7 +189,7 @@ struct tc_taprio_qopt_offload {
u64 cycle_time_extension;
size_t num_entries;
- struct tc_taprio_sched_entry entries[0];
+ struct tc_taprio_sched_entry entries[];
};
/* Reference counting */
diff --git a/include/net/red.h b/include/net/red.h
index 9665582c4687..fc455445f4b2 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -179,6 +179,44 @@ static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
return true;
}
+static inline int red_get_flags(unsigned char qopt_flags,
+ unsigned char historic_mask,
+ struct nlattr *flags_attr,
+ unsigned char supported_mask,
+ struct nla_bitfield32 *p_flags,
+ unsigned char *p_userbits,
+ struct netlink_ext_ack *extack)
+{
+ struct nla_bitfield32 flags;
+
+ if (qopt_flags && flags_attr) {
+ NL_SET_ERR_MSG_MOD(extack, "flags should be passed either through qopt, or through a dedicated attribute");
+ return -EINVAL;
+ }
+
+ if (flags_attr) {
+ flags = nla_get_bitfield32(flags_attr);
+ } else {
+ flags.selector = historic_mask;
+ flags.value = qopt_flags & historic_mask;
+ }
+
+ *p_flags = flags;
+ *p_userbits = qopt_flags & ~historic_mask;
+ return 0;
+}
+
+static inline int red_validate_flags(unsigned char flags,
+ struct netlink_ext_ack *extack)
+{
+ if ((flags & TC_RED_NODROP) && !(flags & TC_RED_ECN)) {
+ NL_SET_ERR_MSG_MOD(extack, "nodrop mode is only meaningful with ECN");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static inline void red_set_parms(struct red_parms *p,
u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
u8 Scell_log, u8 *stab, u32 max_P)
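
A minimal sketch of how a qdisc configuration path might consume the two helpers above. TC_RED_HISTORIC_FLAGS and TC_RED_SUPPORTED_FLAGS are assumptions standing in for whatever masks the concrete qdisc defines; the function name is hypothetical.

	/* Hedged sketch: split and validate RED-style flags during change().
	 * The flag masks here are illustrative placeholders.
	 */
	static int example_change(struct tc_red_qopt *qopt,
				  struct nlattr *flags_attr,
				  struct netlink_ext_ack *extack)
	{
		struct nla_bitfield32 flags;
		unsigned char userbits;
		int err;

		/* Reject flags passed both via qopt and via the dedicated
		 * attribute, then split them into a validated bitfield plus
		 * opaque user bits (echoed back to userspace on dump).
		 */
		err = red_get_flags(qopt->flags, TC_RED_HISTORIC_FLAGS,
				    flags_attr, TC_RED_SUPPORTED_FLAGS,
				    &flags, &userbits, extack);
		if (err)
			return err;

		/* nodrop only makes sense together with ECN marking */
		return red_validate_flags(flags.value, extack);
	}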
diff --git a/include/net/route.h b/include/net/route.h
index a9c60fc68e36..ff021cab657e 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -128,6 +128,12 @@ static inline struct rtable *__ip_route_output_key(struct net *net,
struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
const struct sock *sk);
+struct rtable *ip_route_output_tunnel(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net *net, __be32 *saddr,
+ const struct ip_tunnel_info *info,
+ u8 protocol, bool use_cache);
+
struct dst_entry *ipv4_blackhole_route(struct net *net,
struct dst_entry *dst_orig);
@@ -225,7 +231,7 @@ int ip_rt_ioctl(struct net *, unsigned int cmd, struct rtentry *rt);
void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
struct rtable *rt_dst_alloc(struct net_device *dev,
unsigned int flags, u16 type,
- bool nopolicy, bool noxfrm, bool will_cache);
+ bool nopolicy, bool noxfrm);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt);
struct in_ifaddr;
diff --git a/include/net/rpl.h b/include/net/rpl.h
new file mode 100644
index 000000000000..dceff60e8baf
--- /dev/null
+++ b/include/net/rpl.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RPL implementation
+ *
+ * Author:
+ * (C) 2020 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#ifndef _NET_RPL_H
+#define _NET_RPL_H
+
+#include <linux/rpl.h>
+
+#if IS_ENABLED(CONFIG_IPV6_RPL_LWTUNNEL)
+extern int rpl_init(void);
+extern void rpl_exit(void);
+#else
+static inline int rpl_init(void)
+{
+ return 0;
+}
+
+static inline void rpl_exit(void) {}
+#endif
+
+/* Worst-case decompression memory usage: ipv6 address (16) + pad 7 */
+#define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7)
+
+static inline size_t ipv6_rpl_srh_alloc_size(unsigned char n)
+{
+ return sizeof(struct ipv6_rpl_sr_hdr) +
+ ((n + 1) * sizeof(struct in6_addr));
+}
+
+size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
+ unsigned char cmpre);
+
+void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
+ const struct ipv6_rpl_sr_hdr *inhdr,
+ const struct in6_addr *daddr, unsigned char n);
+
+void ipv6_rpl_srh_compress(struct ipv6_rpl_sr_hdr *outhdr,
+ const struct ipv6_rpl_sr_hdr *inhdr,
+ const struct in6_addr *daddr, unsigned char n);
+
+#endif /* _NET_RPL_H */
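
For context, a small sketch of the allocation math above: for n segment addresses the uncompressed header needs the fixed ipv6_rpl_sr_hdr plus (n + 1) full IPv6 addresses, and a decompressor that also swaps in the final destination may need the worst-case scratch on top. The function name is illustrative only.

	/* Hedged sketch: size a buffer for SRH decompression plus the
	 * worst-case swap scratch (16-byte address + 7 bytes padding).
	 */
	static size_t example_rpl_buf_size(unsigned char n)
	{
		return ipv6_rpl_srh_alloc_size(n) +
		       IPV6_RPL_SRH_WORST_SWAP_SIZE;
	}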
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c30f914867e6..25d2ec4c8f00 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -1253,6 +1253,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
*/
struct mini_Qdisc {
struct tcf_proto *filter_list;
+ struct tcf_block *block;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
struct rcu_head rcu;
@@ -1279,6 +1280,8 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc __rcu **p_miniq);
+void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
+ struct tcf_block *block);
static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
{
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 314a2fa21d6b..fb42c90348d3 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -326,7 +326,7 @@ struct sctp_cookie {
* the association TCB is re-constructed from the cookie.
*/
__u32 raw_addr_list_len;
- struct sctp_init_chunk peer_init[0];
+ struct sctp_init_chunk peer_init[];
};
diff --git a/include/net/sock.h b/include/net/sock.h
index 328564525526..6d84784d33fa 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -527,10 +527,43 @@ enum sk_pacing {
SK_PACING_FQ = 2,
};
+/* A pointer stored in sk_user_data might not be suitable for copying
+ * when cloning the socket. For instance, it can point to a reference-
+ * counted object. The bottom bit of sk_user_data is set if the pointer
+ * must not be copied.
+ */
+#define SK_USER_DATA_NOCOPY 1UL
+#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY)
+
+/**
+ * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
+ * @sk: socket
+ */
+static inline bool sk_user_data_is_nocopy(const struct sock *sk)
+{
+ return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY);
+}
+
#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
-#define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk)))
-#define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr)
+#define rcu_dereference_sk_user_data(sk) \
+({ \
+ void *__tmp = rcu_dereference(__sk_user_data((sk))); \
+ (void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK); \
+})
+#define rcu_assign_sk_user_data(sk, ptr) \
+({ \
+ uintptr_t __tmp = (uintptr_t)(ptr); \
+ WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \
+ rcu_assign_pointer(__sk_user_data((sk)), __tmp); \
+})
+#define rcu_assign_sk_user_data_nocopy(sk, ptr) \
+({ \
+ uintptr_t __tmp = (uintptr_t)(ptr); \
+ WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \
+ rcu_assign_pointer(__sk_user_data((sk)), \
+ __tmp | SK_USER_DATA_NOCOPY); \
+})
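
The NOCOPY marking above is plain low-bit pointer tagging: sk_user_data always holds a pointer with alignment greater than one, so bit 0 is free to carry a flag. A self-contained userspace sketch of the same technique (not kernel code; the names are illustrative):

	#include <assert.h>
	#include <stdint.h>

	#define NOCOPY_BIT 1UL
	#define PTR_MASK   (~NOCOPY_BIT)

	/* Store a pointer together with a one-bit "do not copy" flag. */
	static uintptr_t tag_ptr(void *p, int nocopy)
	{
		uintptr_t v = (uintptr_t)p;

		assert((v & NOCOPY_BIT) == 0);	/* needs >= 2-byte alignment */
		return v | (nocopy ? NOCOPY_BIT : 0);
	}

	static void *untag_ptr(uintptr_t v)
	{
		return (void *)(v & PTR_MASK);	/* strip the flag bit */
	}

	static int is_nocopy(uintptr_t v)
	{
		return v & NOCOPY_BIT;
	}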
/*
* SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
@@ -1626,6 +1659,7 @@ void sock_rfree(struct sk_buff *skb);
void sock_efree(struct sk_buff *skb);
#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
+void sock_pfree(struct sk_buff *skb);
#else
#define sock_edemux sock_efree
#endif
@@ -2493,16 +2527,14 @@ void sock_net_set(struct sock *sk, struct net *net)
write_pnet(&sk->sk_net, net);
}
-static inline struct sock *skb_steal_sock(struct sk_buff *skb)
+static inline bool
+skb_sk_is_prefetched(struct sk_buff *skb)
{
- if (skb->sk) {
- struct sock *sk = skb->sk;
-
- skb->destructor = NULL;
- skb->sk = NULL;
- return sk;
- }
- return NULL;
+#ifdef CONFIG_INET
+ return skb->destructor == sock_pfree;
+#else
+ return false;
+#endif /* CONFIG_INET */
}
/* This helper checks if a socket is a full socket,
@@ -2513,6 +2545,35 @@ static inline bool sk_fullsock(const struct sock *sk)
return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
}
+static inline bool
+sk_is_refcounted(struct sock *sk)
+{
+ /* Only full sockets have sk->sk_flags. */
+ return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
+}
+
+/**
+ * skb_steal_sock - steal the socket out of an skb and detach it
+ * @skb: buffer to steal the socket from
+ * @refcounted: set to true if the stolen socket is reference-counted
+ */
+static inline struct sock *
+skb_steal_sock(struct sk_buff *skb, bool *refcounted)
+{
+ if (skb->sk) {
+ struct sock *sk = skb->sk;
+
+ *refcounted = true;
+ if (skb_sk_is_prefetched(skb))
+ *refcounted = sk_is_refcounted(sk);
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ return sk;
+ }
+ *refcounted = false;
+ return NULL;
+}
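
A hedged sketch of how a receive path might consume the stolen socket; example_deliver() and example_lookup_and_deliver() are assumptions standing in for the real protocol delivery path. The point is that the reference is dropped only when *refcounted came back true.

	/* Hedged sketch: consume a (possibly prefetched) socket on receive. */
	static int example_rcv(struct sk_buff *skb)
	{
		bool refcounted;
		struct sock *sk = skb_steal_sock(skb, &refcounted);

		if (sk) {
			int ret = example_deliver(sk, skb);

			/* Sockets prefetched by bpf_sk_assign() may be
			 * RCU-freed and then carry no reference.
			 */
			if (refcounted)
				sock_put(sk);
			return ret;
		}
		return example_lookup_and_deliver(skb);
	}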
+
/* Checks if this SKB belongs to an HW offloaded socket
* and whether any SW fallbacks are required based on dev.
* Check decrypted mark in case skb_orphan() cleared socket.
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 43f4a818d88f..505f1e18e9bf 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -24,7 +24,7 @@ struct sock_reuseport {
unsigned int bind_inany:1;
unsigned int has_conns:1;
struct bpf_prog __rcu *prog; /* optional BPF sock selector */
- struct sock *socks[0]; /* array of sock pointers */
+ struct sock *socks[]; /* array of sock pointers */
};
extern int reuseport_alloc(struct sock *sk, bool bind_inany);
@@ -55,6 +55,4 @@ static inline bool reuseport_has_conns(struct sock *sk, bool set)
return ret;
}
-int reuseport_get_id(struct sock_reuseport *reuse);
-
#endif /* _SOCK_REUSEPORT_H */
diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
index bdc20ab3b88d..79654bcb9a29 100644
--- a/include/net/tc_act/tc_ct.h
+++ b/include/net/tc_act/tc_ct.h
@@ -25,6 +25,9 @@ struct tcf_ct_params {
u16 ct_action;
struct rcu_head rcu;
+
+ struct tcf_ct_flow_table *ct_ft;
+ struct nf_flowtable *nf_ft;
};
struct tcf_ct {
@@ -33,8 +36,10 @@ struct tcf_ct {
};
#define to_ct(a) ((struct tcf_ct *)a)
-#define to_ct_params(a) ((struct tcf_ct_params *) \
- rtnl_dereference((to_ct(a)->params)))
+#define to_ct_params(a) \
+ ((struct tcf_ct_params *) \
+ rcu_dereference_protected(to_ct(a)->params, \
+ lockdep_is_held(&a->tcfa_lock)))
static inline uint16_t tcf_ct_zone(const struct tc_action *a)
{
@@ -46,11 +51,27 @@ static inline int tcf_ct_action(const struct tc_action *a)
return to_ct_params(a)->ct_action;
}
+static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+{
+ return to_ct_params(a)->nf_ft;
+}
+
#else
static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; }
static inline int tcf_ct_action(const struct tc_action *a) { return 0; }
+static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
+{
+ return NULL;
+}
#endif /* CONFIG_NF_CONNTRACK */
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie);
+#else
+static inline void
+tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie) { }
+#endif
+
static inline bool is_tcf_ct(const struct tc_action *a)
{
#if defined(CONFIG_NET_CLS_ACT) && IS_ENABLED(CONFIG_NF_CONNTRACK)
diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h
index cfdc7cb82cad..f098ad4424be 100644
--- a/include/net/tc_act/tc_police.h
+++ b/include/net/tc_act/tc_police.h
@@ -54,7 +54,8 @@ static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act)
struct tcf_police *police = to_police(act);
struct tcf_police_params *params;
- params = rcu_dereference_bh_rtnl(police->params);
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
return params->rate.rate_bytes_ps;
}
@@ -63,7 +64,8 @@ static inline s64 tcf_police_tcfp_burst(const struct tc_action *act)
struct tcf_police *police = to_police(act);
struct tcf_police_params *params;
- params = rcu_dereference_bh_rtnl(police->params);
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
return params->tcfp_burst;
}
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index b22a1f641f02..00bfee70609e 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -27,8 +27,8 @@ struct tcf_skbedit {
};
#define to_skbedit(a) ((struct tcf_skbedit *)a)
-/* Return true iff action is mark */
-static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
+/* Return true iff action is the one identified by FLAG. */
+static inline bool is_tcf_skbedit_with_flag(const struct tc_action *a, u32 flag)
{
#ifdef CONFIG_NET_CLS_ACT
u32 flags;
@@ -37,12 +37,18 @@ static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
rcu_read_lock();
flags = rcu_dereference(to_skbedit(a)->params)->flags;
rcu_read_unlock();
- return flags == SKBEDIT_F_MARK;
+ return flags == flag;
}
#endif
return false;
}
+/* Return true iff action is mark */
+static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_MARK);
+}
+
static inline u32 tcf_skbedit_mark(const struct tc_action *a)
{
u32 mark;
@@ -57,17 +63,7 @@ static inline u32 tcf_skbedit_mark(const struct tc_action *a)
/* Return true iff action is ptype */
static inline bool is_tcf_skbedit_ptype(const struct tc_action *a)
{
-#ifdef CONFIG_NET_CLS_ACT
- u32 flags;
-
- if (a->ops && a->ops->id == TCA_ID_SKBEDIT) {
- rcu_read_lock();
- flags = rcu_dereference(to_skbedit(a)->params)->flags;
- rcu_read_unlock();
- return flags == SKBEDIT_F_PTYPE;
- }
-#endif
- return false;
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_PTYPE);
}
static inline u32 tcf_skbedit_ptype(const struct tc_action *a)
@@ -81,4 +77,21 @@ static inline u32 tcf_skbedit_ptype(const struct tc_action *a)
return ptype;
}
+/* Return true iff action is priority */
+static inline bool is_tcf_skbedit_priority(const struct tc_action *a)
+{
+ return is_tcf_skbedit_with_flag(a, SKBEDIT_F_PRIORITY);
+}
+
+static inline u32 tcf_skbedit_priority(const struct tc_action *a)
+{
+ u32 priority;
+
+ rcu_read_lock();
+ priority = rcu_dereference(to_skbedit(a)->params)->priority;
+ rcu_read_unlock();
+
+ return priority;
+}
+
#endif /* __NET_TC_SKBEDIT_H */
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index 0689d9bcdf84..e1057b255f69 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -28,8 +28,10 @@ static inline bool is_tcf_tunnel_set(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
struct tcf_tunnel_key *t = to_tunnel_key(a);
- struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
+ struct tcf_tunnel_key_params *params;
+ params = rcu_dereference_protected(t->params,
+ lockdep_is_held(&a->tcfa_lock));
if (a->ops && a->ops->id == TCA_ID_TUNNEL_KEY)
return params->tcft_action == TCA_TUNNEL_KEY_ACT_SET;
#endif
@@ -40,8 +42,10 @@ static inline bool is_tcf_tunnel_release(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
struct tcf_tunnel_key *t = to_tunnel_key(a);
- struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
+ struct tcf_tunnel_key_params *params;
+ params = rcu_dereference_protected(t->params,
+ lockdep_is_held(&a->tcfa_lock));
if (a->ops && a->ops->id == TCA_ID_TUNNEL_KEY)
return params->tcft_action == TCA_TUNNEL_KEY_ACT_RELEASE;
#endif
@@ -69,7 +73,7 @@ tcf_tunnel_info_copy(const struct tc_action *a)
if (tun) {
size_t tun_size = sizeof(*tun) + tun->options_len;
struct ip_tunnel_info *tun_copy = kmemdup(tun, tun_size,
- GFP_KERNEL);
+ GFP_ATOMIC);
return tun_copy;
}
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a5ea27df3c2b..5fa9eacd965a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2195,14 +2195,21 @@ void tcp_update_ulp(struct sock *sk, struct proto *p,
struct sk_msg;
struct sk_psock;
-int tcp_bpf_init(struct sock *sk);
-void tcp_bpf_reinit(struct sock *sk);
+#ifdef CONFIG_BPF_STREAM_PARSER
+struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
+void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
+#else
+static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
+{
+}
+#endif /* CONFIG_BPF_STREAM_PARSER */
+
+#ifdef CONFIG_NET_SOCK_MSG
int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
int flags);
-int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len);
int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
struct msghdr *msg, int len, int flags);
+#endif /* CONFIG_NET_SOCK_MSG */
/* Call BPF_SOCK_OPS program that returns an int. If the return value
* is < 0, then the BPF op failed (for example if the loaded BPF
diff --git a/include/net/udp.h b/include/net/udp.h
index e55d5f765807..a8fa6c0c6ded 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -503,4 +503,9 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
return segs;
}
+#ifdef CONFIG_BPF_STREAM_PARSER
+struct sk_psock;
+struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
+#endif /* BPF_STREAM_PARSER */
+
#endif /* _UDP_H */
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 068f96b1a83e..ebffcb36a7e3 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -11,6 +11,66 @@
#include <linux/regmap.h>
#include <net/dsa.h>
+/* Port Group IDs (PGID) are masks of destination ports.
+ *
+ * For L2 forwarding, the switch performs 3 lookups in the PGID table for each
+ * frame, and forwards the frame to the ports that are present in the logical
+ * AND of all 3 PGIDs.
+ *
+ * These PGID lookups are:
+ * - In one of PGID[0-63]: for the destination masks. There are 2 paths by
+ * which the switch selects a destination PGID:
+ * - The {DMAC, VID} is present in the MAC table. In that case, the
+ * destination PGID is given by the DEST_IDX field of the MAC table entry
+ * that matched.
+ * - The {DMAC, VID} is not present in the MAC table (it is unknown). The
+ * frame is classified as either unicast, multicast or broadcast,
+ * and according to that, the destination PGID is chosen as being the
+ * value contained by ANA_FLOODING_FLD_UNICAST,
+ * ANA_FLOODING_FLD_MULTICAST or ANA_FLOODING_FLD_BROADCAST.
+ * The destination PGID can be a unicast set: the first PGIDs, 0 to
+ * ocelot->num_phys_ports - 1, or a multicast set: the PGIDs from
+ * ocelot->num_phys_ports to 63. By convention, a unicast PGID corresponds to
+ * a physical port and has a single bit set in the destination ports mask:
+ * the one corresponding to the port number itself. In contrast, a multicast
+ * PGID can have more than one bit set in the destination ports mask.
+ * - In one of PGID[64-79]: for the aggregation mask. The switch classifier
+ * dissects each frame and generates a 4-bit Link Aggregation Code which is
+ * used for this second PGID table lookup. The goal of link aggregation is to
+ * hash multiple flows within the same LAG onto different destination ports.
+ * The first lookup will result in a PGID with all the LAG members present in
+ * the destination ports mask, and the second lookup, by Link Aggregation
+ * Code, will ensure that each flow gets forwarded only to a single port out
+ * of that mask (there are no duplicates).
+ * - In one of PGID[80-90]: for the source mask. The third time, the PGID table
+ * is indexed with the ingress port (plus 80). These PGIDs answer the
+ * question "is port i allowed to forward traffic to port j?" If yes, then
+ * BIT(j) of PGID 80+i will be found set. The third PGID lookup can be used
+ * to enforce the L2 forwarding matrix imposed by e.g. a Linux bridge.
+ */
+
+/* Reserve some destination PGIDs at the end of the range:
+ * PGID_CPU: used for whitelisting certain MAC addresses, such as the addresses
+ * of the switch port net devices, towards the CPU port module.
+ * PGID_UC: the flooding destinations for unknown unicast traffic.
+ * PGID_MC: the flooding destinations for broadcast and non-IP multicast
+ * traffic.
+ * PGID_MCIPV4: the flooding destinations for IPv4 multicast traffic.
+ * PGID_MCIPV6: the flooding destinations for IPv6 multicast traffic.
+ */
+#define PGID_CPU 59
+#define PGID_UC 60
+#define PGID_MC 61
+#define PGID_MCIPV4 62
+#define PGID_MCIPV6 63
+
+/* Aggregation PGIDs, one per Link Aggregation Code */
+#define PGID_AGGR 64
+
+/* Source PGIDs, one per physical port */
+#define PGID_SRC 80
+
#define IFH_INJ_BYPASS BIT(31)
#define IFH_INJ_POP_CNT_DISABLE (3 << 28)
@@ -402,10 +462,15 @@ enum ocelot_tag_prefix {
struct ocelot;
struct ocelot_ops {
- void (*pcs_init)(struct ocelot *ocelot, int port);
int (*reset)(struct ocelot *ocelot);
};
+struct ocelot_acl_block {
+ struct list_head rules;
+ int count;
+ int pol_lpr;
+};
+
struct ocelot_port {
struct ocelot *ocelot;
@@ -447,14 +512,27 @@ struct ocelot {
/* Keep track of the vlan port masks */
u32 vlan_mask[VLAN_N_VID];
+ /* In tables like ANA:PORT and the ANA:PGID:PGID mask,
+ * the CPU is located after the physical ports (at the
+ * num_phys_ports index).
+ */
u8 num_phys_ports;
- u8 num_cpu_ports;
- u8 cpu;
+
+ int npi;
+
+ enum ocelot_tag_prefix inj_prefix;
+ enum ocelot_tag_prefix xtr_prefix;
u32 *lags;
struct list_head multicast;
+ struct ocelot_acl_block acl_block;
+
+ const struct vcap_field *vcap_is2_keys;
+ const struct vcap_field *vcap_is2_actions;
+ const struct vcap_props *vcap;
+
/* Workqueue to check statistics for overflow with its lock */
struct mutex stats_lock;
u64 *stats;
@@ -469,8 +547,11 @@ struct ocelot {
struct mutex ptp_lock;
/* Protects the PTP clock */
spinlock_t ptp_clock_lock;
+};
- void (*port_pcs_init)(struct ocelot_port *port);
+struct ocelot_policer {
+ u32 rate; /* kilobits per second */
+ u32 burst; /* bytes */
};
#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
@@ -500,9 +581,9 @@ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
int ocelot_regfields_init(struct ocelot *ocelot,
const struct reg_field *const regfields);
struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res);
-void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
- enum ocelot_tag_prefix injection,
- enum ocelot_tag_prefix extraction);
+void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction);
int ocelot_init(struct ocelot *ocelot);
void ocelot_deinit(struct ocelot *ocelot);
void ocelot_init_port(struct ocelot *ocelot, int port);
@@ -541,5 +622,16 @@ int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
struct sk_buff *skb);
void ocelot_get_txtstamp(struct ocelot *ocelot);
+void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu);
+int ocelot_get_max_mtu(struct ocelot *ocelot, int port);
+int ocelot_port_policer_add(struct ocelot *ocelot, int port,
+ struct ocelot_policer *pol);
+int ocelot_port_policer_del(struct ocelot *ocelot, int port);
+int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress);
+int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress);
+int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f, bool ingress);
#endif
diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h
new file mode 100644
index 000000000000..5748373ab4d3
--- /dev/null
+++ b/include/soc/mscc/ocelot_vcap.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ * Microsemi Ocelot Switch driver
+ * Copyright (c) 2019 Microsemi Corporation
+ */
+
+#ifndef _OCELOT_VCAP_H_
+#define _OCELOT_VCAP_H_
+
+/* =================================================================
+ * VCAP Common
+ * =================================================================
+ */
+
+enum {
+ /* VCAP_IS1, */
+ VCAP_IS2,
+ /* VCAP_ES0, */
+};
+
+struct vcap_props {
+ u16 tg_width; /* Type-group width (in bits) */
+ u16 sw_count; /* Sub word count */
+ u16 entry_count; /* Entry count */
+ u16 entry_words; /* Number of entry words */
+ u16 entry_width; /* Entry width (in bits) */
+ u16 action_count; /* Action count */
+ u16 action_words; /* Number of action words */
+ u16 action_width; /* Action width (in bits) */
+ u16 action_type_width; /* Action type width (in bits) */
+ struct {
+ u16 width; /* Action type width (in bits) */
+ u16 count; /* Action type sub word count */
+ } action_table[2];
+ u16 counter_words; /* Number of counter words */
+ u16 counter_width; /* Counter width (in bits) */
+};
+
+/* VCAP Type-Group values */
+#define VCAP_TG_NONE 0 /* Entry is invalid */
+#define VCAP_TG_FULL 1 /* Full entry */
+#define VCAP_TG_HALF 2 /* Half entry */
+#define VCAP_TG_QUARTER 3 /* Quarter entry */
+
+/* =================================================================
+ * VCAP IS2
+ * =================================================================
+ */
+
+/* IS2 half key types */
+#define IS2_TYPE_ETYPE 0
+#define IS2_TYPE_LLC 1
+#define IS2_TYPE_SNAP 2
+#define IS2_TYPE_ARP 3
+#define IS2_TYPE_IP_UDP_TCP 4
+#define IS2_TYPE_IP_OTHER 5
+#define IS2_TYPE_IPV6 6
+#define IS2_TYPE_OAM 7
+#define IS2_TYPE_SMAC_SIP6 8
+#define IS2_TYPE_ANY 100 /* Pseudo type */
+
+/* IS2 half key type mask for matching any IP */
+#define IS2_TYPE_MASK_IP_ANY 0xe
+
+enum {
+ IS2_ACTION_TYPE_NORMAL,
+ IS2_ACTION_TYPE_SMAC_SIP,
+ IS2_ACTION_TYPE_MAX,
+};
+
+/* IS2 MASK_MODE values */
+#define IS2_ACT_MASK_MODE_NONE 0
+#define IS2_ACT_MASK_MODE_FILTER 1
+#define IS2_ACT_MASK_MODE_POLICY 2
+#define IS2_ACT_MASK_MODE_REDIR 3
+
+/* IS2 REW_OP values */
+#define IS2_ACT_REW_OP_NONE 0
+#define IS2_ACT_REW_OP_PTP_ONE 2
+#define IS2_ACT_REW_OP_PTP_TWO 3
+#define IS2_ACT_REW_OP_SPECIAL 8
+#define IS2_ACT_REW_OP_PTP_ORG 9
+#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_1 (IS2_ACT_REW_OP_PTP_ONE | (1 << 3))
+#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_2 (IS2_ACT_REW_OP_PTP_ONE | (2 << 3))
+#define IS2_ACT_REW_OP_PTP_ONE_ADD_DELAY (IS2_ACT_REW_OP_PTP_ONE | (1 << 5))
+#define IS2_ACT_REW_OP_PTP_ONE_ADD_SUB BIT(7)
+
+#define VCAP_PORT_WIDTH 4
+
+/* IS2 quarter key - SMAC_SIP4 */
+#define IS2_QKO_IGR_PORT 0
+#define IS2_QKL_IGR_PORT VCAP_PORT_WIDTH
+#define IS2_QKO_L2_SMAC (IS2_QKO_IGR_PORT + IS2_QKL_IGR_PORT)
+#define IS2_QKL_L2_SMAC 48
+#define IS2_QKO_L3_IP4_SIP (IS2_QKO_L2_SMAC + IS2_QKL_L2_SMAC)
+#define IS2_QKL_L3_IP4_SIP 32
+
+enum vcap_is2_half_key_field {
+ /* Common */
+ VCAP_IS2_TYPE,
+ VCAP_IS2_HK_FIRST,
+ VCAP_IS2_HK_PAG,
+ VCAP_IS2_HK_RSV1,
+ VCAP_IS2_HK_IGR_PORT_MASK,
+ VCAP_IS2_HK_RSV2,
+ VCAP_IS2_HK_HOST_MATCH,
+ VCAP_IS2_HK_L2_MC,
+ VCAP_IS2_HK_L2_BC,
+ VCAP_IS2_HK_VLAN_TAGGED,
+ VCAP_IS2_HK_VID,
+ VCAP_IS2_HK_DEI,
+ VCAP_IS2_HK_PCP,
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ VCAP_IS2_HK_L2_DMAC,
+ VCAP_IS2_HK_L2_SMAC,
+ /* MAC_ETYPE (TYPE=000) */
+ VCAP_IS2_HK_MAC_ETYPE_ETYPE,
+ VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
+ VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1,
+ VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2,
+ /* MAC_LLC (TYPE=001) */
+ VCAP_IS2_HK_MAC_LLC_DMAC,
+ VCAP_IS2_HK_MAC_LLC_SMAC,
+ VCAP_IS2_HK_MAC_LLC_L2_LLC,
+ /* MAC_SNAP (TYPE=010) */
+ VCAP_IS2_HK_MAC_SNAP_SMAC,
+ VCAP_IS2_HK_MAC_SNAP_DMAC,
+ VCAP_IS2_HK_MAC_SNAP_L2_SNAP,
+ /* MAC_ARP (TYPE=011) */
+ VCAP_IS2_HK_MAC_ARP_SMAC,
+ VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK,
+ VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK,
+ VCAP_IS2_HK_MAC_ARP_LEN_OK,
+ VCAP_IS2_HK_MAC_ARP_TARGET_MATCH,
+ VCAP_IS2_HK_MAC_ARP_SENDER_MATCH,
+ VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN,
+ VCAP_IS2_HK_MAC_ARP_OPCODE,
+ VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP,
+ VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP,
+ VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP,
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ VCAP_IS2_HK_IP4,
+ VCAP_IS2_HK_L3_FRAGMENT,
+ VCAP_IS2_HK_L3_FRAG_OFS_GT0,
+ VCAP_IS2_HK_L3_OPTIONS,
+ VCAP_IS2_HK_IP4_L3_TTL_GT0,
+ VCAP_IS2_HK_L3_TOS,
+ VCAP_IS2_HK_L3_IP4_DIP,
+ VCAP_IS2_HK_L3_IP4_SIP,
+ VCAP_IS2_HK_DIP_EQ_SIP,
+ /* IP4_TCP_UDP (TYPE=100) */
+ VCAP_IS2_HK_TCP,
+ VCAP_IS2_HK_L4_SPORT,
+ VCAP_IS2_HK_L4_DPORT,
+ VCAP_IS2_HK_L4_RNG,
+ VCAP_IS2_HK_L4_SPORT_EQ_DPORT,
+ VCAP_IS2_HK_L4_SEQUENCE_EQ0,
+ VCAP_IS2_HK_L4_URG,
+ VCAP_IS2_HK_L4_ACK,
+ VCAP_IS2_HK_L4_PSH,
+ VCAP_IS2_HK_L4_RST,
+ VCAP_IS2_HK_L4_SYN,
+ VCAP_IS2_HK_L4_FIN,
+ VCAP_IS2_HK_L4_1588_DOM,
+ VCAP_IS2_HK_L4_1588_VER,
+ /* IP4_OTHER (TYPE=101) */
+ VCAP_IS2_HK_IP4_L3_PROTO,
+ VCAP_IS2_HK_L3_PAYLOAD,
+ /* IP6_STD (TYPE=110) */
+ VCAP_IS2_HK_IP6_L3_TTL_GT0,
+ VCAP_IS2_HK_IP6_L3_PROTO,
+ VCAP_IS2_HK_L3_IP6_SIP,
+ /* OAM (TYPE=111) */
+ VCAP_IS2_HK_OAM_MEL_FLAGS,
+ VCAP_IS2_HK_OAM_VER,
+ VCAP_IS2_HK_OAM_OPCODE,
+ VCAP_IS2_HK_OAM_FLAGS,
+ VCAP_IS2_HK_OAM_MEPID,
+ VCAP_IS2_HK_OAM_CCM_CNTS_EQ0,
+ VCAP_IS2_HK_OAM_IS_Y1731,
+};
+
+struct vcap_field {
+ int offset;
+ int length;
+};
+
+enum vcap_is2_action_field {
+ VCAP_IS2_ACT_HIT_ME_ONCE,
+ VCAP_IS2_ACT_CPU_COPY_ENA,
+ VCAP_IS2_ACT_CPU_QU_NUM,
+ VCAP_IS2_ACT_MASK_MODE,
+ VCAP_IS2_ACT_MIRROR_ENA,
+ VCAP_IS2_ACT_LRN_DIS,
+ VCAP_IS2_ACT_POLICE_ENA,
+ VCAP_IS2_ACT_POLICE_IDX,
+ VCAP_IS2_ACT_POLICE_VCAP_ONLY,
+ VCAP_IS2_ACT_PORT_MASK,
+ VCAP_IS2_ACT_REW_OP,
+ VCAP_IS2_ACT_SMAC_REPLACE_ENA,
+ VCAP_IS2_ACT_RSV,
+ VCAP_IS2_ACT_ACL_ID,
+ VCAP_IS2_ACT_HIT_CNT,
+};
+
+#endif /* _OCELOT_VCAP_H_ */
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index b04c29270973..1ce3be63add1 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -75,13 +75,17 @@ static inline void bpf_test_probe_##call(void) \
check_trace_callback_type_##call(__bpf_trace_##template); \
} \
typedef void (*btf_trace_##call)(void *__data, proto); \
-static struct bpf_raw_event_map __used \
- __attribute__((section("__bpf_raw_tp_map"))) \
-__bpf_trace_tp_map_##call = { \
- .tp = &__tracepoint_##call, \
- .bpf_func = (void *)(btf_trace_##call)__bpf_trace_##template, \
- .num_args = COUNT_ARGS(args), \
- .writable_size = size, \
+static union { \
+ struct bpf_raw_event_map event; \
+ btf_trace_##call handler; \
+} __bpf_trace_tp_map_##call __used \
+__attribute__((section("__bpf_raw_tp_map"))) = { \
+ .event = { \
+ .tp = &__tracepoint_##call, \
+ .bpf_func = __bpf_trace_##template, \
+ .num_args = COUNT_ARGS(args), \
+ .writable_size = size, \
+ }, \
};
#define FIRST(x, ...) x
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 22f235260a3a..2e29a671d67e 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -73,7 +73,7 @@ struct bpf_insn {
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
- __u8 data[0]; /* Arbitrary size */
+ __u8 data[]; /* Arbitrary size */
};
struct bpf_cgroup_storage_key {
@@ -111,6 +111,8 @@ enum bpf_cmd {
BPF_MAP_LOOKUP_AND_DELETE_BATCH,
BPF_MAP_UPDATE_BATCH,
BPF_MAP_DELETE_BATCH,
+ BPF_LINK_CREATE,
+ BPF_LINK_UPDATE,
};
enum bpf_map_type {
@@ -181,6 +183,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_TRACING,
BPF_PROG_TYPE_STRUCT_OPS,
BPF_PROG_TYPE_EXT,
+ BPF_PROG_TYPE_LSM,
};
enum bpf_attach_type {
@@ -210,6 +213,8 @@ enum bpf_attach_type {
BPF_TRACE_RAW_TP,
BPF_TRACE_FENTRY,
BPF_TRACE_FEXIT,
+ BPF_MODIFY_RETURN,
+ BPF_LSM_MAC,
__MAX_BPF_ATTACH_TYPE
};
@@ -325,44 +330,46 @@ enum bpf_attach_type {
#define BPF_PSEUDO_CALL 1
/* flags for BPF_MAP_UPDATE_ELEM command */
-#define BPF_ANY 0 /* create new element or update existing */
-#define BPF_NOEXIST 1 /* create new element if it didn't exist */
-#define BPF_EXIST 2 /* update existing element */
-#define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */
+enum {
+ BPF_ANY = 0, /* create new element or update existing */
+ BPF_NOEXIST = 1, /* create new element if it didn't exist */
+ BPF_EXIST = 2, /* update existing element */
+ BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */
+};
/* flags for BPF_MAP_CREATE command */
-#define BPF_F_NO_PREALLOC (1U << 0)
+enum {
+ BPF_F_NO_PREALLOC = (1U << 0),
/* Instead of having one common LRU list in the
* BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
* which can scale and perform better.
* Note, the LRU nodes (including free nodes) cannot be moved
* across different LRU lists.
*/
-#define BPF_F_NO_COMMON_LRU (1U << 1)
+ BPF_F_NO_COMMON_LRU = (1U << 1),
/* Specify numa node during map creation */
-#define BPF_F_NUMA_NODE (1U << 2)
-
-#define BPF_OBJ_NAME_LEN 16U
+ BPF_F_NUMA_NODE = (1U << 2),
/* Flags for accessing BPF object from syscall side. */
-#define BPF_F_RDONLY (1U << 3)
-#define BPF_F_WRONLY (1U << 4)
+ BPF_F_RDONLY = (1U << 3),
+ BPF_F_WRONLY = (1U << 4),
/* Flag for stack_map, store build_id+offset instead of pointer */
-#define BPF_F_STACK_BUILD_ID (1U << 5)
+ BPF_F_STACK_BUILD_ID = (1U << 5),
/* Zero-initialize hash function seed. This should only be used for testing. */
-#define BPF_F_ZERO_SEED (1U << 6)
+ BPF_F_ZERO_SEED = (1U << 6),
/* Flags for accessing BPF object from program side. */
-#define BPF_F_RDONLY_PROG (1U << 7)
-#define BPF_F_WRONLY_PROG (1U << 8)
+ BPF_F_RDONLY_PROG = (1U << 7),
+ BPF_F_WRONLY_PROG = (1U << 8),
/* Clone map from listener for newly accepted socket */
-#define BPF_F_CLONE (1U << 9)
+ BPF_F_CLONE = (1U << 9),
/* Enable memory-mapping BPF map */
-#define BPF_F_MMAPABLE (1U << 10)
+ BPF_F_MMAPABLE = (1U << 10),
+};
/* Flags for BPF_PROG_QUERY. */
@@ -391,6 +398,8 @@ struct bpf_stack_build_id {
};
};
+#define BPF_OBJ_NAME_LEN 16U
+
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
__u32 map_type; /* one of enum bpf_map_type */
@@ -534,7 +543,7 @@ union bpf_attr {
__u32 prog_cnt;
} query;
- struct {
+ struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
__u64 name;
__u32 prog_fd;
} raw_tracepoint;
@@ -562,6 +571,24 @@ union bpf_attr {
__u64 probe_offset; /* output: probe_offset */
__u64 probe_addr; /* output: probe_addr */
} task_fd_query;
+
+ struct { /* struct used by BPF_LINK_CREATE command */
+ __u32 prog_fd; /* eBPF program to attach */
+ __u32 target_fd; /* object to attach to */
+ __u32 attach_type; /* attach type */
+ __u32 flags; /* extra flags */
+ } link_create;
+
+ struct { /* struct used by BPF_LINK_UPDATE command */
+ __u32 link_fd; /* link fd */
+ /* new program fd to update link with */
+ __u32 new_prog_fd;
+ __u32 flags; /* extra flags */
+ /* fd of the link's expected current program; specified only if
+ * the BPF_F_REPLACE flag is set in flags */
+ __u32 old_prog_fd;
+ } link_update;
+
} __attribute__((aligned(8)));
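
A hedged userspace sketch of driving the new BPF_LINK_CREATE command with the raw syscall, assuming prog_fd and cgroup_fd were obtained elsewhere and that the headers in use carry the enums added by this patch; the function name and attach type are illustrative.

	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Hedged sketch: create a BPF link pinning prog_fd to cgroup_fd. */
	static int example_link_create(int prog_fd, int cgroup_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.link_create.prog_fd = prog_fd;
		attr.link_create.target_fd = cgroup_fd;
		attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;

		/* Returns the new link fd, or -1 with errno set. */
		return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
	}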
/* The description below is an attempt at providing documentation to eBPF
@@ -2890,6 +2917,114 @@ union bpf_attr {
* Obtain the 64bit jiffies
* Return
* The 64 bit jiffies
+ *
+ * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
+ * Description
+ * For an eBPF program attached to a perf event, retrieve the
+ * branch records (struct perf_branch_entry) associated with *ctx*
+ * and store them in the buffer pointed to by *buf*, up to *size*
+ * bytes.
+ * Return
+ * On success, number of bytes written to *buf*. On error, a
+ * negative value.
+ *
+ * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
+ * instead return the number of bytes required to store all the
+ * branch entries. If this flag is set, *buf* may be NULL.
+ *
+ * **-EINVAL** if arguments invalid or **size** not a multiple
+ * of sizeof(struct perf_branch_entry).
+ *
+ * **-ENOENT** if architecture does not support branch records.
+ *
+ * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
+ * Description
+ * Returns 0 on success; the values of *pid* and *tgid* as seen from
+ * the current *namespace* are returned in *nsdata*.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** if the supplied *dev* and *ino* don't match the dev_t
+ * and inode number of the current task's nsfs, or if the conversion
+ * of *dev* to dev_t lost high bits.
+ *
+ * **-ENOENT** if the pid namespace does not exist for the current task.
+ *
+ * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * Description
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ * event must have the following attributes: **PERF_SAMPLE_RAW**
+ * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ * The *flags* are used to indicate the index in *map* for which
+ * the value must be put, masked with **BPF_F_INDEX_MASK**.
+ * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ * to indicate that the index of the current CPU core should be
+ * used.
+ *
+ * The value to write, of *size*, is passed through eBPF stack and
+ * pointed by *data*.
+ *
+ * *ctx* is a pointer to in-kernel struct xdp_buff.
+ *
+ * This helper is similar to **bpf_perf_event_output**\ () but
+ * restricted to raw_tracepoint bpf programs.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * u64 bpf_get_netns_cookie(void *ctx)
+ * Description
+ * Retrieve the cookie (generated by the kernel) of the network
+ * namespace the input *ctx* is associated with. The network
+ * namespace cookie remains stable for its lifetime and provides
+ * a global identifier that can be assumed unique. If *ctx* is
+ * NULL, then the helper returns the cookie for the initial
+ * network namespace. The cookie itself is very similar to that
+ * of bpf_get_socket_cookie() helper, but for network namespaces
+ * instead of sockets.
+ * Return
+ * An 8-byte long opaque number.
+ *
+ * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level)
+ * Description
+ * Return the id of the cgroup v2 ancestor, at *ancestor_level*, of the
+ * cgroup associated with the current task. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy increments
+ * the level. If *ancestor_level* equals the level of the cgroup
+ * associated with the current task, the return value is the same as
+ * that of **bpf_get_current_cgroup_id**\ ().
+ *
+ * The helper is useful for implementing policies based on cgroups
+ * that are higher in the hierarchy than the immediate cgroup
+ * associated with the current task.
+ *
+ * The format of the returned id and the helper's limitations are the
+ * same as in **bpf_get_current_cgroup_id**\ ().
+ * Return
+ * The id is returned, or 0 if the id could not be retrieved.
+ *
+ * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
+ * Description
+ * Assign the *sk* to the *skb*. When combined with appropriate
+ * routing configuration that steers the packet towards the socket,
+ * this will cause *skb* to be delivered to the specified socket.
+ * Subsequent redirection of *skb* via **bpf_redirect**\ (),
+ * **bpf_clone_redirect**\ () or other methods outside of BPF may
+ * interfere with successful delivery to the socket.
+ *
+ * This operation is only valid from the TC ingress path.
+ *
+ * The *flags* argument must be zero.
+ * Return
+ * 0 on success, or a negative errno in case of failure.
+ *
+ * * **-EINVAL** Unsupported flags specified.
+ * * **-ENOENT** Socket is unavailable for assignment.
+ * * **-ENETUNREACH** Socket is unreachable (wrong netns).
+ * * **-EOPNOTSUPP** Unsupported operation, for example a
+ * call from outside of TC ingress.
+ * * **-ESOCKTNOSUPPORT** Socket type not supported (reuseport).
*/
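
A hedged sketch of the sk_assign flow from a TC ingress program. Extraction of the tuple from the packet headers is elided, and SERVER_PORT plus the loopback address are illustrative; only the helper signatures come from the header above.

	#include <linux/bpf.h>
	#include <linux/pkt_cls.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_endian.h>

	#define SERVER_PORT 4321	/* illustrative */

	SEC("tc")
	int steer(struct __sk_buff *skb)
	{
		struct bpf_sock_tuple tuple = {};
		struct bpf_sock *sk;

		/* Tuple extraction from the packet is elided; the lookup
		 * here is keyed only on an assumed local address and port.
		 */
		tuple.ipv4.daddr = bpf_htonl(0x7f000001);	/* 127.0.0.1 */
		tuple.ipv4.dport = bpf_htons(SERVER_PORT);

		sk = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
					BPF_F_CURRENT_NETNS, 0);
		if (!sk)
			return TC_ACT_OK;

		/* flags must be zero; on success, delivery of this skb
		 * bypasses the normal socket lookup.
		 */
		bpf_sk_assign(skb, sk, 0);
		bpf_sk_release(sk);
		return TC_ACT_OK;
	}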
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -3010,7 +3145,13 @@ union bpf_attr {
FN(probe_read_kernel_str), \
FN(tcp_send_ack), \
FN(send_signal_thread), \
- FN(jiffies64),
+ FN(jiffies64), \
+ FN(read_branch_records), \
+ FN(get_ns_current_pid_tgid), \
+ FN(xdp_output), \
+ FN(get_netns_cookie), \
+ FN(get_current_ancestor_cgroup_id), \
+ FN(sk_assign),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -3025,69 +3166,100 @@ enum bpf_func_id {
/* All flags used by eBPF helper functions, placed here. */
/* BPF_FUNC_skb_store_bytes flags. */
-#define BPF_F_RECOMPUTE_CSUM (1ULL << 0)
-#define BPF_F_INVALIDATE_HASH (1ULL << 1)
+enum {
+ BPF_F_RECOMPUTE_CSUM = (1ULL << 0),
+ BPF_F_INVALIDATE_HASH = (1ULL << 1),
+};
/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
* First 4 bits are for passing the header field size.
*/
-#define BPF_F_HDR_FIELD_MASK 0xfULL
+enum {
+ BPF_F_HDR_FIELD_MASK = 0xfULL,
+};
/* BPF_FUNC_l4_csum_replace flags. */
-#define BPF_F_PSEUDO_HDR (1ULL << 4)
-#define BPF_F_MARK_MANGLED_0 (1ULL << 5)
-#define BPF_F_MARK_ENFORCE (1ULL << 6)
+enum {
+ BPF_F_PSEUDO_HDR = (1ULL << 4),
+ BPF_F_MARK_MANGLED_0 = (1ULL << 5),
+ BPF_F_MARK_ENFORCE = (1ULL << 6),
+};
/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
-#define BPF_F_INGRESS (1ULL << 0)
+enum {
+ BPF_F_INGRESS = (1ULL << 0),
+};
/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
-#define BPF_F_TUNINFO_IPV6 (1ULL << 0)
+enum {
+ BPF_F_TUNINFO_IPV6 = (1ULL << 0),
+};
/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
-#define BPF_F_SKIP_FIELD_MASK 0xffULL
-#define BPF_F_USER_STACK (1ULL << 8)
+enum {
+ BPF_F_SKIP_FIELD_MASK = 0xffULL,
+ BPF_F_USER_STACK = (1ULL << 8),
/* flags used by BPF_FUNC_get_stackid only. */
-#define BPF_F_FAST_STACK_CMP (1ULL << 9)
-#define BPF_F_REUSE_STACKID (1ULL << 10)
+ BPF_F_FAST_STACK_CMP = (1ULL << 9),
+ BPF_F_REUSE_STACKID = (1ULL << 10),
/* flags used by BPF_FUNC_get_stack only. */
-#define BPF_F_USER_BUILD_ID (1ULL << 11)
+ BPF_F_USER_BUILD_ID = (1ULL << 11),
+};
/* BPF_FUNC_skb_set_tunnel_key flags. */
-#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
-#define BPF_F_DONT_FRAGMENT (1ULL << 2)
-#define BPF_F_SEQ_NUMBER (1ULL << 3)
+enum {
+ BPF_F_ZERO_CSUM_TX = (1ULL << 1),
+ BPF_F_DONT_FRAGMENT = (1ULL << 2),
+ BPF_F_SEQ_NUMBER = (1ULL << 3),
+};
/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
* BPF_FUNC_perf_event_read_value flags.
*/
-#define BPF_F_INDEX_MASK 0xffffffffULL
-#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
+enum {
+ BPF_F_INDEX_MASK = 0xffffffffULL,
+ BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK,
/* BPF_FUNC_perf_event_output for sk_buff input context. */
-#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
+ BPF_F_CTXLEN_MASK = (0xfffffULL << 32),
+};
/* Current network namespace */
-#define BPF_F_CURRENT_NETNS (-1L)
+enum {
+ BPF_F_CURRENT_NETNS = (-1L),
+};
/* BPF_FUNC_skb_adjust_room flags. */
-#define BPF_F_ADJ_ROOM_FIXED_GSO (1ULL << 0)
+enum {
+ BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0),
+ BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1),
+ BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2),
+ BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3),
+ BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
+};
-#define BPF_ADJ_ROOM_ENCAP_L2_MASK 0xff
-#define BPF_ADJ_ROOM_ENCAP_L2_SHIFT 56
+enum {
+ BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff,
+ BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56,
+};
-#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 (1ULL << 1)
-#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 (1ULL << 2)
-#define BPF_F_ADJ_ROOM_ENCAP_L4_GRE (1ULL << 3)
-#define BPF_F_ADJ_ROOM_ENCAP_L4_UDP (1ULL << 4)
#define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \
BPF_ADJ_ROOM_ENCAP_L2_MASK) \
<< BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
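
For instance, a hedged sketch of combining these room flags for an IPv4 + GRE encap that also carries an inner Ethernet header; the 4-byte GRE length assumes a bare header with no options, and the function name is illustrative.

	#include <linux/bpf.h>
	#include <linux/if_ether.h>
	#include <linux/ip.h>
	#include <bpf/bpf_helpers.h>

	/* Hedged sketch: grow headroom for IPv4 + GRE + inner Ethernet. */
	static __always_inline int example_grow_encap(struct __sk_buff *skb)
	{
		__u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
			      BPF_F_ADJ_ROOM_ENCAP_L4_GRE |
			      BPF_F_ADJ_ROOM_ENCAP_L2(sizeof(struct ethhdr));
		__u32 len = sizeof(struct iphdr) + 4 /* bare GRE */ +
			    sizeof(struct ethhdr);

		return bpf_skb_adjust_room(skb, len, BPF_ADJ_ROOM_MAC, flags);
	}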
/* BPF_FUNC_sysctl_get_name flags. */
-#define BPF_F_SYSCTL_BASE_NAME (1ULL << 0)
+enum {
+ BPF_F_SYSCTL_BASE_NAME = (1ULL << 0),
+};
/* BPF_FUNC_sk_storage_get flags */
-#define BPF_SK_STORAGE_GET_F_CREATE (1ULL << 0)
+enum {
+ BPF_SK_STORAGE_GET_F_CREATE = (1ULL << 0),
+};
+
+/* BPF_FUNC_read_branch_records flags. */
+enum {
+ BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0),
+};
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
@@ -3153,6 +3325,7 @@ struct __sk_buff {
__u32 wire_len;
__u32 gso_segs;
__bpf_md_ptr(struct bpf_sock *, sk);
+ __u32 gso_size;
};
struct bpf_tunnel_key {
@@ -3505,13 +3678,14 @@ struct bpf_sock_ops {
};
/* Definitions for bpf_sock_ops_cb_flags */
-#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
-#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
-#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
-#define BPF_SOCK_OPS_RTT_CB_FLAG (1<<3)
-#define BPF_SOCK_OPS_ALL_CB_FLAGS 0xF /* Mask of all currently
- * supported cb flags
- */
+enum {
+ BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0),
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1),
+ BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2),
+ BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3),
+/* Mask of all currently supported cb flags */
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0xF,
+};
/* List of known BPF sock_ops operators.
* New entries can only be added at the end
@@ -3590,8 +3764,10 @@ enum {
BPF_TCP_MAX_STATES /* Leave at the end! */
};
-#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
-#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */
+enum {
+ TCP_BPF_IW = 1001, /* Set TCP initial congestion window */
+ TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */
+};
struct bpf_perf_event_value {
__u64 counter;
@@ -3599,12 +3775,16 @@ struct bpf_perf_event_value {
__u64 running;
};
-#define BPF_DEVCG_ACC_MKNOD (1ULL << 0)
-#define BPF_DEVCG_ACC_READ (1ULL << 1)
-#define BPF_DEVCG_ACC_WRITE (1ULL << 2)
+enum {
+ BPF_DEVCG_ACC_MKNOD = (1ULL << 0),
+ BPF_DEVCG_ACC_READ = (1ULL << 1),
+ BPF_DEVCG_ACC_WRITE = (1ULL << 2),
+};
-#define BPF_DEVCG_DEV_BLOCK (1ULL << 0)
-#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
+enum {
+ BPF_DEVCG_DEV_BLOCK = (1ULL << 0),
+ BPF_DEVCG_DEV_CHAR = (1ULL << 1),
+};
struct bpf_cgroup_dev_ctx {
/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
@@ -3620,8 +3800,10 @@ struct bpf_raw_tracepoint_args {
/* DIRECT: Skip the FIB rules and go to FIB table associated with device
* OUTPUT: Do lookup from egress perspective; default is ingress
*/
-#define BPF_FIB_LOOKUP_DIRECT (1U << 0)
-#define BPF_FIB_LOOKUP_OUTPUT (1U << 1)
+enum {
+ BPF_FIB_LOOKUP_DIRECT = (1U << 0),
+ BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
+};
enum {
BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
@@ -3693,9 +3875,11 @@ enum bpf_task_fd_type {
BPF_FD_TYPE_URETPROBE, /* filename + offset */
};
-#define BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG (1U << 0)
-#define BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL (1U << 1)
-#define BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP (1U << 2)
+enum {
+ BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0),
+ BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1),
+ BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2),
+};
struct bpf_flow_keys {
__u16 nhoff;
@@ -3761,4 +3945,8 @@ struct bpf_sockopt {
__s32 retval;
};
+struct bpf_pidns_info {
+ __u32 pid;
+ __u32 tgid;
+};
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index ae37fd4d194a..1ae90e06c06d 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -117,6 +117,11 @@ enum devlink_command {
DEVLINK_CMD_TRAP_GROUP_NEW,
DEVLINK_CMD_TRAP_GROUP_DEL,
+ DEVLINK_CMD_TRAP_POLICER_GET, /* can dump */
+ DEVLINK_CMD_TRAP_POLICER_SET,
+ DEVLINK_CMD_TRAP_POLICER_NEW,
+ DEVLINK_CMD_TRAP_POLICER_DEL,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -187,6 +192,7 @@ enum devlink_port_flavour {
* for the PCI VF. It is an internal
* port that faces the PCI VF.
*/
+ DEVLINK_PORT_FLAVOUR_VIRTUAL, /* Any virtual port facing the user. */
};
enum devlink_param_cmode {
@@ -216,6 +222,7 @@ enum devlink_param_reset_dev_on_drv_probe_value {
enum {
DEVLINK_ATTR_STATS_RX_PACKETS, /* u64 */
DEVLINK_ATTR_STATS_RX_BYTES, /* u64 */
+ DEVLINK_ATTR_STATS_RX_DROPPED, /* u64 */
__DEVLINK_ATTR_STATS_MAX,
DEVLINK_ATTR_STATS_MAX = __DEVLINK_ATTR_STATS_MAX - 1
@@ -252,6 +259,8 @@ enum devlink_trap_type {
enum {
/* Trap can report input port as metadata */
DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT,
+ /* Trap can report flow action cookie as metadata */
+ DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE,
};
enum devlink_attr {
@@ -426,6 +435,13 @@ enum devlink_attr {
DEVLINK_ATTR_NETNS_FD, /* u32 */
DEVLINK_ATTR_NETNS_PID, /* u32 */
DEVLINK_ATTR_NETNS_ID, /* u32 */
+
+ DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP, /* u8 */
+
+ DEVLINK_ATTR_TRAP_POLICER_ID, /* u32 */
+ DEVLINK_ATTR_TRAP_POLICER_RATE, /* u64 */
+ DEVLINK_ATTR_TRAP_POLICER_BURST, /* u64 */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 4295ebfa2f91..92f737f10117 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -596,6 +596,9 @@ struct ethtool_pauseparam {
* @ETH_SS_LINK_MODES: link mode names
* @ETH_SS_MSG_CLASSES: debug message class names
* @ETH_SS_WOL_MODES: wake-on-lan modes
+ * @ETH_SS_SOF_TIMESTAMPING: SOF_TIMESTAMPING_* flags
+ * @ETH_SS_TS_TX_TYPES: timestamping Tx types
+ * @ETH_SS_TS_RX_FILTERS: timestamping Rx filters
*/
enum ethtool_stringset {
ETH_SS_TEST = 0,
@@ -610,6 +613,9 @@ enum ethtool_stringset {
ETH_SS_LINK_MODES,
ETH_SS_MSG_CLASSES,
ETH_SS_WOL_MODES,
+ ETH_SS_SOF_TIMESTAMPING,
+ ETH_SS_TS_TX_TYPES,
+ ETH_SS_TS_RX_FILTERS,
/* add new constants above here */
ETH_SS_COUNT
@@ -1330,6 +1336,7 @@ enum ethtool_fec_config_bits {
ETHTOOL_FEC_OFF_BIT,
ETHTOOL_FEC_RS_BIT,
ETHTOOL_FEC_BASER_BIT,
+ ETHTOOL_FEC_LLRS_BIT,
};
#define ETHTOOL_FEC_NONE (1 << ETHTOOL_FEC_NONE_BIT)
@@ -1337,6 +1344,7 @@ enum ethtool_fec_config_bits {
#define ETHTOOL_FEC_OFF (1 << ETHTOOL_FEC_OFF_BIT)
#define ETHTOOL_FEC_RS (1 << ETHTOOL_FEC_RS_BIT)
#define ETHTOOL_FEC_BASER (1 << ETHTOOL_FEC_BASER_BIT)
+#define ETHTOOL_FEC_LLRS (1 << ETHTOOL_FEC_LLRS_BIT)
/* CMDs currently supported */
#define ETHTOOL_GSET 0x00000001 /* DEPRECATED, Get settings.
@@ -1521,7 +1529,7 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71,
ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72,
ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73,
-
+ ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74,
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
};
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 7e0b460f872c..7fde76366ba4 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -24,6 +24,21 @@ enum {
ETHTOOL_MSG_DEBUG_SET,
ETHTOOL_MSG_WOL_GET,
ETHTOOL_MSG_WOL_SET,
+ ETHTOOL_MSG_FEATURES_GET,
+ ETHTOOL_MSG_FEATURES_SET,
+ ETHTOOL_MSG_PRIVFLAGS_GET,
+ ETHTOOL_MSG_PRIVFLAGS_SET,
+ ETHTOOL_MSG_RINGS_GET,
+ ETHTOOL_MSG_RINGS_SET,
+ ETHTOOL_MSG_CHANNELS_GET,
+ ETHTOOL_MSG_CHANNELS_SET,
+ ETHTOOL_MSG_COALESCE_GET,
+ ETHTOOL_MSG_COALESCE_SET,
+ ETHTOOL_MSG_PAUSE_GET,
+ ETHTOOL_MSG_PAUSE_SET,
+ ETHTOOL_MSG_EEE_GET,
+ ETHTOOL_MSG_EEE_SET,
+ ETHTOOL_MSG_TSINFO_GET,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -43,6 +58,22 @@ enum {
ETHTOOL_MSG_DEBUG_NTF,
ETHTOOL_MSG_WOL_GET_REPLY,
ETHTOOL_MSG_WOL_NTF,
+ ETHTOOL_MSG_FEATURES_GET_REPLY,
+ ETHTOOL_MSG_FEATURES_SET_REPLY,
+ ETHTOOL_MSG_FEATURES_NTF,
+ ETHTOOL_MSG_PRIVFLAGS_GET_REPLY,
+ ETHTOOL_MSG_PRIVFLAGS_NTF,
+ ETHTOOL_MSG_RINGS_GET_REPLY,
+ ETHTOOL_MSG_RINGS_NTF,
+ ETHTOOL_MSG_CHANNELS_GET_REPLY,
+ ETHTOOL_MSG_CHANNELS_NTF,
+ ETHTOOL_MSG_COALESCE_GET_REPLY,
+ ETHTOOL_MSG_COALESCE_NTF,
+ ETHTOOL_MSG_PAUSE_GET_REPLY,
+ ETHTOOL_MSG_PAUSE_NTF,
+ ETHTOOL_MSG_EEE_GET_REPLY,
+ ETHTOOL_MSG_EEE_NTF,
+ ETHTOOL_MSG_TSINFO_GET_REPLY,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -228,6 +259,150 @@ enum {
ETHTOOL_A_WOL_MAX = __ETHTOOL_A_WOL_CNT - 1
};
+/* FEATURES */
+
+enum {
+ ETHTOOL_A_FEATURES_UNSPEC,
+ ETHTOOL_A_FEATURES_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_FEATURES_HW, /* bitset */
+ ETHTOOL_A_FEATURES_WANTED, /* bitset */
+ ETHTOOL_A_FEATURES_ACTIVE, /* bitset */
+ ETHTOOL_A_FEATURES_NOCHANGE, /* bitset */
+
+ /* add new constants above here */
+ __ETHTOOL_A_FEATURES_CNT,
+ ETHTOOL_A_FEATURES_MAX = __ETHTOOL_A_FEATURES_CNT - 1
+};
+
+/* PRIVFLAGS */
+
+enum {
+ ETHTOOL_A_PRIVFLAGS_UNSPEC,
+ ETHTOOL_A_PRIVFLAGS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PRIVFLAGS_FLAGS, /* bitset */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PRIVFLAGS_CNT,
+ ETHTOOL_A_PRIVFLAGS_MAX = __ETHTOOL_A_PRIVFLAGS_CNT - 1
+};
+
+/* RINGS */
+
+enum {
+ ETHTOOL_A_RINGS_UNSPEC,
+ ETHTOOL_A_RINGS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_RINGS_RX_MAX, /* u32 */
+ ETHTOOL_A_RINGS_RX_MINI_MAX, /* u32 */
+ ETHTOOL_A_RINGS_RX_JUMBO_MAX, /* u32 */
+ ETHTOOL_A_RINGS_TX_MAX, /* u32 */
+ ETHTOOL_A_RINGS_RX, /* u32 */
+ ETHTOOL_A_RINGS_RX_MINI, /* u32 */
+ ETHTOOL_A_RINGS_RX_JUMBO, /* u32 */
+ ETHTOOL_A_RINGS_TX, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_RINGS_CNT,
+ ETHTOOL_A_RINGS_MAX = (__ETHTOOL_A_RINGS_CNT - 1)
+};
+
+/* CHANNELS */
+
+enum {
+ ETHTOOL_A_CHANNELS_UNSPEC,
+ ETHTOOL_A_CHANNELS_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_CHANNELS_RX_MAX, /* u32 */
+ ETHTOOL_A_CHANNELS_TX_MAX, /* u32 */
+ ETHTOOL_A_CHANNELS_OTHER_MAX, /* u32 */
+ ETHTOOL_A_CHANNELS_COMBINED_MAX, /* u32 */
+ ETHTOOL_A_CHANNELS_RX_COUNT, /* u32 */
+ ETHTOOL_A_CHANNELS_TX_COUNT, /* u32 */
+ ETHTOOL_A_CHANNELS_OTHER_COUNT, /* u32 */
+ ETHTOOL_A_CHANNELS_COMBINED_COUNT, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_CHANNELS_CNT,
+ ETHTOOL_A_CHANNELS_MAX = (__ETHTOOL_A_CHANNELS_CNT - 1)
+};
+
+/* COALESCE */
+
+enum {
+ ETHTOOL_A_COALESCE_UNSPEC,
+ ETHTOOL_A_COALESCE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_COALESCE_RX_USECS, /* u32 */
+ ETHTOOL_A_COALESCE_RX_MAX_FRAMES, /* u32 */
+ ETHTOOL_A_COALESCE_RX_USECS_IRQ, /* u32 */
+ ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ, /* u32 */
+ ETHTOOL_A_COALESCE_TX_USECS, /* u32 */
+ ETHTOOL_A_COALESCE_TX_MAX_FRAMES, /* u32 */
+ ETHTOOL_A_COALESCE_TX_USECS_IRQ, /* u32 */
+ ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ, /* u32 */
+ ETHTOOL_A_COALESCE_STATS_BLOCK_USECS, /* u32 */
+ ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX, /* u8 */
+ ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX, /* u8 */
+ ETHTOOL_A_COALESCE_PKT_RATE_LOW, /* u32 */
+ ETHTOOL_A_COALESCE_RX_USECS_LOW, /* u32 */
+ ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW, /* u32 */
+ ETHTOOL_A_COALESCE_TX_USECS_LOW, /* u32 */
+ ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW, /* u32 */
+ ETHTOOL_A_COALESCE_PKT_RATE_HIGH, /* u32 */
+ ETHTOOL_A_COALESCE_RX_USECS_HIGH, /* u32 */
+ ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH, /* u32 */
+ ETHTOOL_A_COALESCE_TX_USECS_HIGH, /* u32 */
+ ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH, /* u32 */
+ ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_COALESCE_CNT,
+ ETHTOOL_A_COALESCE_MAX = (__ETHTOOL_A_COALESCE_CNT - 1)
+};
+
+/* PAUSE */
+
+enum {
+ ETHTOOL_A_PAUSE_UNSPEC,
+ ETHTOOL_A_PAUSE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PAUSE_AUTONEG, /* u8 */
+ ETHTOOL_A_PAUSE_RX, /* u8 */
+ ETHTOOL_A_PAUSE_TX, /* u8 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PAUSE_CNT,
+ ETHTOOL_A_PAUSE_MAX = (__ETHTOOL_A_PAUSE_CNT - 1)
+};
+
+/* EEE */
+
+enum {
+ ETHTOOL_A_EEE_UNSPEC,
+ ETHTOOL_A_EEE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_EEE_MODES_OURS, /* bitset */
+ ETHTOOL_A_EEE_MODES_PEER, /* bitset */
+ ETHTOOL_A_EEE_ACTIVE, /* u8 */
+ ETHTOOL_A_EEE_ENABLED, /* u8 */
+ ETHTOOL_A_EEE_TX_LPI_ENABLED, /* u8 */
+ ETHTOOL_A_EEE_TX_LPI_TIMER, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_EEE_CNT,
+ ETHTOOL_A_EEE_MAX = (__ETHTOOL_A_EEE_CNT - 1)
+};
+
+/* TSINFO */
+
+enum {
+ ETHTOOL_A_TSINFO_UNSPEC,
+ ETHTOOL_A_TSINFO_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_TSINFO_TIMESTAMPING, /* bitset */
+ ETHTOOL_A_TSINFO_TX_TYPES, /* bitset */
+ ETHTOOL_A_TSINFO_RX_FILTERS, /* bitset */
+ ETHTOOL_A_TSINFO_PHC_INDEX, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_TSINFO_CNT,
+ ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1)
+};
+
/* generic netlink info */
#define ETHTOOL_GENL_NAME "ethtool"
#define ETHTOOL_GENL_VERSION 1
diff --git a/include/uapi/linux/if_arcnet.h b/include/uapi/linux/if_arcnet.h
index 683878036d76..b122cfac7128 100644
--- a/include/uapi/linux/if_arcnet.h
+++ b/include/uapi/linux/if_arcnet.h
@@ -60,7 +60,7 @@ struct arc_rfc1201 {
__u8 proto; /* protocol ID field - varies */
__u8 split_flag; /* for use with split packets */
__be16 sequence; /* sequence number */
- __u8 payload[0]; /* space remaining in packet (504 bytes)*/
+ __u8 payload[]; /* space remaining in packet (504 bytes)*/
};
#define RFC1201_HDR_SIZE 4
@@ -69,7 +69,7 @@ struct arc_rfc1201 {
*/
struct arc_rfc1051 {
__u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */
- __u8 payload[0]; /* 507 bytes */
+ __u8 payload[]; /* 507 bytes */
};
#define RFC1051_HDR_SIZE 1
@@ -80,7 +80,7 @@ struct arc_rfc1051 {
struct arc_eth_encap {
__u8 proto; /* Always ARC_P_ETHER */
struct ethhdr eth; /* standard ethernet header (yuck!) */
- __u8 payload[0]; /* 493 bytes */
+ __u8 payload[]; /* 493 bytes */
};
#define ETH_ENCAP_HDR_SIZE 14
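The payload[0] to payload[] conversions above replace GCC zero-length arrays with C99 flexible array members; the semantics are unchanged (sizeof the struct still excludes the payload), but compilers can now diagnose misuse. A minimal sketch of the usual allocation pattern, assuming a hypothetical helper and an ARP payload:

#include <stdlib.h>
#include <string.h>

/* sizeof(*pkt) covers only the fixed header (the proto byte here);
 * the flexible payload[] member contributes no size of its own.
 */
static struct arc_rfc1051 *alloc_rfc1051(const void *data, size_t len)
{
	struct arc_rfc1051 *pkt = malloc(sizeof(*pkt) + len);

	if (!pkt)
		return NULL;
	pkt->proto = ARC_P_RFC1051_ARP;	/* assumption: caller sends ARP */
	memcpy(pkt->payload, data, len);
	return pkt;
}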
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 42f7ca38ad80..bfe621ea51b3 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -174,6 +174,16 @@ struct br_vlan_msg {
__u32 ifindex;
};
+enum {
+ BRIDGE_VLANDB_DUMP_UNSPEC,
+ BRIDGE_VLANDB_DUMP_FLAGS,
+ __BRIDGE_VLANDB_DUMP_MAX,
+};
+#define BRIDGE_VLANDB_DUMP_MAX (__BRIDGE_VLANDB_DUMP_MAX - 1)
+
+/* flags used in BRIDGE_VLANDB_DUMP_FLAGS attribute to affect dumps */
+#define BRIDGE_VLANDB_DUMPF_STATS (1 << 0) /* Include stats in the dump */
+
/* Bridge vlan RTM attributes
* [BRIDGE_VLANDB_ENTRY] = {
* [BRIDGE_VLANDB_ENTRY_INFO]
@@ -192,10 +202,46 @@ enum {
BRIDGE_VLANDB_ENTRY_INFO,
BRIDGE_VLANDB_ENTRY_RANGE,
BRIDGE_VLANDB_ENTRY_STATE,
+ BRIDGE_VLANDB_ENTRY_TUNNEL_INFO,
+ BRIDGE_VLANDB_ENTRY_STATS,
__BRIDGE_VLANDB_ENTRY_MAX,
};
#define BRIDGE_VLANDB_ENTRY_MAX (__BRIDGE_VLANDB_ENTRY_MAX - 1)
+/* [BRIDGE_VLANDB_ENTRY] = {
+ * [BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = {
+ * [BRIDGE_VLANDB_TINFO_ID]
+ * ...
+ * }
+ * }
+ */
+enum {
+ BRIDGE_VLANDB_TINFO_UNSPEC,
+ BRIDGE_VLANDB_TINFO_ID,
+ BRIDGE_VLANDB_TINFO_CMD,
+ __BRIDGE_VLANDB_TINFO_MAX,
+};
+#define BRIDGE_VLANDB_TINFO_MAX (__BRIDGE_VLANDB_TINFO_MAX - 1)
+
+/* [BRIDGE_VLANDB_ENTRY] = {
+ * [BRIDGE_VLANDB_ENTRY_STATS] = {
+ * [BRIDGE_VLANDB_STATS_RX_BYTES]
+ * ...
+ * }
+ * ...
+ * }
+ */
+enum {
+ BRIDGE_VLANDB_STATS_UNSPEC,
+ BRIDGE_VLANDB_STATS_RX_BYTES,
+ BRIDGE_VLANDB_STATS_RX_PACKETS,
+ BRIDGE_VLANDB_STATS_TX_BYTES,
+ BRIDGE_VLANDB_STATS_TX_PACKETS,
+ BRIDGE_VLANDB_STATS_PAD,
+ __BRIDGE_VLANDB_STATS_MAX,
+};
+#define BRIDGE_VLANDB_STATS_MAX (__BRIDGE_VLANDB_STATS_MAX - 1)
+
/* Bridge multicast database attributes
* [MDBA_MDB] = {
* [MDBA_MDB_ENTRY] = {
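A hedged userspace sketch of how the new dump flag might be used: an RTM_GETVLAN dump request carrying BRIDGE_VLANDB_DUMP_FLAGS with BRIDGE_VLANDB_DUMPF_STATS set, so each returned entry includes the BRIDGE_VLANDB_ENTRY_STATS nest. The libmnl calls and the br_vlan_msg family field are assumptions based on the surrounding rtnetlink conventions, not spelled out in this patch:

#include <libmnl/libmnl.h>
#include <linux/if_bridge.h>
#include <linux/rtnetlink.h>
#include <sys/socket.h>

static void build_vlan_dump(char *buf)
{
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
	struct br_vlan_msg *bvm;

	nlh->nlmsg_type = RTM_GETVLAN;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	bvm = mnl_nlmsg_put_extra_header(nlh, sizeof(*bvm));
	bvm->family = PF_BRIDGE;
	/* ask the kernel to include per-VLAN stats in each dumped entry */
	mnl_attr_put_u32(nlh, BRIDGE_VLANDB_DUMP_FLAGS,
			 BRIDGE_VLANDB_DUMPF_STATS);
}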
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 024af2d1d0af..127c704eeba9 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -463,6 +463,7 @@ enum {
IFLA_MACSEC_REPLAY_PROTECT,
IFLA_MACSEC_VALIDATION,
IFLA_MACSEC_PAD,
+ IFLA_MACSEC_OFFLOAD,
__IFLA_MACSEC_MAX,
};
@@ -489,6 +490,7 @@ enum macsec_validation_type {
enum macsec_offload {
MACSEC_OFFLOAD_OFF = 0,
MACSEC_OFFLOAD_PHY = 1,
+ MACSEC_OFFLOAD_MAC = 2,
__MACSEC_OFFLOAD_END,
MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1,
};
@@ -590,6 +592,18 @@ enum ifla_geneve_df {
GENEVE_DF_MAX = __GENEVE_DF_END - 1,
};
+/* Bareudp section */
+enum {
+ IFLA_BAREUDP_UNSPEC,
+ IFLA_BAREUDP_PORT,
+ IFLA_BAREUDP_ETHERTYPE,
+ IFLA_BAREUDP_SRCPORT_MIN,
+ IFLA_BAREUDP_MULTIPROTO_MODE,
+ __IFLA_BAREUDP_MAX
+};
+
+#define IFLA_BAREUDP_MAX (__IFLA_BAREUDP_MAX - 1)
+
/* PPP section */
enum {
IFLA_PPP_UNSPEC,
@@ -960,11 +974,12 @@ enum {
#define XDP_FLAGS_SKB_MODE (1U << 1)
#define XDP_FLAGS_DRV_MODE (1U << 2)
#define XDP_FLAGS_HW_MODE (1U << 3)
+#define XDP_FLAGS_REPLACE (1U << 4)
#define XDP_FLAGS_MODES (XDP_FLAGS_SKB_MODE | \
XDP_FLAGS_DRV_MODE | \
XDP_FLAGS_HW_MODE)
#define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \
- XDP_FLAGS_MODES)
+ XDP_FLAGS_MODES | XDP_FLAGS_REPLACE)
/* These are stored into IFLA_XDP_ATTACHED on dump. */
enum {
@@ -984,6 +999,7 @@ enum {
IFLA_XDP_DRV_PROG_ID,
IFLA_XDP_SKB_PROG_ID,
IFLA_XDP_HW_PROG_ID,
+ IFLA_XDP_EXPECTED_FD,
__IFLA_XDP_MAX,
};
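XDP_FLAGS_REPLACE together with IFLA_XDP_EXPECTED_FD enables atomic replacement: the kernel only swaps in the new program if the currently attached one matches the expected FD. A hedged libmnl sketch of the IFLA_XDP nest such a request might carry; the attribute layout is inferred from the enum above and the setup of the surrounding RTM_SETLINK message is omitted:

#include <libmnl/libmnl.h>
#include <linux/if_link.h>

static void put_xdp_replace(struct nlmsghdr *nlh, int new_fd, int old_fd)
{
	struct nlattr *xdp = mnl_attr_nest_start(nlh, IFLA_XDP);

	mnl_attr_put_u32(nlh, IFLA_XDP_FD, new_fd);
	/* fails (typically -EEXIST) unless old_fd is still attached */
	mnl_attr_put_u32(nlh, IFLA_XDP_EXPECTED_FD, old_fd);
	mnl_attr_put_u32(nlh, IFLA_XDP_FLAGS, XDP_FLAGS_REPLACE);
	mnl_attr_nest_end(nlh, xdp);
}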
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 1d63c43c38cc..3af2aa069a36 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -22,9 +22,11 @@
#define MACSEC_KEYID_LEN 16
-/* cipher IDs as per IEEE802.1AEbn-2011 */
+/* cipher IDs as per IEEE802.1AE-2018 (Table 14-1) */
#define MACSEC_CIPHER_ID_GCM_AES_128 0x0080C20001000001ULL
#define MACSEC_CIPHER_ID_GCM_AES_256 0x0080C20001000002ULL
+#define MACSEC_CIPHER_ID_GCM_AES_XPN_128 0x0080C20001000003ULL
+#define MACSEC_CIPHER_ID_GCM_AES_XPN_256 0x0080C20001000004ULL
/* deprecated cipher ID for GCM-AES-128 */
#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL
@@ -88,11 +90,13 @@ enum macsec_sa_attrs {
MACSEC_SA_ATTR_UNSPEC,
MACSEC_SA_ATTR_AN, /* config/dump, u8 0..3 */
MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */
- MACSEC_SA_ATTR_PN, /* config/dump, u32 */
+ MACSEC_SA_ATTR_PN, /* config/dump, u32/u64 (u64 if XPN) */
MACSEC_SA_ATTR_KEY, /* config, data */
MACSEC_SA_ATTR_KEYID, /* config/dump, 128-bit */
MACSEC_SA_ATTR_STATS, /* dump, nested, macsec_sa_stats_attr */
MACSEC_SA_ATTR_PAD,
+ MACSEC_SA_ATTR_SSCI, /* config/dump, u32 - XPN only */
+ MACSEC_SA_ATTR_SALT, /* config, 96-bit - XPN only */
__MACSEC_SA_ATTR_END,
NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END,
MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1,
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index a1ff345b3f33..57cc429a9177 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -64,9 +64,11 @@ struct inet_diag_req_raw {
enum {
INET_DIAG_REQ_NONE,
INET_DIAG_REQ_BYTECODE,
+ INET_DIAG_REQ_SK_BPF_STORAGES,
+ __INET_DIAG_REQ_MAX,
};
-#define INET_DIAG_REQ_MAX INET_DIAG_REQ_BYTECODE
+#define INET_DIAG_REQ_MAX (__INET_DIAG_REQ_MAX - 1)
/* Bytecode is sequence of 4 byte commands followed by variable arguments.
* All the commands identified by "code" are conditional jumps forward:
@@ -154,6 +156,7 @@ enum {
INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */
INET_DIAG_MD5SIG,
INET_DIAG_ULP_INFO,
+ INET_DIAG_SK_BPF_STORAGES,
__INET_DIAG_MAX,
};
@@ -163,6 +166,7 @@ enum {
INET_ULP_INFO_UNSPEC,
INET_ULP_INFO_NAME,
INET_ULP_INFO_TLS,
+ INET_ULP_INFO_MPTCP,
__INET_ULP_INFO_MAX,
};
#define INET_ULP_INFO_MAX (__INET_ULP_INFO_MAX - 1)
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 9c0f4a92bcff..13e8751bf24a 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -40,6 +40,7 @@ struct in6_ifreq {
#define IPV6_SRCRT_STRICT 0x01 /* Deprecated; will be removed */
#define IPV6_SRCRT_TYPE_0 0 /* Deprecated; will be removed */
#define IPV6_SRCRT_TYPE_2 2 /* IPv6 type 2 Routing Header */
+#define IPV6_SRCRT_TYPE_3 3 /* RPL Segment Routing with IPv6 */
#define IPV6_SRCRT_TYPE_4 4 /* Segment Routing with IPv6 */
/*
@@ -187,6 +188,7 @@ enum {
DEVCONF_DISABLE_POLICY,
DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN,
DEVCONF_NDISC_TCLASS,
+ DEVCONF_RPL_SEG_ENABLED,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index f6035f737193..568a4303ccce 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -13,6 +13,7 @@ enum lwtunnel_encap_types {
LWTUNNEL_ENCAP_SEG6,
LWTUNNEL_ENCAP_BPF,
LWTUNNEL_ENCAP_SEG6_LOCAL,
+ LWTUNNEL_ENCAP_RPL,
__LWTUNNEL_ENCAP_MAX,
};
diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h
index 0b9c3beda345..90f9b4e1ba27 100644
--- a/include/uapi/linux/mii.h
+++ b/include/uapi/linux/mii.h
@@ -134,11 +134,16 @@
/* MAC and PHY tx_config_Reg[15:0] for SGMII in-band auto-negotiation.*/
#define ADVERTISE_SGMII 0x0001 /* MAC can do SGMII */
#define LPA_SGMII 0x0001 /* PHY can do SGMII */
+#define LPA_SGMII_SPD_MASK 0x0c00 /* SGMII speed mask */
+#define LPA_SGMII_FULL_DUPLEX 0x1000 /* SGMII full duplex */
#define LPA_SGMII_DPX_SPD_MASK 0x1C00 /* SGMII duplex and speed bits */
+#define LPA_SGMII_10 0x0000 /* 10Mbps */
#define LPA_SGMII_10HALF 0x0000 /* Can do 10mbps half-duplex */
#define LPA_SGMII_10FULL 0x1000 /* Can do 10mbps full-duplex */
+#define LPA_SGMII_100 0x0400 /* 100Mbps */
#define LPA_SGMII_100HALF 0x0400 /* Can do 100mbps half-duplex */
#define LPA_SGMII_100FULL 0x1400 /* Can do 100mbps full-duplex */
+#define LPA_SGMII_1000 0x0800 /* 1000Mbps */
#define LPA_SGMII_1000HALF 0x0800 /* Can do 1000mbps half-duplex */
#define LPA_SGMII_1000FULL 0x1800 /* Can do 1000mbps full-duplex */
#define LPA_SGMII_LINK 0x8000 /* PHY link with copper-side partner */
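The new speed and duplex masks make it possible to decode an SGMII in-band status word without enumerating every half/full combination. A small sketch using only the constants above:

/* Sketch: decode link and speed from an SGMII lpa word. */
static int lpa_sgmii_speed(unsigned int lpa)
{
	if (!(lpa & LPA_SGMII_LINK))
		return 0;			/* link down */

	switch (lpa & LPA_SGMII_SPD_MASK) {
	case LPA_SGMII_10:	return 10;
	case LPA_SGMII_100:	return 100;
	case LPA_SGMII_1000:	return 1000;
	default:		return -1;	/* reserved encoding */
	}
}
/* duplex: (lpa & LPA_SGMII_FULL_DUPLEX) != 0 means full duplex */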
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
new file mode 100644
index 000000000000..5f2c77082d9e
--- /dev/null
+++ b/include/uapi/linux/mptcp.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _UAPI_MPTCP_H
+#define _UAPI_MPTCP_H
+
+#include <linux/const.h>
+#include <linux/types.h>
+
+#define MPTCP_SUBFLOW_FLAG_MCAP_REM _BITUL(0)
+#define MPTCP_SUBFLOW_FLAG_MCAP_LOC _BITUL(1)
+#define MPTCP_SUBFLOW_FLAG_JOIN_REM _BITUL(2)
+#define MPTCP_SUBFLOW_FLAG_JOIN_LOC _BITUL(3)
+#define MPTCP_SUBFLOW_FLAG_BKUP_REM _BITUL(4)
+#define MPTCP_SUBFLOW_FLAG_BKUP_LOC _BITUL(5)
+#define MPTCP_SUBFLOW_FLAG_FULLY_ESTABLISHED _BITUL(6)
+#define MPTCP_SUBFLOW_FLAG_CONNECTED _BITUL(7)
+#define MPTCP_SUBFLOW_FLAG_MAPVALID _BITUL(8)
+
+enum {
+ MPTCP_SUBFLOW_ATTR_UNSPEC,
+ MPTCP_SUBFLOW_ATTR_TOKEN_REM,
+ MPTCP_SUBFLOW_ATTR_TOKEN_LOC,
+ MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ,
+ MPTCP_SUBFLOW_ATTR_MAP_SEQ,
+ MPTCP_SUBFLOW_ATTR_MAP_SFSEQ,
+ MPTCP_SUBFLOW_ATTR_SSN_OFFSET,
+ MPTCP_SUBFLOW_ATTR_MAP_DATALEN,
+ MPTCP_SUBFLOW_ATTR_FLAGS,
+ MPTCP_SUBFLOW_ATTR_ID_REM,
+ MPTCP_SUBFLOW_ATTR_ID_LOC,
+ MPTCP_SUBFLOW_ATTR_PAD,
+ __MPTCP_SUBFLOW_ATTR_MAX
+};
+
+#define MPTCP_SUBFLOW_ATTR_MAX (__MPTCP_SUBFLOW_ATTR_MAX - 1)
+
+/* netlink interface */
+#define MPTCP_PM_NAME "mptcp_pm"
+#define MPTCP_PM_CMD_GRP_NAME "mptcp_pm_cmds"
+#define MPTCP_PM_VER 0x1
+
+/*
+ * ATTR types defined for MPTCP
+ */
+enum {
+ MPTCP_PM_ATTR_UNSPEC,
+
+ MPTCP_PM_ATTR_ADDR, /* nested address */
+ MPTCP_PM_ATTR_RCV_ADD_ADDRS, /* u32 */
+ MPTCP_PM_ATTR_SUBFLOWS, /* u32 */
+
+ __MPTCP_PM_ATTR_MAX
+};
+
+#define MPTCP_PM_ATTR_MAX (__MPTCP_PM_ATTR_MAX - 1)
+
+enum {
+ MPTCP_PM_ADDR_ATTR_UNSPEC,
+
+ MPTCP_PM_ADDR_ATTR_FAMILY, /* u16 */
+ MPTCP_PM_ADDR_ATTR_ID, /* u8 */
+ MPTCP_PM_ADDR_ATTR_ADDR4, /* struct in_addr */
+ MPTCP_PM_ADDR_ATTR_ADDR6, /* struct in6_addr */
+ MPTCP_PM_ADDR_ATTR_PORT, /* u16 */
+ MPTCP_PM_ADDR_ATTR_FLAGS, /* u32 */
+ MPTCP_PM_ADDR_ATTR_IF_IDX, /* s32 */
+
+ __MPTCP_PM_ADDR_ATTR_MAX
+};
+
+#define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1)
+
+#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0)
+#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1)
+#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2)
+
+enum {
+ MPTCP_PM_CMD_UNSPEC,
+
+ MPTCP_PM_CMD_ADD_ADDR,
+ MPTCP_PM_CMD_DEL_ADDR,
+ MPTCP_PM_CMD_GET_ADDR,
+ MPTCP_PM_CMD_FLUSH_ADDRS,
+ MPTCP_PM_CMD_SET_LIMITS,
+ MPTCP_PM_CMD_GET_LIMITS,
+
+ __MPTCP_PM_CMD_AFTER_LAST
+};
+
+#endif /* _UAPI_MPTCP_H */
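A hedged sketch of how userspace might announce an additional address through this interface: a generic netlink MPTCP_PM_CMD_ADD_ADDR request whose MPTCP_PM_ATTR_ADDR nest is filled from the address attributes above. Resolving the generic netlink family id for MPTCP_PM_NAME is assumed to have happened already:

#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/genetlink.h>
#include <linux/mptcp.h>

static void build_add_addr(char *buf, uint16_t family_id, struct in_addr *a)
{
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
	struct genlmsghdr *genl;
	struct nlattr *addr;

	nlh->nlmsg_type = family_id;	/* resolved for MPTCP_PM_NAME */
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	genl = mnl_nlmsg_put_extra_header(nlh, sizeof(*genl));
	genl->cmd = MPTCP_PM_CMD_ADD_ADDR;
	genl->version = MPTCP_PM_VER;

	addr = mnl_attr_nest_start(nlh, MPTCP_PM_ATTR_ADDR);
	mnl_attr_put_u16(nlh, MPTCP_PM_ADDR_ATTR_FAMILY, AF_INET);
	mnl_attr_put_u8(nlh, MPTCP_PM_ADDR_ATTR_ID, 1);
	mnl_attr_put(nlh, MPTCP_PM_ADDR_ATTR_ADDR4, sizeof(*a), a);
	/* announce the address to the peer via ADD_ADDR */
	mnl_attr_put_u32(nlh, MPTCP_PM_ADDR_ATTR_FLAGS,
			 MPTCP_PM_ADDR_FLAG_SIGNAL);
	mnl_attr_nest_end(nlh, addr);
}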
diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h
index 8bf79a9eb234..67e31f329190 100644
--- a/include/uapi/linux/net_dropmon.h
+++ b/include/uapi/linux/net_dropmon.h
@@ -29,12 +29,12 @@ struct net_dm_config_entry {
struct net_dm_config_msg {
__u32 entries;
- struct net_dm_config_entry options[0];
+ struct net_dm_config_entry options[];
};
struct net_dm_alert_msg {
__u32 entries;
- struct net_dm_drop_point points[0];
+ struct net_dm_drop_point points[];
};
struct net_dm_user_msg {
@@ -92,6 +92,7 @@ enum net_dm_attr {
NET_DM_ATTR_HW_TRAP_COUNT, /* u32 */
NET_DM_ATTR_SW_DROPS, /* flag */
NET_DM_ATTR_HW_DROPS, /* flag */
+ NET_DM_ATTR_FLOW_ACTION_COOKIE, /* binary */
__NET_DM_ATTR_MAX,
NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index f96e650d0af9..7ed0b3d1c00a 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -98,6 +98,9 @@ enum hwtstamp_tx_types {
* receive a time stamp via the socket error queue.
*/
HWTSTAMP_TX_ONESTEP_P2P,
+
+ /* add new constants above here */
+ __HWTSTAMP_TX_CNT
};
/* possible values for hwtstamp_config->rx_filter */
@@ -140,6 +143,9 @@ enum hwtstamp_rx_filters {
/* NTP, UDP, all versions and packet modes */
HWTSTAMP_FILTER_NTP_ALL,
+
+ /* add new constants above here */
+ __HWTSTAMP_FILTER_CNT
};
/* SCM_TIMESTAMPING_PKTINFO control message */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 065218a20bb7..30f2a87270dc 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -342,6 +342,7 @@ enum nft_set_field_attributes {
* @NFTA_SET_USERDATA: user data (NLA_BINARY)
* @NFTA_SET_OBJ_TYPE: stateful object type (NLA_U32: NFT_OBJECT_*)
* @NFTA_SET_HANDLE: set handle (NLA_U64)
+ * @NFTA_SET_EXPR: set expression (NLA_NESTED: nft_expr_attributes)
*/
enum nft_set_attributes {
NFTA_SET_UNSPEC,
@@ -361,6 +362,7 @@ enum nft_set_attributes {
NFTA_SET_PAD,
NFTA_SET_OBJ_TYPE,
NFTA_SET_HANDLE,
+ NFTA_SET_EXPR,
__NFTA_SET_MAX
};
#define NFTA_SET_MAX (__NFTA_SET_MAX - 1)
@@ -1552,6 +1554,19 @@ enum nft_object_attributes {
#define NFTA_OBJ_MAX (__NFTA_OBJ_MAX - 1)
/**
+ * enum nft_flowtable_flags - nf_tables flowtable flags
+ *
+ * @NFT_FLOWTABLE_HW_OFFLOAD: flowtable hardware offload is enabled
+ * @NFT_FLOWTABLE_COUNTER: enable flow counters
+ */
+enum nft_flowtable_flags {
+ NFT_FLOWTABLE_HW_OFFLOAD = 0x1,
+ NFT_FLOWTABLE_COUNTER = 0x2,
+ NFT_FLOWTABLE_MASK = (NFT_FLOWTABLE_HW_OFFLOAD |
+ NFT_FLOWTABLE_COUNTER)
+};
+
+/**
* enum nft_flowtable_attributes - nf_tables flow table netlink attributes
*
* @NFTA_FLOWTABLE_TABLE: name of the table containing the expression (NLA_STRING)
@@ -1770,6 +1785,7 @@ enum nft_tunnel_opts_attributes {
NFTA_TUNNEL_KEY_OPTS_UNSPEC,
NFTA_TUNNEL_KEY_OPTS_VXLAN,
NFTA_TUNNEL_KEY_OPTS_ERSPAN,
+ NFTA_TUNNEL_KEY_OPTS_GENEVE,
__NFTA_TUNNEL_KEY_OPTS_MAX
};
#define NFTA_TUNNEL_KEY_OPTS_MAX (__NFTA_TUNNEL_KEY_OPTS_MAX - 1)
@@ -1791,6 +1807,15 @@ enum nft_tunnel_opts_erspan_attributes {
};
#define NFTA_TUNNEL_KEY_ERSPAN_MAX (__NFTA_TUNNEL_KEY_ERSPAN_MAX - 1)
+enum nft_tunnel_opts_geneve_attributes {
+ NFTA_TUNNEL_KEY_GENEVE_UNSPEC,
+ NFTA_TUNNEL_KEY_GENEVE_CLASS,
+ NFTA_TUNNEL_KEY_GENEVE_TYPE,
+ NFTA_TUNNEL_KEY_GENEVE_DATA,
+ __NFTA_TUNNEL_KEY_GENEVE_MAX
+};
+#define NFTA_TUNNEL_KEY_GENEVE_MAX (__NFTA_TUNNEL_KEY_GENEVE_MAX - 1)
+
enum nft_tunnel_flags {
NFT_TUNNEL_F_ZERO_CSUM_TX = (1 << 0),
NFT_TUNNEL_F_DONT_FRAGMENT = (1 << 1),
diff --git a/include/uapi/linux/netfilter/xt_IDLETIMER.h b/include/uapi/linux/netfilter/xt_IDLETIMER.h
index 3c586a19baea..434e6506abaa 100644
--- a/include/uapi/linux/netfilter/xt_IDLETIMER.h
+++ b/include/uapi/linux/netfilter/xt_IDLETIMER.h
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* linux/include/linux/netfilter/xt_IDLETIMER.h
*
@@ -33,6 +32,7 @@
#include <linux/types.h>
#define MAX_IDLETIMER_LABEL_SIZE 28
+#define XT_IDLETIMER_ALARM 0x01
struct idletimer_tg_info {
__u32 timeout;
@@ -43,4 +43,14 @@ struct idletimer_tg_info {
struct idletimer_tg *timer __attribute__((aligned(8)));
};
+struct idletimer_tg_info_v1 {
+ __u32 timeout;
+
+ char label[MAX_IDLETIMER_LABEL_SIZE];
+
+ __u8 timer_type;
+
+ /* for kernel module internal use only */
+ struct idletimer_tg *timer __attribute__((aligned(8)));
+};
#endif
diff --git a/include/uapi/linux/netfilter_bridge/ebt_among.h b/include/uapi/linux/netfilter_bridge/ebt_among.h
index 9acf757bc1f7..73b26a280c4f 100644
--- a/include/uapi/linux/netfilter_bridge/ebt_among.h
+++ b/include/uapi/linux/netfilter_bridge/ebt_among.h
@@ -40,7 +40,7 @@ struct ebt_mac_wormhash_tuple {
struct ebt_mac_wormhash {
int table[257];
int poolsize;
- struct ebt_mac_wormhash_tuple pool[0];
+ struct ebt_mac_wormhash_tuple pool[];
};
#define ebt_mac_wormhash_size(x) ((x) ? sizeof(struct ebt_mac_wormhash) \
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 5eab191607f8..2b691161830f 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -11,7 +11,7 @@
* Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
* Copyright 2008 Colin McCabe <colin@cozybit.com>
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -265,6 +265,29 @@
*/
/**
+ * DOC: TID configuration
+ *
+ * TID config support can be checked in the %NL80211_ATTR_TID_CONFIG
+ * attribute given in wiphy capabilities.
+ *
+ * The necessary configuration parameters are mentioned in
+ * &enum nl80211_tid_config_attr and are passed to the
+ * %NL80211_CMD_SET_TID_CONFIG command in %NL80211_ATTR_TID_CONFIG.
+ *
+ * If the configuration needs to be applied for a specific peer then the MAC
+ * address of the peer needs to be passed in %NL80211_ATTR_MAC; otherwise the
+ * configuration is applied to all connected peers in the vif, by default
+ * excluding any peers that have a peer-specific configuration for the TID.
+ * If the %NL80211_TID_CONFIG_ATTR_OVERRIDE flag is set, peer-specific
+ * values are overwritten as well.
+ *
+ * All this configuration is valid only for the STA's current connection,
+ * i.e. the configuration is reset to default when the STA reconnects after
+ * disconnection/roaming, and is cleared when the interface goes down.
+ */
+
+/**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -1125,6 +1148,9 @@
* peer MAC address and %NL80211_ATTR_FRAME is used to specify the frame
* content. The frame is ethernet data.
*
+ * @NL80211_CMD_SET_TID_CONFIG: Data frame TID specific configuration
+ * is passed using %NL80211_ATTR_TID_CONFIG attribute.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1349,6 +1375,8 @@ enum nl80211_commands {
NL80211_CMD_PROBE_MESH_LINK,
+ NL80211_CMD_SET_TID_CONFIG,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1604,7 +1632,8 @@ enum nl80211_commands {
* flag is included, then control port frames are sent over NL80211 instead
* using %CMD_CONTROL_PORT_FRAME. If control port routing over NL80211 is
* to be used then userspace must also use the %NL80211_ATTR_SOCKET_OWNER
- * flag.
+ * flag. When used with %NL80211_ATTR_CONTROL_PORT_NO_PREAUTH, pre-auth
+ * frames are not forwarded over the control port.
*
* @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver.
* We recommend using nested, driver-specific attributes within this.
@@ -2400,6 +2429,47 @@ enum nl80211_commands {
* @NL80211_ATTR_VLAN_ID: VLAN ID (1..4094) for the station and VLAN group key
* (u16).
*
+ * @NL80211_ATTR_HE_BSS_COLOR: nested attribute for BSS Color Settings.
+ *
+ * @NL80211_ATTR_IFTYPE_AKM_SUITES: nested array attribute, with each entry
+ * using attributes from &enum nl80211_iftype_akm_attributes. This
+ * attribute is sent in a response to %NL80211_CMD_GET_WIPHY indicating
+ * supported AKM suites capability per interface. AKMs advertised in
+ * %NL80211_ATTR_AKM_SUITES are the default capabilities if AKM suites are
+ * not advertised for a specific interface type.
+ *
+ * @NL80211_ATTR_TID_CONFIG: TID specific configuration in a
+ * nested attribute with &enum nl80211_tid_config_attr sub-attributes;
+ * on output (in wiphy attributes) it contains only the feature sub-
+ * attributes.
+ *
+ * @NL80211_ATTR_CONTROL_PORT_NO_PREAUTH: disable preauth frame rx on control
+ * port in order to forward/receive them as ordinary data frames.
+ *
+ * @NL80211_ATTR_PMK_LIFETIME: Maximum lifetime for PMKSA in seconds (u32,
+ * dot11RSNAConfigPMKReauthThreshold; 0 is not a valid value).
+ * An optional parameter configured through %NL80211_CMD_SET_PMKSA.
+ * Drivers that trigger roaming need to know the lifetime of the
+ * configured PMKSA for triggering the full vs. PMKSA caching based
+ * authentication. This timeout helps authentication methods like SAE,
+ * where PMK gets updated only by going through a full (new SAE)
+ * authentication instead of getting updated during an association for EAP
+ * authentication. If no new full authentication occurs within the PMK
+ * expiry, a disassociation shall result at the end of the lifetime.
+ *
+ * @NL80211_ATTR_PMK_REAUTH_THRESHOLD: Reauthentication threshold time, in
+ * terms of percentage of %NL80211_ATTR_PMK_LIFETIME
+ * (u8, dot11RSNAConfigPMKReauthThreshold, 1..100). This is an optional
+ * parameter configured through %NL80211_CMD_SET_PMKSA. Requests the
+ * driver to trigger a full authentication roam (without PMKSA caching)
+ * after the reauthentication threshold time, but before the PMK lifetime
+ * has expired.
+ *
+ * Authentication methods like SAE need to be able to generate a new PMKSA
+ * entry without having to force a disconnection after the PMK timeout. If
+ * no roaming occurs between the reauth threshold and PMK expiration,
+ * disassociation is still forced.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2864,6 +2934,17 @@ enum nl80211_attrs {
NL80211_ATTR_VLAN_ID,
+ NL80211_ATTR_HE_BSS_COLOR,
+
+ NL80211_ATTR_IFTYPE_AKM_SUITES,
+
+ NL80211_ATTR_TID_CONFIG,
+
+ NL80211_ATTR_CONTROL_PORT_NO_PREAUTH,
+
+ NL80211_ATTR_PMK_LIFETIME,
+ NL80211_ATTR_PMK_REAUTH_THRESHOLD,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3583,6 +3664,8 @@ enum nl80211_wmm_rule {
* @NL80211_FREQUENCY_ATTR_WMM: this channel has wmm limitations.
* This is a nested attribute that contains the wmm limitation per AC.
* (see &enum nl80211_wmm_rule)
+ * @NL80211_FREQUENCY_ATTR_NO_HE: HE operation is not allowed on this channel
+ * in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -3612,6 +3695,7 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_NO_20MHZ,
NL80211_FREQUENCY_ATTR_NO_10MHZ,
NL80211_FREQUENCY_ATTR_WMM,
+ NL80211_FREQUENCY_ATTR_NO_HE,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -3809,6 +3893,7 @@ enum nl80211_sched_scan_match_attr {
* @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation
* @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
* @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed
+ * @NL80211_RRF_NO_HE: HE operation not allowed
*/
enum nl80211_reg_rule_flags {
NL80211_RRF_NO_OFDM = 1<<0,
@@ -3826,6 +3911,7 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_NO_HT40PLUS = 1<<14,
NL80211_RRF_NO_80MHZ = 1<<15,
NL80211_RRF_NO_160MHZ = 1<<16,
+ NL80211_RRF_NO_HE = 1<<17,
};
#define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR
@@ -4532,6 +4618,7 @@ enum nl80211_key_default_types {
* See &enum nl80211_key_default_types.
* @NL80211_KEY_MODE: the mode from enum nl80211_key_mode.
* Defaults to @NL80211_KEY_RX_TX.
+ * @NL80211_KEY_DEFAULT_BEACON: flag indicating default Beacon frame key
*
* @__NL80211_KEY_AFTER_LAST: internal
* @NL80211_KEY_MAX: highest key attribute
@@ -4547,6 +4634,7 @@ enum nl80211_key_attributes {
NL80211_KEY_TYPE,
NL80211_KEY_DEFAULT_TYPES,
NL80211_KEY_MODE,
+ NL80211_KEY_DEFAULT_BEACON,
/* keep last */
__NL80211_KEY_AFTER_LAST,
@@ -4703,6 +4791,69 @@ enum nl80211_tx_power_setting {
};
/**
+ * enum nl80211_tid_config - TID config state
+ * @NL80211_TID_CONFIG_ENABLE: Enable config for the TID
+ * @NL80211_TID_CONFIG_DISABLE: Disable config for the TID
+ */
+enum nl80211_tid_config {
+ NL80211_TID_CONFIG_ENABLE,
+ NL80211_TID_CONFIG_DISABLE,
+};
+
+/**
+ * enum nl80211_tid_config_attr - TID specific configuration.
+ * @NL80211_TID_CONFIG_ATTR_PAD: pad attribute for 64-bit values
+ * @NL80211_TID_CONFIG_ATTR_VIF_SUPP: a bitmap (u64) of attributes supported
+ * for per-vif configuration; doesn't list the ones that are generic
+ * (%NL80211_TID_CONFIG_ATTR_TIDS, %NL80211_TID_CONFIG_ATTR_OVERRIDE).
+ * @NL80211_TID_CONFIG_ATTR_PEER_SUPP: same as the previous per-vif one, but
+ * per peer instead.
+ * @NL80211_TID_CONFIG_ATTR_OVERRIDE: flag attribute. If no peer is
+ * selected and this flag is set, the new configuration overrides all
+ * previous peer configurations; otherwise previous peer-specific
+ * configurations are left untouched. If a peer is selected, the flag
+ * resets that peer's configuration for the given TIDs, and no other
+ * TID config attributes are accepted along with the peer.
+ * @NL80211_TID_CONFIG_ATTR_TIDS: a bitmask value of TIDs (bit 0 to 7).
+ * Its type is u16.
+ * @NL80211_TID_CONFIG_ATTR_NOACK: Configure ack policy for the TIDs
+ * specified in %NL80211_TID_CONFIG_ATTR_TIDS; see &enum nl80211_tid_config.
+ * Its type is u8.
+ * @NL80211_TID_CONFIG_ATTR_RETRY_SHORT: Number of retries used with data frame
+ * transmission, user-space sets this configuration in
+ * &NL80211_CMD_SET_TID_CONFIG. It is u8 type, min value is 1 and
+ * the max value is advertised by the driver in this attribute on
+ * output in wiphy capabilities.
+ * @NL80211_TID_CONFIG_ATTR_RETRY_LONG: Number of retries used with data frame
+ * transmission, user-space sets this configuration in
+ * &NL80211_CMD_SET_TID_CONFIG. Its type is u8, min value is 1 and
+ * the max value is advertised by the driver in this attribute on
+ * output in wiphy capabilities.
+ * @NL80211_TID_CONFIG_ATTR_AMPDU_CTRL: Enable/Disable aggregation for the TIDs
+ * specified in %NL80211_TID_CONFIG_ATTR_TIDS. Its type is u8, using
+ * the values from &enum nl80211_tid_config.
+ * @NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL: Enable/Disable RTS_CTS for the TIDs
+ * specified in %NL80211_TID_CONFIG_ATTR_TIDS. Its type is u8, using
+ * the values from &enum nl80211_tid_config.
+ */
+enum nl80211_tid_config_attr {
+ __NL80211_TID_CONFIG_ATTR_INVALID,
+ NL80211_TID_CONFIG_ATTR_PAD,
+ NL80211_TID_CONFIG_ATTR_VIF_SUPP,
+ NL80211_TID_CONFIG_ATTR_PEER_SUPP,
+ NL80211_TID_CONFIG_ATTR_OVERRIDE,
+ NL80211_TID_CONFIG_ATTR_TIDS,
+ NL80211_TID_CONFIG_ATTR_NOACK,
+ NL80211_TID_CONFIG_ATTR_RETRY_SHORT,
+ NL80211_TID_CONFIG_ATTR_RETRY_LONG,
+ NL80211_TID_CONFIG_ATTR_AMPDU_CTRL,
+ NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL,
+
+ /* keep last */
+ __NL80211_TID_CONFIG_ATTR_AFTER_LAST,
+ NL80211_TID_CONFIG_ATTR_MAX = __NL80211_TID_CONFIG_ATTR_AFTER_LAST - 1
+};
+
+/**
* enum nl80211_packet_pattern_attr - packet pattern attribute
* @__NL80211_PKTPAT_INVALID: invalid number for nested attribute
* @NL80211_PKTPAT_PATTERN: the pattern, values where the mask has
@@ -5521,6 +5672,18 @@ enum nl80211_feature_flags {
* feature, which prevents bufferbloat by using the expected transmission
* time to limit the amount of data buffered in the hardware.
*
+ * @NL80211_EXT_FEATURE_BEACON_PROTECTION: The driver supports Beacon protection
+ * and can receive key configuration for BIGTK using key indexes 6 and 7.
+ *
+ * @NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH: The driver can disable the
+ * forwarding of preauth frames over the control port. They are then
+ * handled as ordinary data frames.
+ *
+ * @NL80211_EXT_FEATURE_PROTECTED_TWT: Driver supports protected TWT frames
+ *
+ * @NL80211_EXT_FEATURE_DEL_IBSS_STA: The driver supports removing stations
+ * in IBSS mode, essentially by dropping their state.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -5568,6 +5731,10 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_SAE_OFFLOAD,
NL80211_EXT_FEATURE_VLAN_OFFLOAD,
NL80211_EXT_FEATURE_AQL,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION,
+ NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH,
+ NL80211_EXT_FEATURE_PROTECTED_TWT,
+ NL80211_EXT_FEATURE_DEL_IBSS_STA,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -6190,12 +6357,14 @@ enum nl80211_ftm_responder_stats {
* @NL80211_PREAMBLE_HT: HT preamble
* @NL80211_PREAMBLE_VHT: VHT preamble
* @NL80211_PREAMBLE_DMG: DMG preamble
+ * @NL80211_PREAMBLE_HE: HE preamble
*/
enum nl80211_preamble {
NL80211_PREAMBLE_LEGACY,
NL80211_PREAMBLE_HT,
NL80211_PREAMBLE_VHT,
NL80211_PREAMBLE_DMG,
+ NL80211_PREAMBLE_HE,
};
/**
@@ -6388,6 +6557,10 @@ enum nl80211_peer_measurement_attrs {
* is valid)
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST: u32 attribute indicating
* the maximum FTMs per burst (if not present anything is valid)
+ * @NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED: flag attribute indicating if
+ * trigger based ranging measurement is supported
+ * @NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED: flag attribute indicating
+ * if non trigger based ranging measurement is supported
*
* @NUM_NL80211_PMSR_FTM_CAPA_ATTR: internal
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX: highest attribute number
@@ -6403,6 +6576,8 @@ enum nl80211_peer_measurement_ftm_capa {
NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS,
NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT,
NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST,
+ NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED,
+ NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED,
/* keep last */
NUM_NL80211_PMSR_FTM_CAPA_ATTR,
@@ -6432,6 +6607,20 @@ enum nl80211_peer_measurement_ftm_capa {
* @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI: request LCI data (flag)
* @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC: request civic location data
* (flag)
+ * @NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED: request trigger based ranging
+ * measurement (flag).
+ * This attribute and %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED are
+ * mutually exclusive.
+ * If neither %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED nor
+ * %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set, EDCA based
+ * ranging will be used.
+ * @NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED: request non trigger based
+ * ranging measurement (flag).
+ * This attribute and %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED are
+ * mutually exclusive.
+ * If neither %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED nor
+ * %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set, EDCA based
+ * ranging will be used.
*
* @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal
* @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number
@@ -6448,6 +6637,8 @@ enum nl80211_peer_measurement_ftm_req {
NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES,
NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI,
NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC,
+ NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED,
+ NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED,
/* keep last */
NUM_NL80211_PMSR_FTM_REQ_ATTR,
@@ -6587,5 +6778,51 @@ enum nl80211_obss_pd_attributes {
NL80211_HE_OBSS_PD_ATTR_MAX = __NL80211_HE_OBSS_PD_ATTR_LAST - 1,
};
+/**
+ * enum nl80211_bss_color_attributes - BSS Color attributes
+ * @__NL80211_HE_BSS_COLOR_ATTR_INVALID: Invalid
+ *
+ * @NL80211_HE_BSS_COLOR_ATTR_COLOR: the current BSS Color.
+ * @NL80211_HE_BSS_COLOR_ATTR_DISABLED: whether BSS coloring is disabled.
+ * @NL80211_HE_BSS_COLOR_ATTR_PARTIAL: the AID equation to be used.
+ *
+ * @__NL80211_HE_BSS_COLOR_ATTR_LAST: Internal
+ * @NL80211_HE_BSS_COLOR_ATTR_MAX: highest BSS Color attribute.
+ */
+enum nl80211_bss_color_attributes {
+ __NL80211_HE_BSS_COLOR_ATTR_INVALID,
+
+ NL80211_HE_BSS_COLOR_ATTR_COLOR,
+ NL80211_HE_BSS_COLOR_ATTR_DISABLED,
+ NL80211_HE_BSS_COLOR_ATTR_PARTIAL,
+
+ /* keep last */
+ __NL80211_HE_BSS_COLOR_ATTR_LAST,
+ NL80211_HE_BSS_COLOR_ATTR_MAX = __NL80211_HE_BSS_COLOR_ATTR_LAST - 1,
+};
+
+/**
+ * enum nl80211_iftype_akm_attributes - interface type AKM attributes
+ * @__NL80211_IFTYPE_AKM_ATTR_INVALID: Invalid
+ *
+ * @NL80211_IFTYPE_AKM_ATTR_IFTYPES: nested attribute containing a flag
+ * attribute for each interface type that supports AKM suites specified in
+ * %NL80211_IFTYPE_AKM_ATTR_SUITES
+ * @NL80211_IFTYPE_AKM_ATTR_SUITES: an array of u32. Used to indicate supported
+ * AKM suites for the specified interface types.
+ *
+ * @__NL80211_IFTYPE_AKM_ATTR_LAST: Internal
+ * @NL80211_IFTYPE_AKM_ATTR_MAX: highest interface type AKM attribute.
+ */
+enum nl80211_iftype_akm_attributes {
+ __NL80211_IFTYPE_AKM_ATTR_INVALID,
+
+ NL80211_IFTYPE_AKM_ATTR_IFTYPES,
+ NL80211_IFTYPE_AKM_ATTR_SUITES,
+
+ /* keep last */
+ __NL80211_IFTYPE_AKM_ATTR_LAST,
+ NL80211_IFTYPE_AKM_ATTR_MAX = __NL80211_IFTYPE_AKM_ATTR_LAST - 1,
+};
#endif /* __LINUX_NL80211_H */
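A hedged sketch of the attribute payload an NL80211_CMD_SET_TID_CONFIG request might carry, enabling the no-ack policy for TID 0 on one peer. Whether the kernel expects a single nest or an array of nested entries inside NL80211_ATTR_TID_CONFIG is an assumption here; the nl80211 policy is authoritative, and the generic netlink message setup is omitted:

#include <libmnl/libmnl.h>
#include <linux/nl80211.h>

static void put_tid_config(struct nlmsghdr *nlh, const unsigned char *peer)
{
	struct nlattr *tid;

	mnl_attr_put(nlh, NL80211_ATTR_MAC, 6, peer);	/* peer-specific */
	tid = mnl_attr_nest_start(nlh, NL80211_ATTR_TID_CONFIG);
	mnl_attr_put_u16(nlh, NL80211_TID_CONFIG_ATTR_TIDS, 1 << 0);
	mnl_attr_put_u8(nlh, NL80211_TID_CONFIG_ATTR_NOACK,
			NL80211_TID_CONFIG_ENABLE);
	mnl_attr_nest_end(nlh, tid);
}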
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index ae2bff14e7e1..9b14519e74d9 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -958,6 +958,7 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_CLONE, /* Nested OVS_CLONE_ATTR_*. */
OVS_ACTION_ATTR_CHECK_PKT_LEN, /* Nested OVS_CHECK_PKT_LEN_ATTR_*. */
OVS_ACTION_ATTR_ADD_MPLS, /* struct ovs_action_add_mpls. */
+ OVS_ACTION_ATTR_DEC_TTL, /* Nested OVS_DEC_TTL_ATTR_*. */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
@@ -1050,4 +1051,10 @@ struct ovs_zone_limit {
__u32 count;
};
+enum ovs_dec_ttl_attr {
+ OVS_DEC_TTL_ATTR_UNSPEC,
+ OVS_DEC_TTL_ATTR_ACTION, /* Nested struct nlattr */
+ __OVS_DEC_TTL_ATTR_MAX
+};
+
#endif /* _LINUX_OPENVSWITCH_H */
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 449a63971451..9f06d29cab70 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -17,6 +17,8 @@ enum {
TCA_ACT_PAD,
TCA_ACT_COOKIE,
TCA_ACT_FLAGS,
+ TCA_ACT_HW_STATS,
+ TCA_ACT_USED_HW_STATS,
__TCA_ACT_MAX
};
@@ -24,6 +26,26 @@ enum {
* actions stats.
*/
+/* TCA HW stats type
+ * When the user does not pass the attribute, they do not care:
+ * it is the same as if they had passed the attribute with
+ * all supported bits set.
+ * If no bits are set, the user is not interested in getting any HW statistics.
+ */
+#define TCA_ACT_HW_STATS_IMMEDIATE (1 << 0) /* Means that in a dump, the user
+ * gets the current HW stats
+ * state from the device,
+ * queried at dump time.
+ */
+#define TCA_ACT_HW_STATS_DELAYED (1 << 1) /* Means that in a dump, the user
+ * gets HW stats that might be out of
+ * date for some time, maybe a couple
+ * of seconds. This is the case when
+ * the driver polls for stats updates
+ * periodically or gets async stats
+ * updates from the device.
+ */
+
#define TCA_ACT_MAX __TCA_ACT_MAX
#define TCA_OLD_COMPAT (TCA_ACT_MAX+1)
#define TCA_ACT_MAX_PRIO 32
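TCA_ACT_HW_STATS carries the bit selection as a 32-bit bitfield attribute, so userspace states both which bits it understands and which values it sets. A hedged sketch, assuming the attribute uses struct nla_bitfield32 (the usual encoding for such selector/value pairs) and libmnl on the sending side:

#include <libmnl/libmnl.h>
#include <linux/netlink.h>
#include <linux/pkt_cls.h>

static void put_hw_stats(struct nlmsghdr *nlh)
{
	/* request immediate stats only, while declaring that we
	 * understand both the IMMEDIATE and DELAYED bits
	 */
	struct nla_bitfield32 hw_stats = {
		.value	  = TCA_ACT_HW_STATS_IMMEDIATE,
		.selector = TCA_ACT_HW_STATS_IMMEDIATE |
			    TCA_ACT_HW_STATS_DELAYED,
	};

	mnl_attr_put(nlh, TCA_ACT_HW_STATS, sizeof(hw_stats), &hw_stats);
}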
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index bbe791b24168..0c02737c8f47 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -256,6 +256,7 @@ enum {
TCA_RED_PARMS,
TCA_RED_STAB,
TCA_RED_MAX_P,
+ TCA_RED_FLAGS, /* bitfield32 */
__TCA_RED_MAX,
};
@@ -268,12 +269,28 @@ struct tc_red_qopt {
unsigned char Wlog; /* log(W) */
unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
unsigned char Scell_log; /* cell size for idle damping */
+
+ /* This field can be used for flags that a RED-like qdisc has
+ * historically supported. E.g. when configuring RED, it can be used for
+ * ECN, HARDDROP and ADAPTATIVE. For SFQ it can be used for ECN and
+ * HARDDROP, etc. Because this field has not been validated, and is
+ * copied back on dump, any bits besides those to which a given qdisc
+ * has assigned a historical meaning need to be considered as free for
+ * use by userspace tools.
+ *
+ * Any further flags need to be passed differently, e.g. through an
+ * attribute (such as TCA_RED_FLAGS above). Such an attribute should
+ * allow passing both recent and historic flags in one value.
+ */
unsigned char flags;
#define TC_RED_ECN 1
#define TC_RED_HARDDROP 2
#define TC_RED_ADAPTATIVE 4
+#define TC_RED_NODROP 8
};
+#define TC_RED_HISTORIC_FLAGS (TC_RED_ECN | TC_RED_HARDDROP | TC_RED_ADAPTATIVE)
+
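Given the comment above, a RED-like qdisc cannot reject unknown bits in the legacy field; it can only split them: historic bits keep their meaning, the rest are preserved and copied back on dump. A kernel-side sketch of that split (illustrative only, not taken from this patch):

/* split the legacy flags field as the comment above prescribes */
u32 flags    = qopt->flags & TC_RED_HISTORIC_FLAGS;  /* acted upon */
u32 userbits = qopt->flags & ~TC_RED_HISTORIC_FLAGS; /* preserved for dump */
/* anything new, e.g. TC_RED_NODROP, must arrive via TCA_RED_FLAGS */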
struct tc_red_xstats {
__u32 early; /* Early drops */
__u32 pdrop; /* Drops due to queue limits */
@@ -894,6 +911,8 @@ enum {
TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */
+ TCA_FQ_TIMER_SLACK, /* timer slack */
+
__TCA_FQ_MAX
};
@@ -1197,8 +1216,8 @@ enum {
* [TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL]
*/
-#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST BIT(0)
-#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD BIT(1)
+#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST _BITUL(0)
+#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD _BITUL(1)
enum {
TCA_TAPRIO_ATTR_UNSPEC,
diff --git a/include/uapi/linux/rpl.h b/include/uapi/linux/rpl.h
new file mode 100644
index 000000000000..1dccb55cf8c6
--- /dev/null
+++ b/include/uapi/linux/rpl.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * IPv6 RPL-SR implementation
+ *
+ * Author:
+ * (C) 2020 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#ifndef _UAPI_LINUX_RPL_H
+#define _UAPI_LINUX_RPL_H
+
+#include <asm/byteorder.h>
+#include <linux/types.h>
+#include <linux/in6.h>
+
+/*
+ * RPL SR Header
+ */
+struct ipv6_rpl_sr_hdr {
+ __u8 nexthdr;
+ __u8 hdrlen;
+ __u8 type;
+ __u8 segments_left;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u32 cmpre:4,
+ cmpri:4,
+ reserved:4,
+ pad:4,
+ reserved1:16;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u32 reserved:20,
+ pad:4,
+ cmpri:4,
+ cmpre:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+ union {
+ struct in6_addr addr[0];
+ __u8 data[0];
+ } segments;
+} __attribute__((packed));
+
+#define rpl_segaddr segments.addr
+#define rpl_segdata segments.data
+
+#endif
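The cmpri/cmpre/pad fields implement the address compression of RFC 6554, on which this source routing header is based: all but the last segment address are stored in (16 - CmprI) octets, the last in (16 - CmprE) octets. A sketch of the resulting segment count, assuming a well-formed header; this helper is illustrative and not part of the patch:

/* RFC 6554, section 3: recover the number of addresses carried. */
static unsigned int rpl_srh_n_segments(const struct ipv6_rpl_sr_hdr *srh)
{
	unsigned int payload = srh->hdrlen * 8 - srh->pad;

	return (payload - (16 - srh->cmpre)) / (16 - srh->cmpri) + 1;
}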
diff --git a/include/uapi/linux/rpl_iptunnel.h b/include/uapi/linux/rpl_iptunnel.h
new file mode 100644
index 000000000000..f4eed1f92baa
--- /dev/null
+++ b/include/uapi/linux/rpl_iptunnel.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * IPv6 RPL-SR implementation
+ *
+ * Author:
+ * (C) 2020 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#ifndef _UAPI_LINUX_RPL_IPTUNNEL_H
+#define _UAPI_LINUX_RPL_IPTUNNEL_H
+
+enum {
+ RPL_IPTUNNEL_UNSPEC,
+ RPL_IPTUNNEL_SRH,
+ __RPL_IPTUNNEL_MAX,
+};
+#define RPL_IPTUNNEL_MAX (__RPL_IPTUNNEL_MAX - 1)
+
+#define RPL_IPTUNNEL_SRH_SIZE(srh) (((srh)->hdrlen + 1) << 3)
+
+#endif
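RPL_IPTUNNEL_SRH_SIZE is the standard IPv6 extension-header length rule spelled out: hdrlen counts 8-octet units beyond the first 8 octets. A worked example, with srh assumed to point at a parsed struct ipv6_rpl_sr_hdr:

/* hdrlen == 2  =>  (2 + 1) << 3 == 24 bytes occupied by the SRH */
size_t srh_bytes = RPL_IPTUNNEL_SRH_SIZE(srh);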
diff --git a/include/uapi/linux/sock_diag.h b/include/uapi/linux/sock_diag.h
index e5925009a652..5f74a5f6091d 100644
--- a/include/uapi/linux/sock_diag.h
+++ b/include/uapi/linux/sock_diag.h
@@ -36,4 +36,30 @@ enum sknetlink_groups {
};
#define SKNLGRP_MAX (__SKNLGRP_MAX - 1)
+enum {
+ SK_DIAG_BPF_STORAGE_REQ_NONE,
+ SK_DIAG_BPF_STORAGE_REQ_MAP_FD,
+ __SK_DIAG_BPF_STORAGE_REQ_MAX,
+};
+
+#define SK_DIAG_BPF_STORAGE_REQ_MAX (__SK_DIAG_BPF_STORAGE_REQ_MAX - 1)
+
+enum {
+ SK_DIAG_BPF_STORAGE_REP_NONE,
+ SK_DIAG_BPF_STORAGE,
+ __SK_DIAG_BPF_STORAGE_REP_MAX,
+};
+
+#define SK_DIAG_BPF_STORAGE_REP_MAX (__SK_DIAG_BPF_STORAGE_REP_MAX - 1)
+
+enum {
+ SK_DIAG_BPF_STORAGE_NONE,
+ SK_DIAG_BPF_STORAGE_PAD,
+ SK_DIAG_BPF_STORAGE_MAP_ID,
+ SK_DIAG_BPF_STORAGE_MAP_VALUE,
+ __SK_DIAG_BPF_STORAGE_MAX,
+};
+
+#define SK_DIAG_BPF_STORAGE_MAX (__SK_DIAG_BPF_STORAGE_MAX - 1)
+
#endif /* _UAPI__SOCK_DIAG_H__ */
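A hedged sketch of how a dump request can opt in to these socket-storage attributes: the inet_diag request grows a nested INET_DIAG_REQ_SK_BPF_STORAGES attribute, optionally narrowing the dump to one map via SK_DIAG_BPF_STORAGE_REQ_MAP_FD. The surrounding inet_diag_req_v2 setup is omitted and the nesting is inferred from the enums above:

#include <libmnl/libmnl.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static void put_bpf_storage_req(struct nlmsghdr *nlh, int map_fd)
{
	struct nlattr *req;

	req = mnl_attr_nest_start(nlh, INET_DIAG_REQ_SK_BPF_STORAGES);
	/* restrict the dump to storage from this one map */
	mnl_attr_put_u32(nlh, SK_DIAG_BPF_STORAGE_REQ_MAP_FD, map_fd);
	mnl_attr_nest_end(nlh, req);
}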
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index fd9eb8f6bcae..f2acb2566333 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -312,6 +312,7 @@ enum {
TCP_NLA_REORD_SEEN, /* reordering events seen */
TCP_NLA_SRTT, /* smoothed RTT in usecs */
TCP_NLA_TIMEOUT_REHASH, /* Timeout-triggered rehash attempts */
+ TCP_NLA_BYTES_NOTSENT, /* Bytes in write queue not yet sent */
};
/* for TCP_MD5SIG socket option */
@@ -345,5 +346,7 @@ struct tcp_zerocopy_receive {
__u64 address; /* in: address of mapping */
__u32 length; /* in/out: number of bytes to map/mapped */
__u32 recv_skip_hint; /* out: amount of bytes to skip */
+ __u32 inq; /* out: amount of bytes in read queue */
+ __s32 err; /* out: socket error */
};
#endif /* _UAPI_LINUX_TCP_H */
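The two new out fields let a TCP_ZEROCOPY_RECEIVE caller learn, in the same getsockopt() round trip, how many bytes remain queued (inq) and any pending socket error (err). A hedged usage sketch; the sockopt name is assumed from the struct's established use and error handling is simplified:

#include <linux/tcp.h>		/* struct tcp_zerocopy_receive */
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <sys/socket.h>

static int zc_receive(int fd, void *mapped_area, __u32 map_len)
{
	struct tcp_zerocopy_receive zc = {
		.address = (__u64)(unsigned long)mapped_area,
		.length	 = map_len,
	};
	socklen_t zc_len = sizeof(zc);

	if (getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
		       &zc, &zc_len))
		return -1;
	/* zc.length bytes were mapped; zc.inq bytes remain queued */
	return zc.err ? -zc.err : (int)zc.length;
}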
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 585e07b27333..ecc27a17401a 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -46,5 +46,6 @@
#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */
#define VIRTIO_ID_FS 26 /* virtio filesystem */
#define VIRTIO_ID_PMEM 27 /* virtio pmem */
+#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h
index 86eca3208b6b..a2c006a364e0 100644
--- a/include/uapi/linux/wireless.h
+++ b/include/uapi/linux/wireless.h
@@ -74,6 +74,8 @@
#include <linux/socket.h> /* for "struct sockaddr" et al */
#include <linux/if.h> /* for IFNAMSIZ and co... */
+#include <stddef.h> /* for offsetof */
+
/***************************** VERSION *****************************/
/*
* This constant is used to know the availability of the wireless
@@ -1090,8 +1092,7 @@ struct iw_event {
/* iw_point events are special. First, the payload (extra data) come at
* the end of the event, so they are bigger than IW_EV_POINT_LEN. Second,
* we omit the pointer, so start at an offset. */
-#define IW_EV_POINT_OFF (((char *) &(((struct iw_point *) NULL)->length)) - \
- (char *) NULL)
+#define IW_EV_POINT_OFF offsetof(struct iw_point, length)
#define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \
IW_EV_POINT_OFF)
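The old IW_EV_POINT_OFF computed the member offset by arithmetic on casts of a null pointer, which is undefined behaviour in standard C; offsetof() is the portable spelling of the same value. A minimal illustration with a layout-compatible stand-in (the exact value depends on the ABI's pointer size):

#include <assert.h>
#include <stddef.h>

struct p {			/* stand-in mirroring struct iw_point */
	void *pointer;
	unsigned short length;
	unsigned short flags;
};

int main(void)
{
	/* typically sizeof(void *): no padding precedes 'length' */
	assert(offsetof(struct p, length) == sizeof(void *));
	return 0;
}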
diff --git a/init/Kconfig b/init/Kconfig
index f11846c06176..f095ec64bb91 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1618,6 +1618,19 @@ config KALLSYMS_BASE_RELATIVE
# end of the "standard kernel features (expert users)" menu
# syscall, maps, verifier
+
+config BPF_LSM
+ bool "LSM Instrumentation with BPF"
+ depends on BPF_EVENTS
+ depends on BPF_SYSCALL
+ depends on SECURITY
+ depends on BPF_JIT
+ help
+ Enables instrumentation of the security hooks with eBPF programs for
+ implementing dynamic MAC and audit policies.
+
+ If you are unsure how to answer this question, answer N.
+
config BPF_SYSCALL
bool "Enable bpf() system call"
select BPF
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 046ce5d98033..f2d7be596966 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -29,4 +29,5 @@ obj-$(CONFIG_DEBUG_INFO_BTF) += sysfs_btf.o
endif
ifeq ($(CONFIG_BPF_JIT),y)
obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
+obj-${CONFIG_BPF_LSM} += bpf_lsm.o
endif
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
new file mode 100644
index 000000000000..19636703b24e
--- /dev/null
+++ b/kernel/bpf/bpf_lsm.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2020 Google LLC.
+ */
+
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/lsm_hooks.h>
+#include <linux/bpf_lsm.h>
+#include <linux/kallsyms.h>
+#include <linux/bpf_verifier.h>
+
+/* For every LSM hook that allows attachment of BPF programs, declare a nop
+ * function where a BPF program can be attached.
+ */
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+noinline RET bpf_lsm_##NAME(__VA_ARGS__) \
+{ \
+ return DEFAULT; \
+}
+
+#include <linux/lsm_hook_defs.h>
+#undef LSM_HOOK
+
+#define BPF_LSM_SYM_PREFIX "bpf_lsm_"
+
+int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
+ const struct bpf_prog *prog)
+{
+ if (!prog->gpl_compatible) {
+ bpf_log(vlog,
+ "LSM programs must have a GPL compatible license\n");
+ return -EINVAL;
+ }
+
+ if (strncmp(BPF_LSM_SYM_PREFIX, prog->aux->attach_func_name,
+ sizeof(BPF_LSM_SYM_PREFIX) - 1)) {
+ bpf_log(vlog, "attach_btf_id %u points to wrong type name %s\n",
+ prog->aux->attach_btf_id, prog->aux->attach_func_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+const struct bpf_prog_ops lsm_prog_ops = {
+};
+
+const struct bpf_verifier_ops lsm_verifier_ops = {
+ .get_func_proto = bpf_tracing_func_proto,
+ .is_valid_access = btf_ctx_access,
+};
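For readers unfamiliar with the X-macro trick above: including lsm_hook_defs.h with this LSM_HOOK definition stamps out one attachable stub per hook. Assuming the defs file contains an entry like LSM_HOOK(int, 0, file_open, struct file *file), the expansion would be:

noinline int bpf_lsm_file_open(struct file *file)
{
	return 0;
}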
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 68a89a9f7ccd..26cb51f2db72 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -23,7 +23,7 @@ enum bpf_struct_ops_state {
struct bpf_struct_ops_value {
BPF_STRUCT_OPS_COMMON_VALUE;
- char data[0] ____cacheline_aligned_in_smp;
+ char data[] ____cacheline_aligned_in_smp;
};
struct bpf_struct_ops_map {
@@ -320,6 +320,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
struct bpf_struct_ops_value *uvalue, *kvalue;
const struct btf_member *member;
const struct btf_type *t = st_ops->type;
+ struct bpf_tramp_progs *tprogs = NULL;
void *udata, *kdata;
int prog_fd, err = 0;
void *image;
@@ -343,6 +344,10 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
if (uvalue->state || refcount_read(&uvalue->refcnt))
return -EINVAL;
+ tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
+ if (!tprogs)
+ return -ENOMEM;
+
uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;
@@ -425,10 +430,12 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
goto reset_unlock;
}
+ tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
+ tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
err = arch_prepare_bpf_trampoline(image,
st_map->image + PAGE_SIZE,
&st_ops->func_models[i], 0,
- &prog, 1, NULL, 0, NULL);
+ tprogs, NULL);
if (err < 0)
goto reset_unlock;
@@ -469,6 +476,7 @@ reset_unlock:
memset(uvalue, 0, map->value_size);
memset(kvalue, 0, map->value_size);
unlock:
+ kfree(tprogs);
mutex_unlock(&st_map->lock);
return err;
}
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 7787bdcb5d68..d65c6912bdaf 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -3477,8 +3477,8 @@ errout:
return ERR_PTR(err);
}
-extern char __weak _binary__btf_vmlinux_bin_start[];
-extern char __weak _binary__btf_vmlinux_bin_end[];
+extern char __weak __start_BTF[];
+extern char __weak __stop_BTF[];
extern struct btf *btf_vmlinux;
#define BPF_MAP_TYPE(_id, _ops)
@@ -3605,9 +3605,8 @@ struct btf *btf_parse_vmlinux(void)
}
env->btf = btf;
- btf->data = _binary__btf_vmlinux_bin_start;
- btf->data_size = _binary__btf_vmlinux_bin_end -
- _binary__btf_vmlinux_bin_start;
+ btf->data = __start_BTF;
+ btf->data_size = __stop_BTF - __start_BTF;
err = btf_parse_hdr(env);
if (err)
@@ -3710,23 +3709,60 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
nr_args--;
}
- if (prog->expected_attach_type == BPF_TRACE_FEXIT &&
- arg == nr_args) {
- if (!t)
- /* Default prog with 5 args. 6th arg is retval. */
- return true;
- /* function return type */
- t = btf_type_by_id(btf, t->type);
- } else if (arg >= nr_args) {
+ if (arg > nr_args) {
bpf_log(log, "func '%s' doesn't have %d-th argument\n",
tname, arg + 1);
return false;
+ }
+
+ if (arg == nr_args) {
+ switch (prog->expected_attach_type) {
+ case BPF_LSM_MAC:
+ case BPF_TRACE_FEXIT:
+ /* When LSM programs are attached to void LSM hooks
+ * they use FEXIT trampolines and when attached to
+ * int LSM hooks, they use MODIFY_RETURN trampolines.
+ *
+ * While the LSM programs are BPF_MODIFY_RETURN-like
+ * the check:
+ *
+ * if (ret_type != 'int')
+ * return -EINVAL;
+ *
+ * is _not_ done here. This is still safe as LSM hooks
+ * have only void and int return types.
+ */
+ if (!t)
+ return true;
+ t = btf_type_by_id(btf, t->type);
+ break;
+ case BPF_MODIFY_RETURN:
+ /* For now the BPF_MODIFY_RETURN can only be attached to
+ * functions that return an int.
+ */
+ if (!t)
+ return false;
+
+ t = btf_type_skip_modifiers(btf, t->type, NULL);
+ if (!btf_type_is_int(t)) {
+ bpf_log(log,
+ "ret type %s not allowed for fmod_ret\n",
+ btf_kind_str[BTF_INFO_KIND(t->info)]);
+ return false;
+ }
+ break;
+ default:
+ bpf_log(log, "func '%s' doesn't have %d-th argument\n",
+ tname, arg + 1);
+ return false;
+ }
} else {
if (!t)
/* Default prog with 5 args */
return true;
t = btf_type_by_id(btf, args[arg].type);
}
+
/* skip modifiers */
while (btf_type_is_modifier(t))
t = btf_type_by_id(btf, t->type);
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 4f1472409ef8..cb305e71e7de 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -28,6 +28,69 @@ void cgroup_bpf_offline(struct cgroup *cgrp)
percpu_ref_kill(&cgrp->bpf.refcnt);
}
+static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
+{
+ enum bpf_cgroup_storage_type stype;
+
+ for_each_cgroup_storage_type(stype)
+ bpf_cgroup_storage_free(storages[stype]);
+}
+
+static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
+ struct bpf_prog *prog)
+{
+ enum bpf_cgroup_storage_type stype;
+
+ for_each_cgroup_storage_type(stype) {
+ storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
+ if (IS_ERR(storages[stype])) {
+ storages[stype] = NULL;
+ bpf_cgroup_storages_free(storages);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
+ struct bpf_cgroup_storage *src[])
+{
+ enum bpf_cgroup_storage_type stype;
+
+ for_each_cgroup_storage_type(stype)
+ dst[stype] = src[stype];
+}
+
+static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
+ struct cgroup* cgrp,
+ enum bpf_attach_type attach_type)
+{
+ enum bpf_cgroup_storage_type stype;
+
+ for_each_cgroup_storage_type(stype)
+ bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
+}
+
+static void bpf_cgroup_storages_unlink(struct bpf_cgroup_storage *storages[])
+{
+ enum bpf_cgroup_storage_type stype;
+
+ for_each_cgroup_storage_type(stype)
+ bpf_cgroup_storage_unlink(storages[stype]);
+}
+
+/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
+ * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
+ * doesn't free link memory, which will eventually be done by bpf_link's
+ * release() callback, when its last FD is closed.
+ */
+static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
+{
+ cgroup_put(link->cgroup);
+ link->cgroup = NULL;
+}
+
/**
* cgroup_bpf_release() - put references of all bpf programs and
* release all cgroup bpf data
@@ -37,7 +100,6 @@ static void cgroup_bpf_release(struct work_struct *work)
{
struct cgroup *p, *cgrp = container_of(work, struct cgroup,
bpf.release_work);
- enum bpf_cgroup_storage_type stype;
struct bpf_prog_array *old_array;
unsigned int type;
@@ -49,11 +111,12 @@ static void cgroup_bpf_release(struct work_struct *work)
list_for_each_entry_safe(pl, tmp, progs, node) {
list_del(&pl->node);
- bpf_prog_put(pl->prog);
- for_each_cgroup_storage_type(stype) {
- bpf_cgroup_storage_unlink(pl->storage[stype]);
- bpf_cgroup_storage_free(pl->storage[stype]);
- }
+ if (pl->prog)
+ bpf_prog_put(pl->prog);
+ if (pl->link)
+ bpf_cgroup_link_auto_detach(pl->link);
+ bpf_cgroup_storages_unlink(pl->storage);
+ bpf_cgroup_storages_free(pl->storage);
kfree(pl);
static_branch_dec(&cgroup_bpf_enabled_key);
}
@@ -85,6 +148,18 @@ static void cgroup_bpf_release_fn(struct percpu_ref *ref)
queue_work(system_wq, &cgrp->bpf.release_work);
}
+/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
+ * link or direct prog.
+ */
+static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
+{
+ if (pl->prog)
+ return pl->prog;
+ if (pl->link)
+ return pl->link->link.prog;
+ return NULL;
+}
+
/* count number of elements in the list.
* it's slow but the list cannot be long
*/
@@ -94,7 +169,7 @@ static u32 prog_list_length(struct list_head *head)
u32 cnt = 0;
list_for_each_entry(pl, head, node) {
- if (!pl->prog)
+ if (!prog_list_prog(pl))
continue;
cnt++;
}
@@ -138,7 +213,7 @@ static int compute_effective_progs(struct cgroup *cgrp,
enum bpf_attach_type type,
struct bpf_prog_array **array)
{
- enum bpf_cgroup_storage_type stype;
+ struct bpf_prog_array_item *item;
struct bpf_prog_array *progs;
struct bpf_prog_list *pl;
struct cgroup *p = cgrp;
@@ -163,13 +238,13 @@ static int compute_effective_progs(struct cgroup *cgrp,
continue;
list_for_each_entry(pl, &p->bpf.progs[type], node) {
- if (!pl->prog)
+ if (!prog_list_prog(pl))
continue;
- progs->items[cnt].prog = pl->prog;
- for_each_cgroup_storage_type(stype)
- progs->items[cnt].cgroup_storage[stype] =
- pl->storage[stype];
+ item = &progs->items[cnt];
+ item->prog = prog_list_prog(pl);
+ bpf_cgroup_storages_assign(item->cgroup_storage,
+ pl->storage);
cnt++;
}
} while ((p = cgroup_parent(p)));
@@ -287,19 +362,60 @@ cleanup:
#define BPF_CGROUP_MAX_PROGS 64
+static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
+ struct bpf_prog *prog,
+ struct bpf_cgroup_link *link,
+ struct bpf_prog *replace_prog,
+ bool allow_multi)
+{
+ struct bpf_prog_list *pl;
+
+ /* single-attach case */
+ if (!allow_multi) {
+ if (list_empty(progs))
+ return NULL;
+ return list_first_entry(progs, typeof(*pl), node);
+ }
+
+ list_for_each_entry(pl, progs, node) {
+ if (prog && pl->prog == prog)
+ /* disallow attaching the same prog twice */
+ return ERR_PTR(-EINVAL);
+ if (link && pl->link == link)
+ /* disallow attaching the same link twice */
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* direct prog multi-attach w/ replacement case */
+ if (replace_prog) {
+ list_for_each_entry(pl, progs, node) {
+ if (pl->prog == replace_prog)
+ /* a match found */
+ return pl;
+ }
+ /* prog to replace not found for cgroup */
+ return ERR_PTR(-ENOENT);
+ }
+
+ return NULL;
+}
+
/**
- * __cgroup_bpf_attach() - Attach the program to a cgroup, and
+ * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
* propagate the change to descendants
* @cgrp: The cgroup which descendants to traverse
* @prog: A program to attach
+ * @link: A link to attach
* @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
* @type: Type of attach operation
* @flags: Option flags
*
+ * Exactly one of @prog or @link can be non-null.
* Must be called with cgroup_mutex held.
*/
-int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
- struct bpf_prog *replace_prog,
+int __cgroup_bpf_attach(struct cgroup *cgrp,
+ struct bpf_prog *prog, struct bpf_prog *replace_prog,
+ struct bpf_cgroup_link *link,
enum bpf_attach_type type, u32 flags)
{
u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
@@ -307,14 +423,19 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
struct bpf_prog *old_prog = NULL;
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
- struct bpf_prog_list *pl, *replace_pl = NULL;
- enum bpf_cgroup_storage_type stype;
+ struct bpf_prog_list *pl;
int err;
if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
/* invalid combination */
return -EINVAL;
+ if (link && (prog || replace_prog))
+ /* only either link or prog/replace_prog can be specified */
+ return -EINVAL;
+ if (!!replace_prog != !!(flags & BPF_F_REPLACE))
+ /* replace_prog implies BPF_F_REPLACE, and vice versa */
+ return -EINVAL;
if (!hierarchy_allows_attach(cgrp, type))
return -EPERM;
@@ -329,140 +450,203 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
return -E2BIG;
- if (flags & BPF_F_ALLOW_MULTI) {
- list_for_each_entry(pl, progs, node) {
- if (pl->prog == prog)
- /* disallow attaching the same prog twice */
- return -EINVAL;
- if (pl->prog == replace_prog)
- replace_pl = pl;
- }
- if ((flags & BPF_F_REPLACE) && !replace_pl)
- /* prog to replace not found for cgroup */
- return -ENOENT;
- } else if (!list_empty(progs)) {
- replace_pl = list_first_entry(progs, typeof(*pl), node);
- }
+ pl = find_attach_entry(progs, prog, link, replace_prog,
+ flags & BPF_F_ALLOW_MULTI);
+ if (IS_ERR(pl))
+ return PTR_ERR(pl);
- for_each_cgroup_storage_type(stype) {
- storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
- if (IS_ERR(storage[stype])) {
- storage[stype] = NULL;
- for_each_cgroup_storage_type(stype)
- bpf_cgroup_storage_free(storage[stype]);
- return -ENOMEM;
- }
- }
+ if (bpf_cgroup_storages_alloc(storage, prog ? : link->link.prog))
+ return -ENOMEM;
- if (replace_pl) {
- pl = replace_pl;
+ if (pl) {
old_prog = pl->prog;
- for_each_cgroup_storage_type(stype) {
- old_storage[stype] = pl->storage[stype];
- bpf_cgroup_storage_unlink(old_storage[stype]);
- }
+ bpf_cgroup_storages_unlink(pl->storage);
+ bpf_cgroup_storages_assign(old_storage, pl->storage);
} else {
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
if (!pl) {
- for_each_cgroup_storage_type(stype)
- bpf_cgroup_storage_free(storage[stype]);
+ bpf_cgroup_storages_free(storage);
return -ENOMEM;
}
list_add_tail(&pl->node, progs);
}
pl->prog = prog;
- for_each_cgroup_storage_type(stype)
- pl->storage[stype] = storage[stype];
-
+ pl->link = link;
+ bpf_cgroup_storages_assign(pl->storage, storage);
cgrp->bpf.flags[type] = saved_flags;
err = update_effective_progs(cgrp, type);
if (err)
goto cleanup;
- static_branch_inc(&cgroup_bpf_enabled_key);
- for_each_cgroup_storage_type(stype) {
- if (!old_storage[stype])
- continue;
- bpf_cgroup_storage_free(old_storage[stype]);
- }
- if (old_prog) {
+ bpf_cgroup_storages_free(old_storage);
+ if (old_prog)
bpf_prog_put(old_prog);
- static_branch_dec(&cgroup_bpf_enabled_key);
- }
- for_each_cgroup_storage_type(stype)
- bpf_cgroup_storage_link(storage[stype], cgrp, type);
+ else
+ static_branch_inc(&cgroup_bpf_enabled_key);
+ bpf_cgroup_storages_link(pl->storage, cgrp, type);
return 0;
cleanup:
- /* and cleanup the prog list */
- pl->prog = old_prog;
- for_each_cgroup_storage_type(stype) {
- bpf_cgroup_storage_free(pl->storage[stype]);
- pl->storage[stype] = old_storage[stype];
- bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
+ if (old_prog) {
+ pl->prog = old_prog;
+ pl->link = NULL;
}
- if (!replace_pl) {
+ bpf_cgroup_storages_free(pl->storage);
+ bpf_cgroup_storages_assign(pl->storage, old_storage);
+ bpf_cgroup_storages_link(pl->storage, cgrp, type);
+ if (!old_prog) {
list_del(&pl->node);
kfree(pl);
}
return err;
}
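A minimal userspace sketch of the legacy attach path that now funnels into
__cgroup_bpf_attach() with link == NULL; the cgroup fd, prog fd and attach
type are illustrative:

	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int cgroup_attach_multi(int cgroup_fd, int prog_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.target_fd = cgroup_fd;		/* cgroup directory fd */
		attr.attach_bpf_fd = prog_fd;		/* program to attach */
		attr.attach_type = BPF_CGROUP_INET_EGRESS;
		attr.attach_flags = BPF_F_ALLOW_MULTI;	/* append, don't override */

		return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
	}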
+/* Swap updated BPF program for given link in effective program arrays across
+ * all descendant cgroups. This function is guaranteed to succeed.
+ */
+static void replace_effective_prog(struct cgroup *cgrp,
+ enum bpf_attach_type type,
+ struct bpf_cgroup_link *link)
+{
+ struct bpf_prog_array_item *item;
+ struct cgroup_subsys_state *css;
+ struct bpf_prog_array *progs;
+ struct bpf_prog_list *pl;
+ struct list_head *head;
+ struct cgroup *cg;
+ int pos;
+
+ css_for_each_descendant_pre(css, &cgrp->self) {
+ struct cgroup *desc = container_of(css, struct cgroup, self);
+
+ if (percpu_ref_is_zero(&desc->bpf.refcnt))
+ continue;
+
+ /* find position of link in effective progs array */
+ for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
+ if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
+ continue;
+
+ head = &cg->bpf.progs[type];
+ list_for_each_entry(pl, head, node) {
+ if (!prog_list_prog(pl))
+ continue;
+ if (pl->link == link)
+ goto found;
+ pos++;
+ }
+ }
+found:
+ BUG_ON(!cg);
+ progs = rcu_dereference_protected(
+ desc->bpf.effective[type],
+ lockdep_is_held(&cgroup_mutex));
+ item = &progs->items[pos];
+ WRITE_ONCE(item->prog, link->link.prog);
+ }
+}
+
/**
- * __cgroup_bpf_detach() - Detach the program from a cgroup, and
+ * __cgroup_bpf_replace() - Replace link's program and propagate the change
+ * to descendants
+ * @cgrp: The cgroup which descendants to traverse
+ * @link: A link for which to replace BPF program
+ * @new_prog: The program to swap in for the link's current program
+ *
+ * Must be called with cgroup_mutex held.
+ */
+int __cgroup_bpf_replace(struct cgroup *cgrp, struct bpf_cgroup_link *link,
+ struct bpf_prog *new_prog)
+{
+ struct list_head *progs = &cgrp->bpf.progs[link->type];
+ struct bpf_prog *old_prog;
+ struct bpf_prog_list *pl;
+ bool found = false;
+
+ if (link->link.prog->type != new_prog->type)
+ return -EINVAL;
+
+ list_for_each_entry(pl, progs, node) {
+ if (pl->link == link) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return -ENOENT;
+
+ old_prog = xchg(&link->link.prog, new_prog);
+ replace_effective_prog(cgrp, link->type, link);
+ bpf_prog_put(old_prog);
+ return 0;
+}
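A hedged userspace sketch of driving this replace path, assuming the
BPF_LINK_UPDATE command that the rest of this series wires up on top of it
(headers as in the attach sketch above; fds illustrative):

	static int cgroup_link_update(int link_fd, int new_prog_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.link_update.link_fd = link_fd;		/* from BPF_LINK_CREATE */
		attr.link_update.new_prog_fd = new_prog_fd;	/* same prog type required */

		return syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
	}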
+
+static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
+ struct bpf_prog *prog,
+ struct bpf_cgroup_link *link,
+ bool allow_multi)
+{
+ struct bpf_prog_list *pl;
+
+ if (!allow_multi) {
+ if (list_empty(progs))
+ /* report error when trying to detach and nothing is attached */
+ return ERR_PTR(-ENOENT);
+
+ /* to maintain backward compatibility NONE and OVERRIDE cgroups
+ * allow detaching with invalid FD (prog==NULL) in legacy mode
+ */
+ return list_first_entry(progs, typeof(*pl), node);
+ }
+
+ if (!prog && !link)
+ /* to detach MULTI prog the user has to specify valid FD
+ * of the program or link to be detached
+ */
+ return ERR_PTR(-EINVAL);
+
+ /* find the prog or link and detach it */
+ list_for_each_entry(pl, progs, node) {
+ if (pl->prog == prog && pl->link == link)
+ return pl;
+ }
+ return ERR_PTR(-ENOENT);
+}
+
+/**
+ * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
* propagate the change to descendants
* @cgrp: The cgroup which descendants to traverse
* @prog: A program to detach or NULL
+ * @link: A link to detach or NULL
* @type: Type of detach operation
*
+ * At most one of @prog or @link can be non-NULL.
* Must be called with cgroup_mutex held.
*/
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type)
+ struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
struct list_head *progs = &cgrp->bpf.progs[type];
- enum bpf_cgroup_storage_type stype;
u32 flags = cgrp->bpf.flags[type];
- struct bpf_prog *old_prog = NULL;
struct bpf_prog_list *pl;
+ struct bpf_prog *old_prog;
int err;
- if (flags & BPF_F_ALLOW_MULTI) {
- if (!prog)
- /* to detach MULTI prog the user has to specify valid FD
- * of the program to be detached
- */
- return -EINVAL;
- } else {
- if (list_empty(progs))
- /* report error when trying to detach and nothing is attached */
- return -ENOENT;
- }
+ if (prog && link)
+ /* only one of prog or link can be specified */
+ return -EINVAL;
- if (flags & BPF_F_ALLOW_MULTI) {
- /* find the prog and detach it */
- list_for_each_entry(pl, progs, node) {
- if (pl->prog != prog)
- continue;
- old_prog = prog;
- /* mark it deleted, so it's ignored while
- * recomputing effective
- */
- pl->prog = NULL;
- break;
- }
- if (!old_prog)
- return -ENOENT;
- } else {
- /* to maintain backward compatibility NONE and OVERRIDE cgroups
- * allow detaching with invalid FD (prog==NULL)
- */
- pl = list_first_entry(progs, typeof(*pl), node);
- old_prog = pl->prog;
- pl->prog = NULL;
- }
+ pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
+ if (IS_ERR(pl))
+ return PTR_ERR(pl);
+
+ /* mark it deleted, so it's ignored while recomputing effective */
+ old_prog = pl->prog;
+ pl->prog = NULL;
+ pl->link = NULL;
err = update_effective_progs(cgrp, type);
if (err)
@@ -470,22 +654,21 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
/* now can actually delete it from this cgroup list */
list_del(&pl->node);
- for_each_cgroup_storage_type(stype) {
- bpf_cgroup_storage_unlink(pl->storage[stype]);
- bpf_cgroup_storage_free(pl->storage[stype]);
- }
+ bpf_cgroup_storages_unlink(pl->storage);
+ bpf_cgroup_storages_free(pl->storage);
kfree(pl);
if (list_empty(progs))
/* last program was detached, reset flags to zero */
cgrp->bpf.flags[type] = 0;
-
- bpf_prog_put(old_prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
static_branch_dec(&cgroup_bpf_enabled_key);
return 0;
cleanup:
- /* and restore back old_prog */
+ /* restore back prog or link */
pl->prog = old_prog;
+ pl->link = link;
return err;
}
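A matching userspace sketch of the detach side (headers as in the attach
sketch above); with BPF_F_ALLOW_MULTI a valid prog fd is mandatory, while
legacy NONE/OVERRIDE attachments also accept attach_bpf_fd == 0:

	static int cgroup_detach(int cgroup_fd, int prog_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.target_fd = cgroup_fd;
		attr.attach_bpf_fd = prog_fd;	/* may be 0 in legacy mode */
		attr.attach_type = BPF_CGROUP_INET_EGRESS;

		return syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
	}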
@@ -498,6 +681,7 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
struct list_head *progs = &cgrp->bpf.progs[type];
u32 flags = cgrp->bpf.flags[type];
struct bpf_prog_array *effective;
+ struct bpf_prog *prog;
int cnt, ret = 0, i;
effective = rcu_dereference_protected(cgrp->bpf.effective[type],
@@ -528,7 +712,8 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
i = 0;
list_for_each_entry(pl, progs, node) {
- id = pl->prog->aux->id;
+ prog = prog_list_prog(pl);
+ id = prog->aux->id;
if (copy_to_user(prog_ids + i, &id, sizeof(id)))
return -EFAULT;
if (++i == cnt)
@@ -558,8 +743,8 @@ int cgroup_bpf_prog_attach(const union bpf_attr *attr,
}
}
- ret = cgroup_bpf_attach(cgrp, prog, replace_prog, attr->attach_type,
- attr->attach_flags);
+ ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
+ attr->attach_type, attr->attach_flags);
if (replace_prog)
bpf_prog_put(replace_prog);
@@ -581,7 +766,7 @@ int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
if (IS_ERR(prog))
prog = NULL;
- ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
+ ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
if (prog)
bpf_prog_put(prog);
@@ -589,6 +774,90 @@ int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
return ret;
}
+static void bpf_cgroup_link_release(struct bpf_link *link)
+{
+ struct bpf_cgroup_link *cg_link =
+ container_of(link, struct bpf_cgroup_link, link);
+
+	/* link might have been auto-detached by the dying cgroup already;
+	 * in that case our work is done here
+ */
+ if (!cg_link->cgroup)
+ return;
+
+ mutex_lock(&cgroup_mutex);
+
+	/* re-check cgroup under lock */
+ if (!cg_link->cgroup) {
+ mutex_unlock(&cgroup_mutex);
+ return;
+ }
+
+ WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
+ cg_link->type));
+
+ mutex_unlock(&cgroup_mutex);
+ cgroup_put(cg_link->cgroup);
+}
+
+static void bpf_cgroup_link_dealloc(struct bpf_link *link)
+{
+ struct bpf_cgroup_link *cg_link =
+ container_of(link, struct bpf_cgroup_link, link);
+
+ kfree(cg_link);
+}
+
+const struct bpf_link_ops bpf_cgroup_link_lops = {
+ .release = bpf_cgroup_link_release,
+ .dealloc = bpf_cgroup_link_dealloc,
+};
+
+int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ struct bpf_cgroup_link *link;
+ struct file *link_file;
+ struct cgroup *cgrp;
+ int err, link_fd;
+
+ if (attr->link_create.flags)
+ return -EINVAL;
+
+ cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
+ if (IS_ERR(cgrp))
+ return PTR_ERR(cgrp);
+
+ link = kzalloc(sizeof(*link), GFP_USER);
+ if (!link) {
+ err = -ENOMEM;
+ goto out_put_cgroup;
+ }
+ bpf_link_init(&link->link, &bpf_cgroup_link_lops, prog);
+ link->cgroup = cgrp;
+ link->type = attr->link_create.attach_type;
+
+ link_file = bpf_link_new_file(&link->link, &link_fd);
+ if (IS_ERR(link_file)) {
+ kfree(link);
+ err = PTR_ERR(link_file);
+ goto out_put_cgroup;
+ }
+
+ err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
+ BPF_F_ALLOW_MULTI);
+ if (err) {
+ bpf_link_cleanup(&link->link, link_file, link_fd);
+ goto out_put_cgroup;
+ }
+
+ fd_install(link_fd, link_file);
+ return link_fd;
+
+out_put_cgroup:
+ cgroup_put(cgrp);
+ return err;
+}
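A userspace sketch of creating a cgroup link (headers as above); unlike
BPF_PROG_ATTACH, the attachment then lives exactly as long as the returned
fd (or a bpffs pin of it) and cannot be overridden by another process:

	static int cgroup_link_create(int cgroup_fd, int prog_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.link_create.prog_fd = prog_fd;
		attr.link_create.target_fd = cgroup_fd;	/* cgroup directory fd */
		attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;

		return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
	}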
+
int cgroup_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 973a20d49749..916f5132a984 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -97,7 +97,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
fp->aux->prog = fp;
fp->jit_requested = ebpf_jit_enabled();
- INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
+ INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
return fp;
}
@@ -523,22 +523,22 @@ int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden __read_mostly;
long bpf_jit_limit __read_mostly;
-static __always_inline void
-bpf_get_prog_addr_region(const struct bpf_prog *prog,
- unsigned long *symbol_start,
- unsigned long *symbol_end)
+static void
+bpf_prog_ksym_set_addr(struct bpf_prog *prog)
{
const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
unsigned long addr = (unsigned long)hdr;
WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
- *symbol_start = addr;
- *symbol_end = addr + hdr->pages * PAGE_SIZE;
+ prog->aux->ksym.start = (unsigned long) prog->bpf_func;
+ prog->aux->ksym.end = addr + hdr->pages * PAGE_SIZE;
}
-void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
+static void
+bpf_prog_ksym_set_name(struct bpf_prog *prog)
{
+ char *sym = prog->aux->ksym.name;
const char *end = sym + KSYM_NAME_LEN;
const struct btf_type *type;
const char *func_name;
@@ -572,36 +572,27 @@ void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
*sym = 0;
}
-static __always_inline unsigned long
-bpf_get_prog_addr_start(struct latch_tree_node *n)
+static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
{
- unsigned long symbol_start, symbol_end;
- const struct bpf_prog_aux *aux;
-
- aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
- bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
-
- return symbol_start;
+ return container_of(n, struct bpf_ksym, tnode)->start;
}
static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
struct latch_tree_node *b)
{
- return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
+ return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
}
static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
unsigned long val = (unsigned long)key;
- unsigned long symbol_start, symbol_end;
- const struct bpf_prog_aux *aux;
+ const struct bpf_ksym *ksym;
- aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
- bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
+ ksym = container_of(n, struct bpf_ksym, tnode);
- if (val < symbol_start)
+ if (val < ksym->start)
return -1;
- if (val >= symbol_end)
+ if (val >= ksym->end)
return 1;
return 0;
@@ -616,20 +607,29 @@ static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;
-static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
+void bpf_ksym_add(struct bpf_ksym *ksym)
{
- WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
- list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
- latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+ spin_lock_bh(&bpf_lock);
+ WARN_ON_ONCE(!list_empty(&ksym->lnode));
+ list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
+ latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
+ spin_unlock_bh(&bpf_lock);
}
-static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
+static void __bpf_ksym_del(struct bpf_ksym *ksym)
{
- if (list_empty(&aux->ksym_lnode))
+ if (list_empty(&ksym->lnode))
return;
- latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
- list_del_rcu(&aux->ksym_lnode);
+ latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
+ list_del_rcu(&ksym->lnode);
+}
+
+void bpf_ksym_del(struct bpf_ksym *ksym)
+{
+ spin_lock_bh(&bpf_lock);
+ __bpf_ksym_del(ksym);
+ spin_unlock_bh(&bpf_lock);
}
static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
@@ -639,8 +639,8 @@ static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
- return list_empty(&fp->aux->ksym_lnode) ||
- fp->aux->ksym_lnode.prev == LIST_POISON2;
+ return list_empty(&fp->aux->ksym.lnode) ||
+ fp->aux->ksym.lnode.prev == LIST_POISON2;
}
void bpf_prog_kallsyms_add(struct bpf_prog *fp)
@@ -649,9 +649,11 @@ void bpf_prog_kallsyms_add(struct bpf_prog *fp)
!capable(CAP_SYS_ADMIN))
return;
- spin_lock_bh(&bpf_lock);
- bpf_prog_ksym_node_add(fp->aux);
- spin_unlock_bh(&bpf_lock);
+ bpf_prog_ksym_set_addr(fp);
+ bpf_prog_ksym_set_name(fp);
+ fp->aux->ksym.prog = true;
+
+ bpf_ksym_add(&fp->aux->ksym);
}
void bpf_prog_kallsyms_del(struct bpf_prog *fp)
@@ -659,33 +661,30 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp)
if (!bpf_prog_kallsyms_candidate(fp))
return;
- spin_lock_bh(&bpf_lock);
- bpf_prog_ksym_node_del(fp->aux);
- spin_unlock_bh(&bpf_lock);
+ bpf_ksym_del(&fp->aux->ksym);
}
-static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
+static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{
struct latch_tree_node *n;
n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
- return n ?
- container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
- NULL;
+ return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
}
const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym)
{
- unsigned long symbol_start, symbol_end;
- struct bpf_prog *prog;
+ struct bpf_ksym *ksym;
char *ret = NULL;
rcu_read_lock();
- prog = bpf_prog_kallsyms_find(addr);
- if (prog) {
- bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
- bpf_get_prog_name(prog, sym);
+ ksym = bpf_ksym_find(addr);
+ if (ksym) {
+ unsigned long symbol_start = ksym->start;
+ unsigned long symbol_end = ksym->end;
+
+ strncpy(sym, ksym->name, KSYM_NAME_LEN);
ret = sym;
if (size)
@@ -703,19 +702,28 @@ bool is_bpf_text_address(unsigned long addr)
bool ret;
rcu_read_lock();
- ret = bpf_prog_kallsyms_find(addr) != NULL;
+ ret = bpf_ksym_find(addr) != NULL;
rcu_read_unlock();
return ret;
}
+static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
+{
+ struct bpf_ksym *ksym = bpf_ksym_find(addr);
+
+ return ksym && ksym->prog ?
+ container_of(ksym, struct bpf_prog_aux, ksym)->prog :
+ NULL;
+}
+
const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
const struct exception_table_entry *e = NULL;
struct bpf_prog *prog;
rcu_read_lock();
- prog = bpf_prog_kallsyms_find(addr);
+ prog = bpf_prog_ksym_find(addr);
if (!prog)
goto out;
if (!prog->aux->num_exentries)
@@ -730,7 +738,7 @@ out:
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym)
{
- struct bpf_prog_aux *aux;
+ struct bpf_ksym *ksym;
unsigned int it = 0;
int ret = -ERANGE;
@@ -738,13 +746,13 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
return ret;
rcu_read_lock();
- list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
+ list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
if (it++ != symnum)
continue;
- bpf_get_prog_name(aux->prog, sym);
+ strncpy(sym, ksym->name, KSYM_NAME_LEN);
- *value = (unsigned long)aux->prog->bpf_func;
+ *value = ksym->start;
*type = BPF_SYM_ELF_TYPE;
ret = 0;
@@ -2148,7 +2156,9 @@ const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
+const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
+const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
diff --git a/kernel/bpf/dispatcher.c b/kernel/bpf/dispatcher.c
index b3e5b214fed8..2444bd15cc2d 100644
--- a/kernel/bpf/dispatcher.c
+++ b/kernel/bpf/dispatcher.c
@@ -113,7 +113,7 @@ static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
noff = 0;
} else {
old = d->image + d->image_off;
- noff = d->image_off ^ (BPF_IMAGE_SIZE / 2);
+ noff = d->image_off ^ (PAGE_SIZE / 2);
}
new = d->num_progs ? d->image + noff : NULL;
@@ -140,9 +140,10 @@ void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
mutex_lock(&d->mutex);
if (!d->image) {
- d->image = bpf_image_alloc();
+ d->image = bpf_jit_alloc_exec_page();
if (!d->image)
goto out;
+ bpf_image_ksym_add(d->image, &d->ksym);
}
prev_num_progs = d->num_progs;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index a1468e3f5af2..d541c8486c95 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -27,9 +27,62 @@
.map_delete_batch = \
generic_map_delete_batch
+/*
+ * The bucket lock has two protection scopes:
+ *
+ * 1) Serializing concurrent operations from BPF programs on different
+ * CPUs
+ *
+ * 2) Serializing concurrent operations from BPF programs and sys_bpf()
+ *
+ * BPF programs can execute in any context including perf, kprobes and
+ * tracing. As there are almost no limits on where perf, kprobes and tracing
+ * can be invoked from, the lock operations need to be protected against
+ * deadlocks. Deadlocks can be caused by recursion and by an invocation in
+ * the lock held section when functions which acquire this lock are invoked
+ * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
+ * variable bpf_prog_active, which prevents BPF programs attached to perf
+ * events, kprobes and tracing from being invoked before the prior invocation
+ * from one of these contexts has completed. sys_bpf() uses the same mechanism
+ * by pinning the task to the current CPU and incrementing the recursion
+ * protection across the map operation.
+ *
+ * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
+ * operations like memory allocations (even with GFP_ATOMIC) from atomic
+ * contexts. This is required because even with GFP_ATOMIC the memory
+ * allocator calls into code paths which acquire locks with long held lock
+ * sections. To ensure deterministic behaviour, these locks are regular
+ * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
+ * true atomic contexts on an RT kernel are the low level hardware
+ * handling, scheduling, low level interrupt handling, NMIs etc. None of
+ * these contexts should ever do memory allocations.
+ *
+ * As regular device interrupt handlers and soft interrupts are forced into
+ * thread context, the existing code which does
+ * spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
+ * just works.
+ *
+ * In theory the BPF locks could be converted to regular spinlocks as well,
+ * but the bucket locks and percpu_freelist locks can be taken from
+ * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
+ * atomic contexts even on RT. These mechanisms require preallocated maps,
+ * so there is no need to invoke memory allocations within the lock held
+ * sections.
+ *
+ * BPF maps which need dynamic allocation are only used from (forced)
+ * thread context on RT and can therefore use regular spinlocks which in
+ * turn allow invoking memory allocations from the lock held section.
+ *
+ * On a non-RT kernel this distinction is neither possible nor required.
+ * spinlock maps to raw_spinlock and the extra code is optimized out by the
+ * compiler.
+ */
struct bucket {
struct hlist_nulls_head head;
- raw_spinlock_t lock;
+ union {
+ raw_spinlock_t raw_lock;
+ spinlock_t lock;
+ };
};
struct bpf_htab {
@@ -65,9 +118,54 @@ struct htab_elem {
struct bpf_lru_node lru_node;
};
u32 hash;
- char key[0] __aligned(8);
+ char key[] __aligned(8);
};
+static inline bool htab_is_prealloc(const struct bpf_htab *htab)
+{
+ return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
+}
+
+static inline bool htab_use_raw_lock(const struct bpf_htab *htab)
+{
+ return (!IS_ENABLED(CONFIG_PREEMPT_RT) || htab_is_prealloc(htab));
+}
+
+static void htab_init_buckets(struct bpf_htab *htab)
+{
+ unsigned i;
+
+ for (i = 0; i < htab->n_buckets; i++) {
+ INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
+ if (htab_use_raw_lock(htab))
+ raw_spin_lock_init(&htab->buckets[i].raw_lock);
+ else
+ spin_lock_init(&htab->buckets[i].lock);
+ }
+}
+
+static inline unsigned long htab_lock_bucket(const struct bpf_htab *htab,
+ struct bucket *b)
+{
+ unsigned long flags;
+
+ if (htab_use_raw_lock(htab))
+ raw_spin_lock_irqsave(&b->raw_lock, flags);
+ else
+ spin_lock_irqsave(&b->lock, flags);
+ return flags;
+}
+
+static inline void htab_unlock_bucket(const struct bpf_htab *htab,
+ struct bucket *b,
+ unsigned long flags)
+{
+ if (htab_use_raw_lock(htab))
+ raw_spin_unlock_irqrestore(&b->raw_lock, flags);
+ else
+ spin_unlock_irqrestore(&b->lock, flags);
+}
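The resulting lock selection, summarized as a sketch derived from
htab_use_raw_lock() above:

	/*
	 *			!PREEMPT_RT	PREEMPT_RT
	 * preallocated map	raw_spinlock_t	raw_spinlock_t
	 * BPF_F_NO_PREALLOC	raw_spinlock_t	spinlock_t (sleeps on RT)
	 */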
+
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
static bool htab_is_lru(const struct bpf_htab *htab)
@@ -82,11 +180,6 @@ static bool htab_is_percpu(const struct bpf_htab *htab)
htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}
-static bool htab_is_prealloc(const struct bpf_htab *htab)
-{
- return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
-}
-
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
void __percpu *pptr)
{
@@ -328,8 +421,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
struct bpf_htab *htab;
- int err, i;
u64 cost;
+ int err;
htab = kzalloc(sizeof(*htab), GFP_USER);
if (!htab)
@@ -391,10 +484,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
else
htab->hashrnd = get_random_int();
- for (i = 0; i < htab->n_buckets; i++) {
- INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
- raw_spin_lock_init(&htab->buckets[i].lock);
- }
+ htab_init_buckets(htab);
if (prealloc) {
err = prealloc_init(htab);
@@ -602,7 +692,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
b = __select_bucket(htab, tgt_l->hash);
head = &b->head;
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l == tgt_l) {
@@ -610,7 +700,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
break;
}
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
return l == tgt_l;
}
@@ -686,15 +776,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
struct htab_elem *l = container_of(head, struct htab_elem, rcu);
struct bpf_htab *htab = l->htab;
- /* must increment bpf_prog_active to avoid kprobe+bpf triggering while
- * we're calling kfree, otherwise deadlock is possible if kprobes
- * are placed somewhere inside of slub
- */
- preempt_disable();
- __this_cpu_inc(bpf_prog_active);
htab_elem_free(htab, l);
- __this_cpu_dec(bpf_prog_active);
- preempt_enable();
}
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
@@ -884,8 +966,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
*/
}
- /* bpf_map_update_elem() can be called in_irq() */
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -926,7 +1007,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
}
ret = 0;
err:
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
return ret;
}
@@ -964,8 +1045,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
return -ENOMEM;
memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);
- /* bpf_map_update_elem() can be called in_irq() */
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -984,7 +1064,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
ret = 0;
err:
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
if (ret)
bpf_lru_push_free(&htab->lru, &l_new->lru_node);
@@ -1019,8 +1099,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
b = __select_bucket(htab, hash);
head = &b->head;
- /* bpf_map_update_elem() can be called in_irq() */
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -1043,7 +1122,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
}
ret = 0;
err:
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
return ret;
}
@@ -1083,8 +1162,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
return -ENOMEM;
}
- /* bpf_map_update_elem() can be called in_irq() */
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -1106,7 +1184,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
}
ret = 0;
err:
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
if (l_new)
bpf_lru_push_free(&htab->lru, &l_new->lru_node);
return ret;
@@ -1144,7 +1222,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
b = __select_bucket(htab, hash);
head = &b->head;
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
l = lookup_elem_raw(head, hash, key, key_size);
@@ -1154,7 +1232,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
ret = 0;
}
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
return ret;
}
@@ -1176,7 +1254,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
b = __select_bucket(htab, hash);
head = &b->head;
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
l = lookup_elem_raw(head, hash, key, key_size);
@@ -1185,7 +1263,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
ret = 0;
}
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
if (l)
bpf_lru_push_free(&htab->lru, &l->lru_node);
return ret;
@@ -1325,8 +1403,7 @@ alloc:
}
again:
- preempt_disable();
- this_cpu_inc(bpf_prog_active);
+ bpf_disable_instrumentation();
rcu_read_lock();
again_nocopy:
dst_key = keys;
@@ -1335,7 +1412,7 @@ again_nocopy:
head = &b->head;
/* do not grab the lock unless need it (bucket_cnt > 0). */
if (locked)
- raw_spin_lock_irqsave(&b->lock, flags);
+ flags = htab_lock_bucket(htab, b);
bucket_cnt = 0;
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
@@ -1352,10 +1429,9 @@ again_nocopy:
/* Note that since bucket_cnt > 0 here, it is implicit
* that the locked was grabbed, so release it.
*/
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
rcu_read_unlock();
- this_cpu_dec(bpf_prog_active);
- preempt_enable();
+ bpf_enable_instrumentation();
goto after_loop;
}
@@ -1364,10 +1440,9 @@ again_nocopy:
/* Note that since bucket_cnt > 0 here, it is implicit
* that the locked was grabbed, so release it.
*/
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
rcu_read_unlock();
- this_cpu_dec(bpf_prog_active);
- preempt_enable();
+ bpf_enable_instrumentation();
kvfree(keys);
kvfree(values);
goto alloc;
@@ -1418,7 +1493,7 @@ again_nocopy:
dst_val += value_size;
}
- raw_spin_unlock_irqrestore(&b->lock, flags);
+ htab_unlock_bucket(htab, b, flags);
locked = false;
while (node_to_free) {
@@ -1437,8 +1512,7 @@ next_batch:
}
rcu_read_unlock();
- this_cpu_dec(bpf_prog_active);
- preempt_enable();
+ bpf_enable_instrumentation();
if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
key_size * bucket_cnt) ||
copy_to_user(uvalues + total * value_size, values,
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index d8b7b110a1c5..bafc53ddd350 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -12,6 +12,8 @@
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
+#include <linux/pid_namespace.h>
+#include <linux/proc_ns.h>
#include "../../lib/kstrtox.h"
@@ -338,6 +340,24 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
.ret_type = RET_INTEGER,
};
+BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
+{
+ struct cgroup *cgrp = task_dfl_cgroup(current);
+ struct cgroup *ancestor;
+
+ ancestor = cgroup_ancestor(cgrp, ancestor_level);
+ if (!ancestor)
+ return 0;
+ return cgroup_id(ancestor);
+}
+
+const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
+ .func = bpf_get_current_ancestor_cgroup_id,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_ANYTHING,
+};
+
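A minimal BPF-side sketch using the new helper via libbpf conventions; the
attach point, ancestor level and expected cgroup id are illustrative:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("tracepoint/syscalls/sys_enter_openat")
	int check_ancestor(void *ctx)
	{
		/* id of the current task's cgroup ancestor at level 2,
		 * or 0 if the cgroup has no ancestor that deep
		 */
		__u64 cgid = bpf_get_current_ancestor_cgroup_id(2);

		if (cgid != 12345)	/* hypothetical target cgroup id */
			return 0;
		/* ... per-subtree logic ... */
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";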
#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
@@ -499,3 +519,46 @@ const struct bpf_func_proto bpf_strtoul_proto = {
.arg4_type = ARG_PTR_TO_LONG,
};
#endif
+
+BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
+ struct bpf_pidns_info *, nsdata, u32, size)
+{
+ struct task_struct *task = current;
+ struct pid_namespace *pidns;
+ int err = -EINVAL;
+
+ if (unlikely(size != sizeof(struct bpf_pidns_info)))
+ goto clear;
+
+ if (unlikely((u64)(dev_t)dev != dev))
+ goto clear;
+
+ if (unlikely(!task))
+ goto clear;
+
+ pidns = task_active_pid_ns(task);
+ if (unlikely(!pidns)) {
+ err = -ENOENT;
+ goto clear;
+ }
+
+ if (!ns_match(&pidns->ns, (dev_t)dev, ino))
+ goto clear;
+
+ nsdata->pid = task_pid_nr_ns(task, pidns);
+ nsdata->tgid = task_tgid_nr_ns(task, pidns);
+ return 0;
+clear:
+ memset((void *)nsdata, 0, (size_t) size);
+ return err;
+}
+
+const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
+ .func = bpf_get_ns_current_pid_tgid,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_ANYTHING,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg4_type = ARG_CONST_SIZE,
+};
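A minimal BPF-side sketch of the namespace-aware helper; the dev/ino pair
identifying the target pid namespace is assumed to be filled in by the
loader (e.g. from stat() on /proc/self/ns/pid), and the attach point is
illustrative:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	const volatile __u64 pidns_dev;	/* set by the loader before load */
	const volatile __u64 pidns_ino;

	SEC("tracepoint/syscalls/sys_enter_write")
	int ns_pid(void *ctx)
	{
		struct bpf_pidns_info ns = {};

		if (bpf_get_ns_current_pid_tgid(pidns_dev, pidns_ino,
						&ns, sizeof(ns)))
			return 0;	/* wrong ns or bad args; ns was zeroed */
		bpf_printk("pid=%u tgid=%u", ns.pid, ns.tgid);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";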
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 5e40e7fccc21..95087d9f4ed3 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -25,6 +25,7 @@ enum bpf_type {
BPF_TYPE_UNSPEC = 0,
BPF_TYPE_PROG,
BPF_TYPE_MAP,
+ BPF_TYPE_LINK,
};
static void *bpf_any_get(void *raw, enum bpf_type type)
@@ -36,6 +37,9 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
case BPF_TYPE_MAP:
bpf_map_inc_with_uref(raw);
break;
+ case BPF_TYPE_LINK:
+ bpf_link_inc(raw);
+ break;
default:
WARN_ON_ONCE(1);
break;
@@ -53,6 +57,9 @@ static void bpf_any_put(void *raw, enum bpf_type type)
case BPF_TYPE_MAP:
bpf_map_put_with_uref(raw);
break;
+ case BPF_TYPE_LINK:
+ bpf_link_put(raw);
+ break;
default:
WARN_ON_ONCE(1);
break;
@@ -63,20 +70,32 @@ static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
{
void *raw;
- *type = BPF_TYPE_MAP;
raw = bpf_map_get_with_uref(ufd);
- if (IS_ERR(raw)) {
+ if (!IS_ERR(raw)) {
+ *type = BPF_TYPE_MAP;
+ return raw;
+ }
+
+ raw = bpf_prog_get(ufd);
+ if (!IS_ERR(raw)) {
*type = BPF_TYPE_PROG;
- raw = bpf_prog_get(ufd);
+ return raw;
}
- return raw;
+ raw = bpf_link_get_from_fd(ufd);
+ if (!IS_ERR(raw)) {
+ *type = BPF_TYPE_LINK;
+ return raw;
+ }
+
+ return ERR_PTR(-EINVAL);
}
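With links now recognized by bpffs, a link fd can be pinned like progs and
maps so the attachment survives the owning process; a userspace sketch
(path illustrative, headers as in the earlier sketches):

	static int pin_link(int link_fd, const char *path)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.pathname = (__u64)(unsigned long)path;
		attr.bpf_fd = link_fd;

		return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
	}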
static const struct inode_operations bpf_dir_iops;
static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops = { };
+static const struct inode_operations bpf_link_iops = { };
static struct inode *bpf_get_inode(struct super_block *sb,
const struct inode *dir,
@@ -114,6 +133,8 @@ static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
*type = BPF_TYPE_PROG;
else if (inode->i_op == &bpf_map_iops)
*type = BPF_TYPE_MAP;
+ else if (inode->i_op == &bpf_link_iops)
+ *type = BPF_TYPE_LINK;
else
return -EACCES;
@@ -335,6 +356,12 @@ static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
&bpffs_map_fops : &bpffs_obj_fops);
}
+static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
+{
+ return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops,
+ &bpffs_obj_fops);
+}
+
static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
@@ -411,6 +438,9 @@ static int bpf_obj_do_pin(const char __user *pathname, void *raw,
case BPF_TYPE_MAP:
ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
break;
+ case BPF_TYPE_LINK:
+ ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
+ break;
default:
ret = -EPERM;
}
@@ -487,6 +517,8 @@ int bpf_obj_get_user(const char __user *pathname, int flags)
ret = bpf_prog_new_fd(raw);
else if (type == BPF_TYPE_MAP)
ret = bpf_map_new_fd(raw, f_flags);
+ else if (type == BPF_TYPE_LINK)
+ ret = bpf_link_new_fd(raw);
else
return -ENOENT;
@@ -504,6 +536,8 @@ static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type
if (inode->i_op == &bpf_map_iops)
return ERR_PTR(-EINVAL);
+ if (inode->i_op == &bpf_link_iops)
+ return ERR_PTR(-EINVAL);
if (inode->i_op != &bpf_prog_iops)
return ERR_PTR(-EACCES);
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 56e6c75d354d..65c236cf341e 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -25,7 +25,7 @@ struct lpm_trie_node {
struct lpm_trie_node __rcu *child[2];
u32 prefixlen;
u32 flags;
- u8 data[0];
+ u8 data[];
};
struct lpm_trie {
@@ -34,7 +34,7 @@ struct lpm_trie {
size_t n_entries;
size_t max_prefixlen;
size_t data_size;
- raw_spinlock_t lock;
+ spinlock_t lock;
};
/* This trie implements a longest prefix match algorithm that can be used to
@@ -315,7 +315,7 @@ static int trie_update_elem(struct bpf_map *map,
if (key->prefixlen > trie->max_prefixlen)
return -EINVAL;
- raw_spin_lock_irqsave(&trie->lock, irq_flags);
+ spin_lock_irqsave(&trie->lock, irq_flags);
/* Allocate and fill a new node */
@@ -422,7 +422,7 @@ out:
kfree(im_node);
}
- raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
+ spin_unlock_irqrestore(&trie->lock, irq_flags);
return ret;
}
@@ -442,7 +442,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
if (key->prefixlen > trie->max_prefixlen)
return -EINVAL;
- raw_spin_lock_irqsave(&trie->lock, irq_flags);
+ spin_lock_irqsave(&trie->lock, irq_flags);
/* Walk the tree looking for an exact key/length match and keeping
* track of the path we traverse. We will need to know the node
@@ -518,7 +518,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
kfree_rcu(node, rcu);
out:
- raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
+ spin_unlock_irqrestore(&trie->lock, irq_flags);
return ret;
}
@@ -575,7 +575,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
if (ret)
goto out_err;
- raw_spin_lock_init(&trie->lock);
+ spin_lock_init(&trie->lock);
return &trie->map;
out_err:
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index 6e090140b924..b367430e611c 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -25,12 +25,18 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
free_percpu(s->freelist);
}
+static inline void pcpu_freelist_push_node(struct pcpu_freelist_head *head,
+ struct pcpu_freelist_node *node)
+{
+ node->next = head->first;
+ head->first = node;
+}
+
static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
struct pcpu_freelist_node *node)
{
raw_spin_lock(&head->lock);
- node->next = head->first;
- head->first = node;
+ pcpu_freelist_push_node(head, node);
raw_spin_unlock(&head->lock);
}
@@ -56,21 +62,16 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
u32 nr_elems)
{
struct pcpu_freelist_head *head;
- unsigned long flags;
int i, cpu, pcpu_entries;
pcpu_entries = nr_elems / num_possible_cpus() + 1;
i = 0;
- /* disable irq to workaround lockdep false positive
- * in bpf usage pcpu_freelist_populate() will never race
- * with pcpu_freelist_push()
- */
- local_irq_save(flags);
for_each_possible_cpu(cpu) {
again:
head = per_cpu_ptr(s->freelist, cpu);
- ___pcpu_freelist_push(head, buf);
+ /* No locking required as this is not visible yet. */
+ pcpu_freelist_push_node(head, buf);
i++;
buf += elem_size;
if (i == nr_elems)
@@ -78,7 +79,6 @@ again:
if (i % pcpu_entries)
goto again;
}
- local_irq_restore(flags);
}
struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
index 50c083ba978c..01badd3eda7a 100644
--- a/kernel/bpf/reuseport_array.c
+++ b/kernel/bpf/reuseport_array.c
@@ -305,11 +305,6 @@ int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
if (err)
goto put_file_unlock;
- /* Ensure reuse->reuseport_id is set */
- err = reuseport_get_id(reuse);
- if (err < 0)
- goto put_file_unlock;
-
WRITE_ONCE(nsk->sk_user_data, &array->ptrs[index]);
rcu_assign_pointer(array->ptrs[index], nsk);
free_osk = osk;
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 3f958b90d914..db76339fe358 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -40,6 +40,9 @@ static void do_up_read(struct irq_work *entry)
{
struct stack_map_irq_work *work;
+ if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
+ return;
+
work = container_of(entry, struct stack_map_irq_work, irq_work);
up_read_non_owner(work->sem);
work->sem = NULL;
@@ -288,10 +291,19 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
struct stack_map_irq_work *work = NULL;
if (irqs_disabled()) {
- work = this_cpu_ptr(&up_read_work);
- if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
- /* cannot queue more up_read, fallback */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ work = this_cpu_ptr(&up_read_work);
+ if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) {
+ /* cannot queue more up_read, fallback */
+ irq_work_busy = true;
+ }
+ } else {
+ /*
+			 * PREEMPT_RT does not allow trylocking mmap_sem in
+			 * interrupt-disabled context. Force the fallback code.
+ */
irq_work_busy = true;
+ }
}
/*
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 966b7b34cde0..64783da34202 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -25,6 +25,7 @@
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
+#include <linux/bpf_lsm.h>
#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
(map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
@@ -171,11 +172,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
flags);
}
- /* must increment bpf_prog_active to avoid kprobe+bpf triggering from
- * inside bpf map update or delete otherwise deadlocks are possible
- */
- preempt_disable();
- __this_cpu_inc(bpf_prog_active);
+ bpf_disable_instrumentation();
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
err = bpf_percpu_hash_update(map, key, value, flags);
@@ -206,8 +203,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
err = map->ops->map_update_elem(map, key, value, flags);
rcu_read_unlock();
}
- __this_cpu_dec(bpf_prog_active);
- preempt_enable();
+ bpf_enable_instrumentation();
maybe_wait_bpf_programs(map);
return err;
@@ -222,8 +218,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
if (bpf_map_is_dev_bound(map))
return bpf_map_offload_lookup_elem(map, key, value);
- preempt_disable();
- this_cpu_inc(bpf_prog_active);
+ bpf_disable_instrumentation();
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
err = bpf_percpu_hash_copy(map, key, value);
@@ -268,8 +263,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
rcu_read_unlock();
}
- this_cpu_dec(bpf_prog_active);
- preempt_enable();
+ bpf_enable_instrumentation();
maybe_wait_bpf_programs(map);
return err;
@@ -911,6 +905,21 @@ void bpf_map_inc_with_uref(struct bpf_map *map)
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
+struct bpf_map *bpf_map_get(u32 ufd)
+{
+ struct fd f = fdget(ufd);
+ struct bpf_map *map;
+
+ map = __bpf_map_get(f);
+ if (IS_ERR(map))
+ return map;
+
+ bpf_map_inc(map);
+ fdput(f);
+
+ return map;
+}
+
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
struct fd f = fdget(ufd);
@@ -1138,13 +1147,11 @@ static int map_delete_elem(union bpf_attr *attr)
goto out;
}
- preempt_disable();
- __this_cpu_inc(bpf_prog_active);
+ bpf_disable_instrumentation();
rcu_read_lock();
err = map->ops->map_delete_elem(map, key);
rcu_read_unlock();
- __this_cpu_dec(bpf_prog_active);
- preempt_enable();
+ bpf_enable_instrumentation();
maybe_wait_bpf_programs(map);
out:
kfree(key);
@@ -1256,13 +1263,11 @@ int generic_map_delete_batch(struct bpf_map *map,
break;
}
- preempt_disable();
- __this_cpu_inc(bpf_prog_active);
+ bpf_disable_instrumentation();
rcu_read_lock();
err = map->ops->map_delete_elem(map, key);
rcu_read_unlock();
- __this_cpu_dec(bpf_prog_active);
- preempt_enable();
+ bpf_enable_instrumentation();
maybe_wait_bpf_programs(map);
if (err)
break;
@@ -1938,6 +1943,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
switch (prog_type) {
case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_LSM:
case BPF_PROG_TYPE_STRUCT_OPS:
case BPF_PROG_TYPE_EXT:
break;
@@ -2177,84 +2183,288 @@ static int bpf_obj_get(const union bpf_attr *attr)
attr->file_flags);
}
-static int bpf_tracing_prog_release(struct inode *inode, struct file *filp)
+void bpf_link_init(struct bpf_link *link, const struct bpf_link_ops *ops,
+ struct bpf_prog *prog)
{
- struct bpf_prog *prog = filp->private_data;
+ atomic64_set(&link->refcnt, 1);
+ link->ops = ops;
+ link->prog = prog;
+}
- WARN_ON_ONCE(bpf_trampoline_unlink_prog(prog));
- bpf_prog_put(prog);
+/* Clean up bpf_link and corresponding anon_inode file and FD. After
+ * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
+ * anon_inode's release() call. This helper marks bpf_link as defunct,
+ * releases the anon_inode file and puts the reserved FD.
+ */
+void bpf_link_cleanup(struct bpf_link *link, struct file *link_file,
+ int link_fd)
+{
+ link->prog = NULL;
+ fput(link_file);
+ put_unused_fd(link_fd);
+}
+
+void bpf_link_inc(struct bpf_link *link)
+{
+ atomic64_inc(&link->refcnt);
+}
+
+/* bpf_link_free is guaranteed to be called from process context */
+static void bpf_link_free(struct bpf_link *link)
+{
+ if (link->prog) {
+ /* detach BPF program, clean up used resources */
+ link->ops->release(link);
+ bpf_prog_put(link->prog);
+ }
+ /* free bpf_link and its containing memory */
+ link->ops->dealloc(link);
+}
+
+static void bpf_link_put_deferred(struct work_struct *work)
+{
+ struct bpf_link *link = container_of(work, struct bpf_link, work);
+
+ bpf_link_free(link);
+}
+
+/* bpf_link_put can be called from atomic context, but ensures that resources
+ * are freed from process context
+ */
+void bpf_link_put(struct bpf_link *link)
+{
+ if (!atomic64_dec_and_test(&link->refcnt))
+ return;
+
+ if (in_atomic()) {
+ INIT_WORK(&link->work, bpf_link_put_deferred);
+ schedule_work(&link->work);
+ } else {
+ bpf_link_free(link);
+ }
+}
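The intended refcount lifecycle, sketched from the functions above
(my_lops names a hypothetical bpf_link_ops instance):

	/*
	 * link = kzalloc(...);
	 * bpf_link_init(&link->link, &my_lops, prog);	// refcnt = 1
	 * file = bpf_link_new_file(&link->link, &fd);
	 * ... attach; on failure: bpf_link_cleanup() ...
	 * fd_install(fd, file);	// the fd now owns the reference
	 *
	 * close(fd) -> bpf_link_release() -> bpf_link_put()
	 *	-> refcnt hits 0 -> ops->release() + bpf_prog_put()
	 *	-> ops->dealloc()	(deferred to a workqueue if atomic)
	 */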
+
+static int bpf_link_release(struct inode *inode, struct file *filp)
+{
+ struct bpf_link *link = filp->private_data;
+
+ bpf_link_put(link);
return 0;
}
-static const struct file_operations bpf_tracing_prog_fops = {
- .release = bpf_tracing_prog_release,
+#ifdef CONFIG_PROC_FS
+static const struct bpf_link_ops bpf_raw_tp_lops;
+static const struct bpf_link_ops bpf_tracing_link_lops;
+
+static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
+{
+ const struct bpf_link *link = filp->private_data;
+ const struct bpf_prog *prog = link->prog;
+ char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
+ const char *link_type;
+
+ if (link->ops == &bpf_raw_tp_lops)
+ link_type = "raw_tracepoint";
+ else if (link->ops == &bpf_tracing_link_lops)
+ link_type = "tracing";
+#ifdef CONFIG_CGROUP_BPF
+ else if (link->ops == &bpf_cgroup_link_lops)
+ link_type = "cgroup";
+#endif
+ else
+ link_type = "unknown";
+
+ bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
+ seq_printf(m,
+ "link_type:\t%s\n"
+ "prog_tag:\t%s\n"
+ "prog_id:\t%u\n",
+ link_type,
+ prog_tag,
+ prog->aux->id);
+}
+#endif
+
+const struct file_operations bpf_link_fops = {
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = bpf_link_show_fdinfo,
+#endif
+ .release = bpf_link_release,
.read = bpf_dummy_read,
.write = bpf_dummy_write,
};
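With CONFIG_PROC_FS the resulting /proc/<pid>/fdinfo/<fd> entry for a link
then looks roughly like this (values illustrative):

	link_type:	cgroup
	prog_tag:	a04f5eef06a7f555
	prog_id:	42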
+int bpf_link_new_fd(struct bpf_link *link)
+{
+ return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
+}
+
+/* Similar to bpf_link_new_fd, create anon_inode for given bpf_link, but
+ * instead of immediately installing fd in fdtable, just reserve it and
+ * return. Caller then need to either install it with fd_install(fd, file) or
+ * release with put_unused_fd(fd).
+ * This is useful for cases when bpf_link attachment/detachment are
+ * complicated and expensive operations and should be delayed until all the fd
+ * reservation and anon_inode creation succeeds.
+ */
+struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd)
+{
+ struct file *file;
+ int fd;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ return ERR_PTR(fd);
+
+ file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
+ if (IS_ERR(file)) {
+ put_unused_fd(fd);
+ return file;
+ }
+
+ *reserved_fd = fd;
+ return file;
+}
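The caller pattern this enables, matching the attach paths elsewhere in
this patch (expensive_attach_operation() is hypothetical):

	/*
	 * link_file = bpf_link_new_file(&link->link, &link_fd);
	 * if (IS_ERR(link_file)) {
	 *	kfree(link);
	 *	return PTR_ERR(link_file);
	 * }
	 * err = expensive_attach_operation();
	 * if (err) {
	 *	bpf_link_cleanup(&link->link, link_file, link_fd);
	 *	return err;
	 * }
	 * fd_install(link_fd, link_file);
	 * return link_fd;
	 */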
+
+struct bpf_link *bpf_link_get_from_fd(u32 ufd)
+{
+ struct fd f = fdget(ufd);
+ struct bpf_link *link;
+
+ if (!f.file)
+ return ERR_PTR(-EBADF);
+ if (f.file->f_op != &bpf_link_fops) {
+ fdput(f);
+ return ERR_PTR(-EINVAL);
+ }
+
+ link = f.file->private_data;
+ bpf_link_inc(link);
+ fdput(f);
+
+ return link;
+}
+
+struct bpf_tracing_link {
+ struct bpf_link link;
+};
+
+static void bpf_tracing_link_release(struct bpf_link *link)
+{
+ WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog));
+}
+
+static void bpf_tracing_link_dealloc(struct bpf_link *link)
+{
+ struct bpf_tracing_link *tr_link =
+ container_of(link, struct bpf_tracing_link, link);
+
+ kfree(tr_link);
+}
+
+static const struct bpf_link_ops bpf_tracing_link_lops = {
+ .release = bpf_tracing_link_release,
+ .dealloc = bpf_tracing_link_dealloc,
+};
+
static int bpf_tracing_prog_attach(struct bpf_prog *prog)
{
- int tr_fd, err;
+ struct bpf_tracing_link *link;
+ struct file *link_file;
+ int link_fd, err;
- if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
- prog->expected_attach_type != BPF_TRACE_FEXIT &&
- prog->type != BPF_PROG_TYPE_EXT) {
+ switch (prog->type) {
+ case BPF_PROG_TYPE_TRACING:
+ if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
+ prog->expected_attach_type != BPF_TRACE_FEXIT &&
+ prog->expected_attach_type != BPF_MODIFY_RETURN) {
+ err = -EINVAL;
+ goto out_put_prog;
+ }
+ break;
+ case BPF_PROG_TYPE_EXT:
+ if (prog->expected_attach_type != 0) {
+ err = -EINVAL;
+ goto out_put_prog;
+ }
+ break;
+ case BPF_PROG_TYPE_LSM:
+ if (prog->expected_attach_type != BPF_LSM_MAC) {
+ err = -EINVAL;
+ goto out_put_prog;
+ }
+ break;
+ default:
err = -EINVAL;
goto out_put_prog;
}
- err = bpf_trampoline_link_prog(prog);
- if (err)
+ link = kzalloc(sizeof(*link), GFP_USER);
+ if (!link) {
+ err = -ENOMEM;
+ goto out_put_prog;
+ }
+ bpf_link_init(&link->link, &bpf_tracing_link_lops, prog);
+
+ link_file = bpf_link_new_file(&link->link, &link_fd);
+ if (IS_ERR(link_file)) {
+ kfree(link);
+ err = PTR_ERR(link_file);
goto out_put_prog;
+ }
- tr_fd = anon_inode_getfd("bpf-tracing-prog", &bpf_tracing_prog_fops,
- prog, O_CLOEXEC);
- if (tr_fd < 0) {
- WARN_ON_ONCE(bpf_trampoline_unlink_prog(prog));
- err = tr_fd;
+ err = bpf_trampoline_link_prog(prog);
+ if (err) {
+ bpf_link_cleanup(&link->link, link_file, link_fd);
goto out_put_prog;
}
- return tr_fd;
+
+ fd_install(link_fd, link_file);
+ return link_fd;
out_put_prog:
bpf_prog_put(prog);
return err;
}
-struct bpf_raw_tracepoint {
+struct bpf_raw_tp_link {
+ struct bpf_link link;
struct bpf_raw_event_map *btp;
- struct bpf_prog *prog;
};
-static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
+static void bpf_raw_tp_link_release(struct bpf_link *link)
{
- struct bpf_raw_tracepoint *raw_tp = filp->private_data;
+ struct bpf_raw_tp_link *raw_tp =
+ container_of(link, struct bpf_raw_tp_link, link);
- if (raw_tp->prog) {
- bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
- bpf_prog_put(raw_tp->prog);
- }
+ bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
bpf_put_raw_tracepoint(raw_tp->btp);
+}
+
+static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
+{
+ struct bpf_raw_tp_link *raw_tp =
+ container_of(link, struct bpf_raw_tp_link, link);
+
kfree(raw_tp);
- return 0;
}
-static const struct file_operations bpf_raw_tp_fops = {
- .release = bpf_raw_tracepoint_release,
- .read = bpf_dummy_read,
- .write = bpf_dummy_write,
+static const struct bpf_link_ops bpf_raw_tp_lops = {
+ .release = bpf_raw_tp_link_release,
+ .dealloc = bpf_raw_tp_link_dealloc,
};
#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
{
- struct bpf_raw_tracepoint *raw_tp;
+ struct bpf_raw_tp_link *link;
struct bpf_raw_event_map *btp;
+ struct file *link_file;
struct bpf_prog *prog;
const char *tp_name;
char buf[128];
- int tp_fd, err;
+ int link_fd, err;
if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
return -EINVAL;
@@ -2263,16 +2473,10 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
if (IS_ERR(prog))
return PTR_ERR(prog);
- if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT &&
- prog->type != BPF_PROG_TYPE_TRACING &&
- prog->type != BPF_PROG_TYPE_EXT &&
- prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) {
- err = -EINVAL;
- goto out_put_prog;
- }
-
- if (prog->type == BPF_PROG_TYPE_TRACING ||
- prog->type == BPF_PROG_TYPE_EXT) {
+ switch (prog->type) {
+ case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_EXT:
+ case BPF_PROG_TYPE_LSM:
if (attr->raw_tracepoint.name) {
/* The attach point for this category of programs
* should be specified via btf_id during program load.
@@ -2280,11 +2484,14 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
err = -EINVAL;
goto out_put_prog;
}
- if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
+ if (prog->type == BPF_PROG_TYPE_TRACING &&
+ prog->expected_attach_type == BPF_TRACE_RAW_TP) {
tp_name = prog->aux->attach_func_name;
- else
- return bpf_tracing_prog_attach(prog);
- } else {
+ break;
+ }
+ return bpf_tracing_prog_attach(prog);
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
if (strncpy_from_user(buf,
u64_to_user_ptr(attr->raw_tracepoint.name),
sizeof(buf) - 1) < 0) {
@@ -2293,6 +2500,10 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
}
buf[sizeof(buf) - 1] = 0;
tp_name = buf;
+ break;
+ default:
+ err = -EINVAL;
+ goto out_put_prog;
}
btp = bpf_get_raw_tracepoint(tp_name);
@@ -2301,29 +2512,30 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
goto out_put_prog;
}
- raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
- if (!raw_tp) {
+ link = kzalloc(sizeof(*link), GFP_USER);
+ if (!link) {
err = -ENOMEM;
goto out_put_btp;
}
- raw_tp->btp = btp;
- raw_tp->prog = prog;
+ bpf_link_init(&link->link, &bpf_raw_tp_lops, prog);
+ link->btp = btp;
- err = bpf_probe_register(raw_tp->btp, prog);
- if (err)
- goto out_free_tp;
+ link_file = bpf_link_new_file(&link->link, &link_fd);
+ if (IS_ERR(link_file)) {
+ kfree(link);
+ err = PTR_ERR(link_file);
+ goto out_put_btp;
+ }
- tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp,
- O_CLOEXEC);
- if (tp_fd < 0) {
- bpf_probe_unregister(raw_tp->btp, prog);
- err = tp_fd;
- goto out_free_tp;
+ err = bpf_probe_register(link->btp, prog);
+ if (err) {
+ bpf_link_cleanup(&link->link, link_file, link_fd);
+ goto out_put_btp;
}
- return tp_fd;
-out_free_tp:
- kfree(raw_tp);
+ fd_install(link_fd, link_file);
+ return link_fd;
+
out_put_btp:
bpf_put_raw_tracepoint(btp);
out_put_prog:
@@ -2348,36 +2560,18 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
}
}
-#define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
-
-#define BPF_F_ATTACH_MASK \
- (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
-
-static int bpf_prog_attach(const union bpf_attr *attr)
+static enum bpf_prog_type
+attach_type_to_prog_type(enum bpf_attach_type attach_type)
{
- enum bpf_prog_type ptype;
- struct bpf_prog *prog;
- int ret;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (CHECK_ATTR(BPF_PROG_ATTACH))
- return -EINVAL;
-
- if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
- return -EINVAL;
-
- switch (attr->attach_type) {
+ switch (attach_type) {
case BPF_CGROUP_INET_INGRESS:
case BPF_CGROUP_INET_EGRESS:
- ptype = BPF_PROG_TYPE_CGROUP_SKB;
+ return BPF_PROG_TYPE_CGROUP_SKB;
break;
case BPF_CGROUP_INET_SOCK_CREATE:
case BPF_CGROUP_INET4_POST_BIND:
case BPF_CGROUP_INET6_POST_BIND:
- ptype = BPF_PROG_TYPE_CGROUP_SOCK;
- break;
+ return BPF_PROG_TYPE_CGROUP_SOCK;
case BPF_CGROUP_INET4_BIND:
case BPF_CGROUP_INET6_BIND:
case BPF_CGROUP_INET4_CONNECT:
@@ -2386,37 +2580,53 @@ static int bpf_prog_attach(const union bpf_attr *attr)
case BPF_CGROUP_UDP6_SENDMSG:
case BPF_CGROUP_UDP4_RECVMSG:
case BPF_CGROUP_UDP6_RECVMSG:
- ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
- break;
+ return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
case BPF_CGROUP_SOCK_OPS:
- ptype = BPF_PROG_TYPE_SOCK_OPS;
- break;
+ return BPF_PROG_TYPE_SOCK_OPS;
case BPF_CGROUP_DEVICE:
- ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
- break;
+ return BPF_PROG_TYPE_CGROUP_DEVICE;
case BPF_SK_MSG_VERDICT:
- ptype = BPF_PROG_TYPE_SK_MSG;
- break;
+ return BPF_PROG_TYPE_SK_MSG;
case BPF_SK_SKB_STREAM_PARSER:
case BPF_SK_SKB_STREAM_VERDICT:
- ptype = BPF_PROG_TYPE_SK_SKB;
- break;
+ return BPF_PROG_TYPE_SK_SKB;
case BPF_LIRC_MODE2:
- ptype = BPF_PROG_TYPE_LIRC_MODE2;
- break;
+ return BPF_PROG_TYPE_LIRC_MODE2;
case BPF_FLOW_DISSECTOR:
- ptype = BPF_PROG_TYPE_FLOW_DISSECTOR;
- break;
+ return BPF_PROG_TYPE_FLOW_DISSECTOR;
case BPF_CGROUP_SYSCTL:
- ptype = BPF_PROG_TYPE_CGROUP_SYSCTL;
- break;
+ return BPF_PROG_TYPE_CGROUP_SYSCTL;
case BPF_CGROUP_GETSOCKOPT:
case BPF_CGROUP_SETSOCKOPT:
- ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT;
- break;
+ return BPF_PROG_TYPE_CGROUP_SOCKOPT;
default:
- return -EINVAL;
+ return BPF_PROG_TYPE_UNSPEC;
}
+}
+
+#define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
+
+#define BPF_F_ATTACH_MASK \
+ (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
+
+static int bpf_prog_attach(const union bpf_attr *attr)
+{
+ enum bpf_prog_type ptype;
+ struct bpf_prog *prog;
+ int ret;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (CHECK_ATTR(BPF_PROG_ATTACH))
+ return -EINVAL;
+
+ if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
+ return -EINVAL;
+
+ ptype = attach_type_to_prog_type(attr->attach_type);
+ if (ptype == BPF_PROG_TYPE_UNSPEC)
+ return -EINVAL;
prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
if (IS_ERR(prog))
@@ -2438,8 +2648,17 @@ static int bpf_prog_attach(const union bpf_attr *attr)
case BPF_PROG_TYPE_FLOW_DISSECTOR:
ret = skb_flow_dissector_bpf_prog_attach(attr, prog);
break;
- default:
+ case BPF_PROG_TYPE_CGROUP_DEVICE:
+ case BPF_PROG_TYPE_CGROUP_SKB:
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+ case BPF_PROG_TYPE_CGROUP_SYSCTL:
+ case BPF_PROG_TYPE_SOCK_OPS:
ret = cgroup_bpf_prog_attach(attr, ptype, prog);
+ break;
+ default:
+ ret = -EINVAL;
}
if (ret)
@@ -2459,53 +2678,27 @@ static int bpf_prog_detach(const union bpf_attr *attr)
if (CHECK_ATTR(BPF_PROG_DETACH))
return -EINVAL;
- switch (attr->attach_type) {
- case BPF_CGROUP_INET_INGRESS:
- case BPF_CGROUP_INET_EGRESS:
- ptype = BPF_PROG_TYPE_CGROUP_SKB;
- break;
- case BPF_CGROUP_INET_SOCK_CREATE:
- case BPF_CGROUP_INET4_POST_BIND:
- case BPF_CGROUP_INET6_POST_BIND:
- ptype = BPF_PROG_TYPE_CGROUP_SOCK;
- break;
- case BPF_CGROUP_INET4_BIND:
- case BPF_CGROUP_INET6_BIND:
- case BPF_CGROUP_INET4_CONNECT:
- case BPF_CGROUP_INET6_CONNECT:
- case BPF_CGROUP_UDP4_SENDMSG:
- case BPF_CGROUP_UDP6_SENDMSG:
- case BPF_CGROUP_UDP4_RECVMSG:
- case BPF_CGROUP_UDP6_RECVMSG:
- ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
- break;
- case BPF_CGROUP_SOCK_OPS:
- ptype = BPF_PROG_TYPE_SOCK_OPS;
- break;
- case BPF_CGROUP_DEVICE:
- ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
- break;
- case BPF_SK_MSG_VERDICT:
- return sock_map_get_from_fd(attr, NULL);
- case BPF_SK_SKB_STREAM_PARSER:
- case BPF_SK_SKB_STREAM_VERDICT:
+ ptype = attach_type_to_prog_type(attr->attach_type);
+
+ switch (ptype) {
+ case BPF_PROG_TYPE_SK_MSG:
+ case BPF_PROG_TYPE_SK_SKB:
return sock_map_get_from_fd(attr, NULL);
- case BPF_LIRC_MODE2:
+ case BPF_PROG_TYPE_LIRC_MODE2:
return lirc_prog_detach(attr);
- case BPF_FLOW_DISSECTOR:
+ case BPF_PROG_TYPE_FLOW_DISSECTOR:
return skb_flow_dissector_bpf_prog_detach(attr);
- case BPF_CGROUP_SYSCTL:
- ptype = BPF_PROG_TYPE_CGROUP_SYSCTL;
- break;
- case BPF_CGROUP_GETSOCKOPT:
- case BPF_CGROUP_SETSOCKOPT:
- ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT;
- break;
+ case BPF_PROG_TYPE_CGROUP_DEVICE:
+ case BPF_PROG_TYPE_CGROUP_SKB:
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+ case BPF_PROG_TYPE_CGROUP_SYSCTL:
+ case BPF_PROG_TYPE_SOCK_OPS:
+ return cgroup_bpf_prog_detach(attr, ptype);
default:
return -EINVAL;
}
-
- return cgroup_bpf_prog_detach(attr, ptype);
}
#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
@@ -2539,7 +2732,7 @@ static int bpf_prog_query(const union bpf_attr *attr,
case BPF_CGROUP_SYSCTL:
case BPF_CGROUP_GETSOCKOPT:
case BPF_CGROUP_SETSOCKOPT:
- break;
+ return cgroup_bpf_prog_query(attr, uattr);
case BPF_LIRC_MODE2:
return lirc_prog_query(attr, uattr);
case BPF_FLOW_DISSECTOR:
@@ -2547,8 +2740,6 @@ static int bpf_prog_query(const union bpf_attr *attr,
default:
return -EINVAL;
}
-
- return cgroup_bpf_prog_query(attr, uattr);
}
#define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out
@@ -3272,15 +3463,21 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
if (err)
goto out;
- if (file->f_op == &bpf_raw_tp_fops) {
- struct bpf_raw_tracepoint *raw_tp = file->private_data;
- struct bpf_raw_event_map *btp = raw_tp->btp;
+ if (file->f_op == &bpf_link_fops) {
+ struct bpf_link *link = file->private_data;
- err = bpf_task_fd_query_copy(attr, uattr,
- raw_tp->prog->aux->id,
- BPF_FD_TYPE_RAW_TRACEPOINT,
- btp->tp->name, 0, 0);
- goto put_file;
+ if (link->ops == &bpf_raw_tp_lops) {
+ struct bpf_raw_tp_link *raw_tp =
+ container_of(link, struct bpf_raw_tp_link, link);
+ struct bpf_raw_event_map *btp = raw_tp->btp;
+
+ err = bpf_task_fd_query_copy(attr, uattr,
+ raw_tp->link.prog->aux->id,
+ BPF_FD_TYPE_RAW_TRACEPOINT,
+ btp->tp->name, 0, 0);
+ goto put_file;
+ }
+ goto out_not_supp;
}
event = perf_get_event(file);
@@ -3300,6 +3497,7 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
goto put_file;
}
+out_not_supp:
err = -ENOTSUPP;
put_file:
fput(file);
@@ -3362,6 +3560,104 @@ err_put:
return err;
}
+#define BPF_LINK_CREATE_LAST_FIELD link_create.flags
+static int link_create(union bpf_attr *attr)
+{
+ enum bpf_prog_type ptype;
+ struct bpf_prog *prog;
+ int ret;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (CHECK_ATTR(BPF_LINK_CREATE))
+ return -EINVAL;
+
+ ptype = attach_type_to_prog_type(attr->link_create.attach_type);
+ if (ptype == BPF_PROG_TYPE_UNSPEC)
+ return -EINVAL;
+
+ prog = bpf_prog_get_type(attr->link_create.prog_fd, ptype);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ ret = bpf_prog_attach_check_attach_type(prog,
+ attr->link_create.attach_type);
+ if (ret)
+ goto err_out;
+
+ switch (ptype) {
+ case BPF_PROG_TYPE_CGROUP_SKB:
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ case BPF_PROG_TYPE_SOCK_OPS:
+ case BPF_PROG_TYPE_CGROUP_DEVICE:
+ case BPF_PROG_TYPE_CGROUP_SYSCTL:
+ case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+ ret = cgroup_bpf_link_attach(attr, prog);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+err_out:
+ if (ret < 0)
+ bpf_prog_put(prog);
+ return ret;
+}
+
+#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
+
+static int link_update(union bpf_attr *attr)
+{
+ struct bpf_prog *old_prog = NULL, *new_prog;
+ struct bpf_link *link;
+ u32 flags;
+ int ret;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (CHECK_ATTR(BPF_LINK_UPDATE))
+ return -EINVAL;
+
+ flags = attr->link_update.flags;
+ if (flags & ~BPF_F_REPLACE)
+ return -EINVAL;
+
+ link = bpf_link_get_from_fd(attr->link_update.link_fd);
+ if (IS_ERR(link))
+ return PTR_ERR(link);
+
+ new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
+ if (IS_ERR(new_prog))
+ return PTR_ERR(new_prog);
+
+ if (flags & BPF_F_REPLACE) {
+ old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
+ if (IS_ERR(old_prog)) {
+ ret = PTR_ERR(old_prog);
+ old_prog = NULL;
+ goto out_put_progs;
+ }
+ }
+
+#ifdef CONFIG_CGROUP_BPF
+ if (link->ops == &bpf_cgroup_link_lops) {
+ ret = cgroup_bpf_replace(link, old_prog, new_prog);
+ goto out_put_progs;
+ }
+#endif
+ ret = -EINVAL;
+
+out_put_progs:
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ if (ret)
+ bpf_prog_put(new_prog);
+ return ret;
+}
+
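
For context on how the two new commands are driven from userspace: both go through the bpf(2) syscall with the new link_create/link_update sections of union bpf_attr. A minimal sketch follows; the function names and the cgroup_fd/prog_fd variables are illustrative, and it assumes uapi headers that already carry BPF_LINK_CREATE and BPF_LINK_UPDATE:

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    /* Thin wrapper over the bpf(2) syscall. */
    static long sys_bpf(int cmd, union bpf_attr *attr)
    {
            return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
    }

    /* Create a link: the program stays attached as long as the link fd lives. */
    static int cgroup_link_create(int cgroup_fd, int prog_fd)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));  /* CHECK_ATTR() wants unused bytes zeroed */
            attr.link_create.prog_fd = prog_fd;
            attr.link_create.target_fd = cgroup_fd;
            attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;
            return sys_bpf(BPF_LINK_CREATE, &attr);  /* returns a link fd */
    }

    /* Atomically swap the program behind an existing link. */
    static int cgroup_link_replace(int link_fd, int old_prog_fd, int new_prog_fd)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.link_update.link_fd = link_fd;
            attr.link_update.new_prog_fd = new_prog_fd;
            attr.link_update.old_prog_fd = old_prog_fd;
            attr.link_update.flags = BPF_F_REPLACE;  /* fail unless old_prog_fd is still attached */
            return sys_bpf(BPF_LINK_UPDATE, &attr);
    }

Without BPF_F_REPLACE, old_prog_fd is ignored and the update succeeds regardless of which program currently sits behind the link.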
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
union bpf_attr attr;
@@ -3473,6 +3769,12 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
case BPF_MAP_DELETE_BATCH:
err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
break;
+ case BPF_LINK_CREATE:
+ err = link_create(&attr);
+ break;
+ case BPF_LINK_UPDATE:
+ err = link_update(&attr);
+ break;
default:
err = -EINVAL;
break;
diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c
index 7ae5dddd1fe6..3b495773de5a 100644
--- a/kernel/bpf/sysfs_btf.c
+++ b/kernel/bpf/sysfs_btf.c
@@ -9,15 +9,15 @@
#include <linux/sysfs.h>
/* See scripts/link-vmlinux.sh, gen_btf() func for details */
-extern char __weak _binary__btf_vmlinux_bin_start[];
-extern char __weak _binary__btf_vmlinux_bin_end[];
+extern char __weak __start_BTF[];
+extern char __weak __stop_BTF[];
static ssize_t
btf_vmlinux_read(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t len)
{
- memcpy(buf, _binary__btf_vmlinux_bin_start + off, len);
+ memcpy(buf, __start_BTF + off, len);
return len;
}
@@ -30,15 +30,14 @@ static struct kobject *btf_kobj;
static int __init btf_vmlinux_init(void)
{
- if (!_binary__btf_vmlinux_bin_start)
+ if (!__start_BTF)
return 0;
btf_kobj = kobject_create_and_add("btf", kernel_kobj);
if (!btf_kobj)
return -ENOMEM;
- bin_attr_btf_vmlinux.size = _binary__btf_vmlinux_bin_end -
- _binary__btf_vmlinux_bin_start;
+ bin_attr_btf_vmlinux.size = __stop_BTF - __start_BTF;
return sysfs_create_bin_file(btf_kobj, &bin_attr_btf_vmlinux);
}
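
The rename only changes which linker-provided symbols bound the raw BTF blob; the /sys/kernel/btf/vmlinux file itself is unchanged. A quick sanity check from userspace (an illustrative sketch, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/kernel/btf/vmlinux", "rb");
            unsigned char hdr[4];

            if (!f)
                    return 1;
            /* struct btf_header starts with the magic 0xeB9F (bytes 9f eb
             * on little endian), then one version byte and one flags byte. */
            if (fread(hdr, 1, sizeof(hdr), f) == sizeof(hdr))
                    printf("magic %02x%02x version %u flags %u\n",
                           hdr[1], hdr[0], hdr[2], hdr[3]);
            fclose(f);
            return 0;
    }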
diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c
index d4f335a9a899..ceac5281bd31 100644
--- a/kernel/bpf/tnum.c
+++ b/kernel/bpf/tnum.c
@@ -194,3 +194,18 @@ int tnum_sbin(char *str, size_t size, struct tnum a)
str[min(size - 1, (size_t)64)] = 0;
return 64;
}
+
+struct tnum tnum_subreg(struct tnum a)
+{
+ return tnum_cast(a, 4);
+}
+
+struct tnum tnum_clear_subreg(struct tnum a)
+{
+ return tnum_lshift(tnum_rshift(a, 32), 32);
+}
+
+struct tnum tnum_const_subreg(struct tnum a, u32 value)
+{
+ return tnum_or(tnum_clear_subreg(a), tnum_const(value));
+}
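
The three helpers split a tnum at the 32-bit ("subreg") boundary. A standalone sketch of their semantics, re-implementing just enough of tnum to run outside the kernel; recall a tnum is a (value, mask) pair where mask marks unknown bits, with the invariant value & mask == 0:

    #include <stdint.h>
    #include <stdio.h>

    struct tnum { uint64_t value; uint64_t mask; };

    static struct tnum tnum_subreg(struct tnum a)
    {
            /* tnum_cast(a, 4): keep only the low 32 bits as known/unknown */
            a.value &= 0xffffffffULL;
            a.mask  &= 0xffffffffULL;
            return a;
    }

    static struct tnum tnum_clear_subreg(struct tnum a)
    {
            /* tnum_lshift(tnum_rshift(a, 32), 32): zero the low 32 bits */
            a.value = (a.value >> 32) << 32;
            a.mask  = (a.mask  >> 32) << 32;
            return a;
    }

    static struct tnum tnum_const_subreg(struct tnum a, uint32_t value)
    {
            /* tnum_or(tnum_clear_subreg(a), tnum_const(value)): with the
             * subreg cleared, ORing in a low-word constant just sets it. */
            a = tnum_clear_subreg(a);
            a.value |= value;
            return a;
    }

    int main(void)
    {
            struct tnum a  = { 0xdead0000beef0000ULL, 0x0000ffff0000ffffULL };
            struct tnum lo = tnum_subreg(a);        /* (0xbeef0000; 0xffff) */
            struct tnum hi = tnum_clear_subreg(a);  /* (0xdead... << 32; high mask) */
            struct tnum c  = tnum_const_subreg(a, 42);

            printf("lo  (%#llx; %#llx)\n", (unsigned long long)lo.value, (unsigned long long)lo.mask);
            printf("hi  (%#llx; %#llx)\n", (unsigned long long)hi.value, (unsigned long long)hi.mask);
            printf("c42 (%#llx; %#llx)\n", (unsigned long long)c.value,  (unsigned long long)c.mask);
            return 0;
    }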
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 6b264a92064b..9be85aa4ec5f 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -5,6 +5,8 @@
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
+#include <linux/perf_event.h>
+#include <linux/btf.h>
/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
@@ -17,12 +19,11 @@ const struct bpf_prog_ops bpf_extension_prog_ops = {
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)
static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];
-static struct latch_tree_root image_tree __cacheline_aligned;
-/* serializes access to trampoline_table and image_tree */
+/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);
-static void *bpf_jit_alloc_exec_page(void)
+void *bpf_jit_alloc_exec_page(void)
{
void *image;
@@ -38,62 +39,28 @@ static void *bpf_jit_alloc_exec_page(void)
return image;
}
-static __always_inline bool image_tree_less(struct latch_tree_node *a,
- struct latch_tree_node *b)
+void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
- struct bpf_image *ia = container_of(a, struct bpf_image, tnode);
- struct bpf_image *ib = container_of(b, struct bpf_image, tnode);
-
- return ia < ib;
-}
-
-static __always_inline int image_tree_comp(void *addr, struct latch_tree_node *n)
-{
- void *image = container_of(n, struct bpf_image, tnode);
-
- if (addr < image)
- return -1;
- if (addr >= image + PAGE_SIZE)
- return 1;
-
- return 0;
-}
-
-static const struct latch_tree_ops image_tree_ops = {
- .less = image_tree_less,
- .comp = image_tree_comp,
-};
-
-static void *__bpf_image_alloc(bool lock)
-{
- struct bpf_image *image;
-
- image = bpf_jit_alloc_exec_page();
- if (!image)
- return NULL;
-
- if (lock)
- mutex_lock(&trampoline_mutex);
- latch_tree_insert(&image->tnode, &image_tree, &image_tree_ops);
- if (lock)
- mutex_unlock(&trampoline_mutex);
- return image->data;
+ ksym->start = (unsigned long) data;
+ ksym->end = ksym->start + PAGE_SIZE;
+ bpf_ksym_add(ksym);
+ perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
+ PAGE_SIZE, false, ksym->name);
}
-void *bpf_image_alloc(void)
+void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
- return __bpf_image_alloc(true);
+ bpf_ksym_del(ksym);
+ perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
+ PAGE_SIZE, true, ksym->name);
}
-bool is_bpf_image_address(unsigned long addr)
+static void bpf_trampoline_ksym_add(struct bpf_trampoline *tr)
{
- bool ret;
-
- rcu_read_lock();
- ret = latch_tree_find((void *) addr, &image_tree, &image_tree_ops) != NULL;
- rcu_read_unlock();
+ struct bpf_ksym *ksym = &tr->ksym;
- return ret;
+ snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", tr->key);
+ bpf_image_ksym_add(tr->image, ksym);
}
struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
@@ -116,7 +83,7 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
goto out;
/* is_root was checked earlier. No need for bpf_jit_charge_modmem() */
- image = __bpf_image_alloc(false);
+ image = bpf_jit_alloc_exec_page();
if (!image) {
kfree(tr);
tr = NULL;
@@ -131,6 +98,8 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
for (i = 0; i < BPF_TRAMP_MAX; i++)
INIT_HLIST_HEAD(&tr->progs_hlist[i]);
tr->image = image;
+ INIT_LIST_HEAD_RCU(&tr->ksym.lnode);
+ bpf_trampoline_ksym_add(tr);
out:
mutex_unlock(&trampoline_mutex);
return tr;
@@ -190,40 +159,50 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
return ret;
}
-/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
- * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
- */
-#define BPF_MAX_TRAMP_PROGS 40
+static struct bpf_tramp_progs *
+bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
+{
+ const struct bpf_prog_aux *aux;
+ struct bpf_tramp_progs *tprogs;
+ struct bpf_prog **progs;
+ int kind;
+
+ *total = 0;
+ tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
+ if (!tprogs)
+ return ERR_PTR(-ENOMEM);
+
+ for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
+ tprogs[kind].nr_progs = tr->progs_cnt[kind];
+ *total += tr->progs_cnt[kind];
+ progs = tprogs[kind].progs;
+
+ hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist)
+ *progs++ = aux->prog;
+ }
+ return tprogs;
+}
static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
- void *old_image = tr->image + ((tr->selector + 1) & 1) * BPF_IMAGE_SIZE/2;
- void *new_image = tr->image + (tr->selector & 1) * BPF_IMAGE_SIZE/2;
- struct bpf_prog *progs_to_run[BPF_MAX_TRAMP_PROGS];
- int fentry_cnt = tr->progs_cnt[BPF_TRAMP_FENTRY];
- int fexit_cnt = tr->progs_cnt[BPF_TRAMP_FEXIT];
- struct bpf_prog **progs, **fentry, **fexit;
+ void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2;
+ void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2;
+ struct bpf_tramp_progs *tprogs;
u32 flags = BPF_TRAMP_F_RESTORE_REGS;
- struct bpf_prog_aux *aux;
- int err;
+ int err, total;
+
+ tprogs = bpf_trampoline_get_progs(tr, &total);
+ if (IS_ERR(tprogs))
+ return PTR_ERR(tprogs);
- if (fentry_cnt + fexit_cnt == 0) {
+ if (total == 0) {
err = unregister_fentry(tr, old_image);
tr->selector = 0;
goto out;
}
- /* populate fentry progs */
- fentry = progs = progs_to_run;
- hlist_for_each_entry(aux, &tr->progs_hlist[BPF_TRAMP_FENTRY], tramp_hlist)
- *progs++ = aux->prog;
-
- /* populate fexit progs */
- fexit = progs;
- hlist_for_each_entry(aux, &tr->progs_hlist[BPF_TRAMP_FEXIT], tramp_hlist)
- *progs++ = aux->prog;
-
- if (fexit_cnt)
+ if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
+ tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
/* Though the second half of trampoline page is unused a task could be
@@ -232,12 +211,11 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
* preempted task. Hence wait for tasks to voluntarily schedule or go
* to userspace.
*/
synchronize_rcu_tasks();
- err = arch_prepare_bpf_trampoline(new_image, new_image + BPF_IMAGE_SIZE / 2,
- &tr->func.model, flags,
- fentry, fentry_cnt,
- fexit, fexit_cnt,
+ err = arch_prepare_bpf_trampoline(new_image, new_image + PAGE_SIZE / 2,
+ &tr->func.model, flags, tprogs,
tr->func.addr);
if (err < 0)
goto out;
@@ -252,16 +230,27 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
goto out;
tr->selector++;
out:
+ kfree(tprogs);
return err;
}
-static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(enum bpf_attach_type t)
+static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
- switch (t) {
+ switch (prog->expected_attach_type) {
case BPF_TRACE_FENTRY:
return BPF_TRAMP_FENTRY;
+ case BPF_MODIFY_RETURN:
+ return BPF_TRAMP_MODIFY_RETURN;
case BPF_TRACE_FEXIT:
return BPF_TRAMP_FEXIT;
+ case BPF_LSM_MAC:
+ if (!prog->aux->attach_func_proto->type)
+ /* The function returns void, we cannot modify its
+ * return value.
+ */
+ return BPF_TRAMP_FEXIT;
+ else
+ return BPF_TRAMP_MODIFY_RETURN;
default:
return BPF_TRAMP_REPLACE;
}
@@ -275,7 +264,7 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog)
int cnt;
tr = prog->aux->trampoline;
- kind = bpf_attach_type_to_tramp(prog->expected_attach_type);
+ kind = bpf_attach_type_to_tramp(prog);
mutex_lock(&tr->mutex);
if (tr->extension_prog) {
/* cannot attach fentry/fexit if extension prog is attached.
@@ -325,7 +314,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
int err;
tr = prog->aux->trampoline;
- kind = bpf_attach_type_to_tramp(prog->expected_attach_type);
+ kind = bpf_attach_type_to_tramp(prog);
mutex_lock(&tr->mutex);
if (kind == BPF_TRAMP_REPLACE) {
WARN_ON_ONCE(!tr->extension_prog);
@@ -344,8 +333,6 @@ out:
void bpf_trampoline_put(struct bpf_trampoline *tr)
{
- struct bpf_image *image;
-
if (!tr)
return;
mutex_lock(&trampoline_mutex);
@@ -356,35 +343,37 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
goto out;
if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
goto out;
- image = container_of(tr->image, struct bpf_image, data);
- latch_tree_erase(&image->tnode, &image_tree, &image_tree_ops);
+ bpf_image_ksym_del(&tr->ksym);
/* wait for tasks to get out of trampoline before freeing it */
synchronize_rcu_tasks();
- bpf_jit_free_exec(image);
+ bpf_jit_free_exec(tr->image);
hlist_del(&tr->hlist);
kfree(tr);
out:
mutex_unlock(&trampoline_mutex);
}
-/* The logic is similar to BPF_PROG_RUN, but with explicit rcu and preempt that
- * are needed for trampoline. The macro is split into
+/* The logic is similar to BPF_PROG_RUN, but with an explicit
+ * rcu_read_lock() and migrate_disable() which are required
+ * for the trampoline. The macro is split into
* call _bpf_prog_enter
* call prog->bpf_func
* call __bpf_prog_exit
*/
u64 notrace __bpf_prog_enter(void)
+ __acquires(RCU)
{
u64 start = 0;
rcu_read_lock();
- preempt_disable();
+ migrate_disable();
if (static_branch_unlikely(&bpf_stats_enabled_key))
start = sched_clock();
return start;
}
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
+ __releases(RCU)
{
struct bpf_prog_stats *stats;
@@ -401,15 +390,14 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
stats->nsecs += sched_clock() - start;
u64_stats_update_end(&stats->syncp);
}
- preempt_enable();
+ migrate_enable();
rcu_read_unlock();
}
int __weak
arch_prepare_bpf_trampoline(void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
- struct bpf_prog **fentry_progs, int fentry_cnt,
- struct bpf_prog **fexit_progs, int fexit_cnt,
+ struct bpf_tramp_progs *tprogs,
void *orig_call)
{
return -ENOTSUPP;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 1cc945daa9c8..04c6630cc18f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -19,6 +19,8 @@
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
+#include <linux/error-injection.h>
+#include <linux/bpf_lsm.h>
#include "disasm.h"
@@ -227,8 +229,7 @@ struct bpf_call_arg_meta {
bool pkt_access;
int regno;
int access_size;
- s64 msize_smax_value;
- u64 msize_umax_value;
+ u64 msize_max_value;
int ref_obj_id;
int func_id;
u32 btf_id;
@@ -549,6 +550,22 @@ static void print_verifier_state(struct bpf_verifier_env *env,
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, ",var_off=%s", tn_buf);
}
+ if (reg->s32_min_value != reg->smin_value &&
+ reg->s32_min_value != S32_MIN)
+ verbose(env, ",s32_min_value=%d",
+ (int)(reg->s32_min_value));
+ if (reg->s32_max_value != reg->smax_value &&
+ reg->s32_max_value != S32_MAX)
+ verbose(env, ",s32_max_value=%d",
+ (int)(reg->s32_max_value));
+ if (reg->u32_min_value != reg->umin_value &&
+ reg->u32_min_value != U32_MIN)
+ verbose(env, ",u32_min_value=%d",
+ (int)(reg->u32_min_value));
+ if (reg->u32_max_value != reg->umax_value &&
+ reg->u32_max_value != U32_MAX)
+ verbose(env, ",u32_max_value=%d",
+ (int)(reg->u32_max_value));
}
verbose(env, ")");
}
@@ -923,6 +940,20 @@ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
reg->smax_value = (s64)imm;
reg->umin_value = imm;
reg->umax_value = imm;
+
+ reg->s32_min_value = (s32)imm;
+ reg->s32_max_value = (s32)imm;
+ reg->u32_min_value = (u32)imm;
+ reg->u32_max_value = (u32)imm;
+}
+
+static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
+{
+ reg->var_off = tnum_const_subreg(reg->var_off, imm);
+ reg->s32_min_value = (s32)imm;
+ reg->s32_max_value = (s32)imm;
+ reg->u32_min_value = (u32)imm;
+ reg->u32_max_value = (u32)imm;
}
/* Mark the 'variable offset' part of a register as zero. This should be
@@ -977,8 +1008,52 @@ static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
tnum_equals_const(reg->var_off, 0);
}
-/* Attempts to improve min/max values based on var_off information */
-static void __update_reg_bounds(struct bpf_reg_state *reg)
+/* Reset the min/max bounds of a register */
+static void __mark_reg_unbounded(struct bpf_reg_state *reg)
+{
+ reg->smin_value = S64_MIN;
+ reg->smax_value = S64_MAX;
+ reg->umin_value = 0;
+ reg->umax_value = U64_MAX;
+
+ reg->s32_min_value = S32_MIN;
+ reg->s32_max_value = S32_MAX;
+ reg->u32_min_value = 0;
+ reg->u32_max_value = U32_MAX;
+}
+
+static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
+{
+ reg->smin_value = S64_MIN;
+ reg->smax_value = S64_MAX;
+ reg->umin_value = 0;
+ reg->umax_value = U64_MAX;
+}
+
+static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
+{
+ reg->s32_min_value = S32_MIN;
+ reg->s32_max_value = S32_MAX;
+ reg->u32_min_value = 0;
+ reg->u32_max_value = U32_MAX;
+}
+
+static void __update_reg32_bounds(struct bpf_reg_state *reg)
+{
+ struct tnum var32_off = tnum_subreg(reg->var_off);
+
+ /* min signed is max(sign bit) | min(other bits) */
+ reg->s32_min_value = max_t(s32, reg->s32_min_value,
+ var32_off.value | (var32_off.mask & S32_MIN));
+ /* max signed is min(sign bit) | max(other bits) */
+ reg->s32_max_value = min_t(s32, reg->s32_max_value,
+ var32_off.value | (var32_off.mask & S32_MAX));
+ reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
+ reg->u32_max_value = min(reg->u32_max_value,
+ (u32)(var32_off.value | var32_off.mask));
+}
+
+static void __update_reg64_bounds(struct bpf_reg_state *reg)
{
/* min signed is max(sign bit) | min(other bits) */
reg->smin_value = max_t(s64, reg->smin_value,
@@ -991,8 +1066,48 @@ static void __update_reg_bounds(struct bpf_reg_state *reg)
reg->var_off.value | reg->var_off.mask);
}
+static void __update_reg_bounds(struct bpf_reg_state *reg)
+{
+ __update_reg32_bounds(reg);
+ __update_reg64_bounds(reg);
+}
+
/* Uses signed min/max values to inform unsigned, and vice-versa */
-static void __reg_deduce_bounds(struct bpf_reg_state *reg)
+static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
+{
+ /* Learn sign from signed bounds.
+ * If we cannot cross the sign boundary, then signed and unsigned bounds
+ * are the same, so combine. This works even in the negative case, e.g.
+ * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
+ */
+ if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
+ reg->s32_min_value = reg->u32_min_value =
+ max_t(u32, reg->s32_min_value, reg->u32_min_value);
+ reg->s32_max_value = reg->u32_max_value =
+ min_t(u32, reg->s32_max_value, reg->u32_max_value);
+ return;
+ }
+ /* Learn sign from unsigned bounds. Signed bounds cross the sign
+ * boundary, so we must be careful.
+ */
+ if ((s32)reg->u32_max_value >= 0) {
+ /* Positive. We can't learn anything from the smin, but smax
+ * is positive, hence safe.
+ */
+ reg->s32_min_value = reg->u32_min_value;
+ reg->s32_max_value = reg->u32_max_value =
+ min_t(u32, reg->s32_max_value, reg->u32_max_value);
+ } else if ((s32)reg->u32_min_value < 0) {
+ /* Negative. We can't learn anything from the smax, but smin
+ * is negative, hence safe.
+ */
+ reg->s32_min_value = reg->u32_min_value =
+ max_t(u32, reg->s32_min_value, reg->u32_min_value);
+ reg->s32_max_value = reg->u32_max_value;
+ }
+}
+
+static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
{
/* Learn sign from signed bounds.
* If we cannot cross the sign boundary, then signed and unsigned bounds
@@ -1026,32 +1141,106 @@ static void __reg_deduce_bounds(struct bpf_reg_state *reg)
}
}
+static void __reg_deduce_bounds(struct bpf_reg_state *reg)
+{
+ __reg32_deduce_bounds(reg);
+ __reg64_deduce_bounds(reg);
+}
+
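
The 32-bit deduction follows the same sign-learning argument as the 64-bit original. Taking the comment's example literally: if -3 s<= x s<= -1, the range never crosses the sign boundary, so reinterpreting the corners as unsigned preserves their order. A throwaway check in plain C (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int32_t s32_min = -3, s32_max = -1;
            /* Both corners are negative, so the range cannot cross the sign
             * boundary and the signed bounds double as unsigned bounds. */
            uint32_t u32_min = (uint32_t)s32_min;   /* 0xfffffffd */
            uint32_t u32_max = (uint32_t)s32_max;   /* 0xffffffff */

            printf("u32 range [%#x, %#x], ordered: %d\n",
                   u32_min, u32_max, u32_min <= u32_max);
            return 0;
    }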
/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
- reg->var_off = tnum_intersect(reg->var_off,
- tnum_range(reg->umin_value,
- reg->umax_value));
+ struct tnum var64_off = tnum_intersect(reg->var_off,
+ tnum_range(reg->umin_value,
+ reg->umax_value));
+ struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
+ tnum_range(reg->u32_min_value,
+ reg->u32_max_value));
+
+ reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
+}
+
+static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
+{
+ reg->umin_value = reg->u32_min_value;
+ reg->umax_value = reg->u32_max_value;
+ /* Attempt to pull 32-bit signed bounds into 64-bit bounds
+ * but they must be positive, otherwise fall back to worst-case bounds
+ * and refine later from tnum.
+ */
+ if (reg->s32_min_value > 0)
+ reg->smin_value = reg->s32_min_value;
+ else
+ reg->smin_value = 0;
+ if (reg->s32_max_value > 0)
+ reg->smax_value = reg->s32_max_value;
+ else
+ reg->smax_value = U32_MAX;
}
-static void __reg_bound_offset32(struct bpf_reg_state *reg)
+static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
{
- u64 mask = 0xffffFFFF;
- struct tnum range = tnum_range(reg->umin_value & mask,
- reg->umax_value & mask);
- struct tnum lo32 = tnum_cast(reg->var_off, 4);
- struct tnum hi32 = tnum_lshift(tnum_rshift(reg->var_off, 32), 32);
+ /* Special case when the 64-bit register has its upper 32 bits
+ * zeroed. Typically happens after a zext or <<32, >>32 sequence,
+ * allowing us to use the 32-bit bounds directly.
+ */
+ if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
+ __reg_assign_32_into_64(reg);
+ } else {
+ /* Otherwise the best we can do is push lower 32bit known and
+ * unknown bits into register (var_off set from jmp logic)
+ * then learn as much as possible from the 64-bit tnum
+ * known and unknown bits. The previous smin/smax bounds are
+ * invalid here because of jmp32 compare so mark them unknown
+ * so they do not impact tnum bounds calculation.
+ */
+ __mark_reg64_unbounded(reg);
+ __update_reg_bounds(reg);
+ }
- reg->var_off = tnum_or(hi32, tnum_intersect(lo32, range));
+ /* Intersecting with the old var_off might have improved our bounds
+ * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
+ * then new var_off is (0; 0x7f...fc) which improves our umax.
+ */
+ __reg_deduce_bounds(reg);
+ __reg_bound_offset(reg);
+ __update_reg_bounds(reg);
}
-/* Reset the min/max bounds of a register */
-static void __mark_reg_unbounded(struct bpf_reg_state *reg)
+static bool __reg64_bound_s32(s64 a)
{
- reg->smin_value = S64_MIN;
- reg->smax_value = S64_MAX;
- reg->umin_value = 0;
- reg->umax_value = U64_MAX;
+ if (a > S32_MIN && a < S32_MAX)
+ return true;
+ return false;
+}
+
+static bool __reg64_bound_u32(u64 a)
+{
+ if (a > U32_MIN && a < U32_MAX)
+ return true;
+ return false;
+}
+
+static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
+{
+ __mark_reg32_unbounded(reg);
+
+ if (__reg64_bound_s32(reg->smin_value))
+ reg->s32_min_value = (s32)reg->smin_value;
+ if (__reg64_bound_s32(reg->smax_value))
+ reg->s32_max_value = (s32)reg->smax_value;
+ if (__reg64_bound_u32(reg->umin_value))
+ reg->u32_min_value = (u32)reg->umin_value;
+ if (__reg64_bound_u32(reg->umax_value))
+ reg->u32_max_value = (u32)reg->umax_value;
+
+ /* Intersecting with the old var_off might have improved our bounds
+ * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
+ * then new var_off is (0; 0x7f...fc) which improves our umax.
+ */
+ __reg_deduce_bounds(reg);
+ __reg_bound_offset(reg);
+ __update_reg_bounds(reg);
+}
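
Two quick instances of the rule above (illustrative only): 64-bit bounds that fit within 32 bits transfer verbatim, while bounds that straddle 2^32 constrain the subreg not at all:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* 64-bit bounds that fit in s32 transfer directly. */
            int64_t smin = 16, smax = 4096;
            printf("fits: [%d, %d]\n", (int)smin, (int)smax);

            /* Bounds straddling 2^32 say nothing about the low word:
             * 0xffffffff and 0x100000000 are both in range, yet their
             * low 32 bits are 0xffffffff and 0x0 -- the full u32 span. */
            uint64_t a = 0xffffffffULL, b = 0x100000000ULL;
            printf("subregs: %#x %#x\n", (uint32_t)a, (uint32_t)b);
            return 0;
    }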
/* Mark a register as having a completely unknown (scalar) value. */
@@ -2784,6 +2973,12 @@ static int check_tp_buffer_access(struct bpf_verifier_env *env,
return 0;
}
+/* BPF architecture zero extends alu32 ops into 64-bit registers */
+static void zext_32_to_64(struct bpf_reg_state *reg)
+{
+ reg->var_off = tnum_subreg(reg->var_off);
+ __reg_assign_32_into_64(reg);
+}
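
The BPF ISA defines every alu32 result as zero-extended into the full 64-bit register, which is why keeping only the subreg tnum and then assigning the 32-bit bounds into the 64-bit ones is sound. The C analogue:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t r = 0xdeadbeefcafef00dULL;
            /* A 32-bit ALU op computes in the low word and zero-extends,
             * exactly like assigning a u32 result to a u64 in C. */
            r = (uint32_t)(r + 1);  /* upper 32 bits are now known zero */
            printf("%#llx\n", (unsigned long long)r);  /* 0xcafef00e */
            return 0;
    }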
/* truncate register to smaller size (in bytes)
* must be called with size < BPF_REG_SIZE
@@ -2806,6 +3001,14 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
}
reg->smin_value = reg->umin_value;
reg->smax_value = reg->umax_value;
+
+ /* If size is smaller than the 32-bit register, the 32-bit values
+ * are also truncated, so push the 64-bit bounds into the 32-bit
+ * bounds. Sizes below 32 bits were already truncated above.
+ */
+ if (size >= 4)
+ return;
+ __reg_combine_64_into_32(reg);
}
static bool bpf_map_is_rdonly(const struct bpf_map *map)
@@ -3460,13 +3663,17 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
expected_type = CONST_PTR_TO_MAP;
if (type != expected_type)
goto err_type;
- } else if (arg_type == ARG_PTR_TO_CTX) {
+ } else if (arg_type == ARG_PTR_TO_CTX ||
+ arg_type == ARG_PTR_TO_CTX_OR_NULL) {
expected_type = PTR_TO_CTX;
- if (type != expected_type)
- goto err_type;
- err = check_ctx_reg(env, reg, regno);
- if (err < 0)
- return err;
+ if (!(register_is_null(reg) &&
+ arg_type == ARG_PTR_TO_CTX_OR_NULL)) {
+ if (type != expected_type)
+ goto err_type;
+ err = check_ctx_reg(env, reg, regno);
+ if (err < 0)
+ return err;
+ }
} else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
expected_type = PTR_TO_SOCK_COMMON;
/* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
@@ -3576,11 +3783,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
} else if (arg_type_is_mem_size(arg_type)) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
- /* remember the mem_size which may be used later
- * to refine return values.
+ /* This is used to refine r0 return value bounds for helpers
+ * that enforce this value as an upper bound on return values.
+ * See do_refine_retval_range() for helpers that can refine
+ * the return value. The C type of the size argument is u32, so we
+ * pull the register bound from umax_value; if it is negative, the
+ * verifier errors out. Only upper bounds can be learned because the
+ * retval is an int type and negative retvals are allowed.
*/
- meta->msize_smax_value = reg->smax_value;
- meta->msize_umax_value = reg->umax_value;
+ meta->msize_max_value = reg->umax_value;
/* The register is SCALAR_VALUE; the access check
* happens using its boundaries.
@@ -3649,7 +3860,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
if (func_id != BPF_FUNC_perf_event_read &&
func_id != BPF_FUNC_perf_event_output &&
func_id != BPF_FUNC_skb_output &&
- func_id != BPF_FUNC_perf_event_read_value)
+ func_id != BPF_FUNC_perf_event_read_value &&
+ func_id != BPF_FUNC_xdp_output)
goto error;
break;
case BPF_MAP_TYPE_STACK_TRACE:
@@ -3693,14 +3905,16 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
if (func_id != BPF_FUNC_sk_redirect_map &&
func_id != BPF_FUNC_sock_map_update &&
func_id != BPF_FUNC_map_delete_elem &&
- func_id != BPF_FUNC_msg_redirect_map)
+ func_id != BPF_FUNC_msg_redirect_map &&
+ func_id != BPF_FUNC_sk_select_reuseport)
goto error;
break;
case BPF_MAP_TYPE_SOCKHASH:
if (func_id != BPF_FUNC_sk_redirect_hash &&
func_id != BPF_FUNC_sock_hash_update &&
func_id != BPF_FUNC_map_delete_elem &&
- func_id != BPF_FUNC_msg_redirect_hash)
+ func_id != BPF_FUNC_msg_redirect_hash &&
+ func_id != BPF_FUNC_sk_select_reuseport)
goto error;
break;
case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
@@ -3737,6 +3951,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_FUNC_perf_event_output:
case BPF_FUNC_perf_event_read_value:
case BPF_FUNC_skb_output:
+ case BPF_FUNC_xdp_output:
if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
goto error;
break;
@@ -3774,7 +3989,9 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
goto error;
break;
case BPF_FUNC_sk_select_reuseport:
- if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
+ if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
+ map->map_type != BPF_MAP_TYPE_SOCKMAP &&
+ map->map_type != BPF_MAP_TYPE_SOCKHASH)
goto error;
break;
case BPF_FUNC_map_peek_elem:
@@ -4117,10 +4334,11 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
func_id != BPF_FUNC_probe_read_str))
return;
- ret_reg->smax_value = meta->msize_smax_value;
- ret_reg->umax_value = meta->msize_umax_value;
+ ret_reg->smax_value = meta->msize_max_value;
+ ret_reg->s32_max_value = meta->msize_max_value;
__reg_deduce_bounds(ret_reg);
__reg_bound_offset(ret_reg);
+ __update_reg_bounds(ret_reg);
}
static int
@@ -4427,7 +4645,17 @@ static bool signed_add_overflows(s64 a, s64 b)
return res < a;
}
-static bool signed_sub_overflows(s64 a, s64 b)
+static bool signed_add32_overflows(s32 a, s32 b)
+{
+ /* Do the add in u32, where overflow is well-defined */
+ s32 res = (s32)((u32)a + (u32)b);
+
+ if (b < 0)
+ return res > a;
+ return res < a;
+}
+
+static bool signed_sub_overflows(s64 a, s64 b)
{
/* Do the sub in u64, where overflow is well-defined */
s64 res = (s64)((u64)a - (u64)b);
@@ -4437,6 +4665,16 @@ static bool signed_sub_overflows(s64 a, s64 b)
return res > a;
}
+static bool signed_sub32_overflows(s32 a, s32 b)
+{
+ /* Do the sub in u32, where overflow is well-defined */
+ s32 res = (s32)((u32)a - (u32)b);
+
+ if (b < 0)
+ return res < a;
+ return res > a;
+}
+
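
The 32-bit predicates use the same idiom as their 64-bit counterparts: do the arithmetic in the unsigned type, where wraparound is well-defined, then check whether the result moved the wrong way relative to the sign of b. A standalone probe of the boundary cases (two's complement assumed, as the kernel does):

    #include <stdint.h>
    #include <stdio.h>

    static int signed_add32_overflows(int32_t a, int32_t b)
    {
            /* Add in u32 (defined wraparound), then sign-compare. */
            int32_t res = (int32_t)((uint32_t)a + (uint32_t)b);

            if (b < 0)
                    return res > a;
            return res < a;
    }

    int main(void)
    {
            printf("%d\n", signed_add32_overflows(INT32_MAX, 1));   /* 1: wraps */
            printf("%d\n", signed_add32_overflows(INT32_MAX, 0));   /* 0 */
            printf("%d\n", signed_add32_overflows(INT32_MIN, -1));  /* 1: wraps */
            return 0;
    }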
static bool check_reg_sane_offset(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
enum bpf_reg_type type)
@@ -4673,6 +4911,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
!check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
return -EINVAL;
+ /* pointer types do not carry 32-bit bounds at the moment. */
+ __mark_reg32_unbounded(dst_reg);
+
switch (opcode) {
case BPF_ADD:
ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
@@ -4836,6 +5077,518 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
return 0;
}
+static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ s32 smin_val = src_reg->s32_min_value;
+ s32 smax_val = src_reg->s32_max_value;
+ u32 umin_val = src_reg->u32_min_value;
+ u32 umax_val = src_reg->u32_max_value;
+
+ if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
+ signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
+ dst_reg->s32_min_value = S32_MIN;
+ dst_reg->s32_max_value = S32_MAX;
+ } else {
+ dst_reg->s32_min_value += smin_val;
+ dst_reg->s32_max_value += smax_val;
+ }
+ if (dst_reg->u32_min_value + umin_val < umin_val ||
+ dst_reg->u32_max_value + umax_val < umax_val) {
+ dst_reg->u32_min_value = 0;
+ dst_reg->u32_max_value = U32_MAX;
+ } else {
+ dst_reg->u32_min_value += umin_val;
+ dst_reg->u32_max_value += umax_val;
+ }
+}
+
+static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ s64 smin_val = src_reg->smin_value;
+ s64 smax_val = src_reg->smax_value;
+ u64 umin_val = src_reg->umin_value;
+ u64 umax_val = src_reg->umax_value;
+
+ if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
+ signed_add_overflows(dst_reg->smax_value, smax_val)) {
+ dst_reg->smin_value = S64_MIN;
+ dst_reg->smax_value = S64_MAX;
+ } else {
+ dst_reg->smin_value += smin_val;
+ dst_reg->smax_value += smax_val;
+ }
+ if (dst_reg->umin_value + umin_val < umin_val ||
+ dst_reg->umax_value + umax_val < umax_val) {
+ dst_reg->umin_value = 0;
+ dst_reg->umax_value = U64_MAX;
+ } else {
+ dst_reg->umin_value += umin_val;
+ dst_reg->umax_value += umax_val;
+ }
+}
+
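
The effect of the pair above on concrete intervals, sketched with the overflow probe done in wider arithmetic for clarity (the kernel detects it via the wraparound checks shown above instead):

    #include <stdint.h>
    #include <stdio.h>

    struct bounds32 { int32_t smin, smax; };

    static struct bounds32 add_bounds(struct bounds32 d, struct bounds32 s)
    {
            /* Same policy as scalar32_min_max_add: any overflowing corner
             * forces the conservative full range. */
            int64_t lo = (int64_t)d.smin + s.smin;
            int64_t hi = (int64_t)d.smax + s.smax;

            if (lo < INT32_MIN || hi > INT32_MAX)
                    return (struct bounds32){ INT32_MIN, INT32_MAX };
            return (struct bounds32){ (int32_t)lo, (int32_t)hi };
    }

    int main(void)
    {
            struct bounds32 c = add_bounds((struct bounds32){ 10, 20 },
                                           (struct bounds32){ 1, 5 });
            struct bounds32 d = add_bounds((struct bounds32){ INT32_MAX - 1, INT32_MAX },
                                           (struct bounds32){ 1, 1 });

            printf("[%d, %d]\n", c.smin, c.smax);  /* [11, 25] */
            printf("[%d, %d]\n", d.smin, d.smax);  /* full s32 range */
            return 0;
    }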
+static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ s32 smin_val = src_reg->s32_min_value;
+ s32 smax_val = src_reg->s32_max_value;
+ u32 umin_val = src_reg->u32_min_value;
+ u32 umax_val = src_reg->u32_max_value;
+
+ if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
+ signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
+ /* Overflow possible, we know nothing */
+ dst_reg->s32_min_value = S32_MIN;
+ dst_reg->s32_max_value = S32_MAX;
+ } else {
+ dst_reg->s32_min_value -= smax_val;
+ dst_reg->s32_max_value -= smin_val;
+ }
+ if (dst_reg->u32_min_value < umax_val) {
+ /* Overflow possible, we know nothing */
+ dst_reg->u32_min_value = 0;
+ dst_reg->u32_max_value = U32_MAX;
+ } else {
+ /* Cannot overflow (as long as bounds are consistent) */
+ dst_reg->u32_min_value -= umax_val;
+ dst_reg->u32_max_value -= umin_val;
+ }
+}
+
+static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ s64 smin_val = src_reg->smin_value;
+ s64 smax_val = src_reg->smax_value;
+ u64 umin_val = src_reg->umin_value;
+ u64 umax_val = src_reg->umax_value;
+
+ if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
+ signed_sub_overflows(dst_reg->smax_value, smin_val)) {
+ /* Overflow possible, we know nothing */
+ dst_reg->smin_value = S64_MIN;
+ dst_reg->smax_value = S64_MAX;
+ } else {
+ dst_reg->smin_value -= smax_val;
+ dst_reg->smax_value -= smin_val;
+ }
+ if (dst_reg->umin_value < umax_val) {
+ /* Overflow possible, we know nothing */
+ dst_reg->umin_value = 0;
+ dst_reg->umax_value = U64_MAX;
+ } else {
+ /* Cannot overflow (as long as bounds are consistent) */
+ dst_reg->umin_value -= umax_val;
+ dst_reg->umax_value -= umin_val;
+ }
+}
+
+static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ s32 smin_val = src_reg->s32_min_value;
+ u32 umin_val = src_reg->u32_min_value;
+ u32 umax_val = src_reg->u32_max_value;
+
+ if (smin_val < 0 || dst_reg->s32_min_value < 0) {
+ /* Ain't nobody got time to multiply that sign */
+ __mark_reg32_unbounded(dst_reg);
+ return;
+ }
+ /* Both values are positive, so we can work with unsigned and
+ * copy the result to signed (unless it exceeds S32_MAX).
+ */
+ if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
+ /* Potential overflow, we know nothing */
+ __mark_reg32_unbounded(dst_reg);
+ return;
+ }
+ dst_reg->u32_min_value *= umin_val;
+ dst_reg->u32_max_value *= umax_val;
+ if (dst_reg->u32_max_value > S32_MAX) {
+ /* Overflow possible, we know nothing */
+ dst_reg->s32_min_value = S32_MIN;
+ dst_reg->s32_max_value = S32_MAX;
+ } else {
+ dst_reg->s32_min_value = dst_reg->u32_min_value;
+ dst_reg->s32_max_value = dst_reg->u32_max_value;
+ }
+}
+
+static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ s64 smin_val = src_reg->smin_value;
+ u64 umin_val = src_reg->umin_value;
+ u64 umax_val = src_reg->umax_value;
+
+ if (smin_val < 0 || dst_reg->smin_value < 0) {
+ /* Ain't nobody got time to multiply that sign */
+ __mark_reg64_unbounded(dst_reg);
+ return;
+ }
+ /* Both values are positive, so we can work with unsigned and
+ * copy the result to signed (unless it exceeds S64_MAX).
+ */
+ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
+ /* Potential overflow, we know nothing */
+ __mark_reg64_unbounded(dst_reg);
+ return;
+ }
+ dst_reg->umin_value *= umin_val;
+ dst_reg->umax_value *= umax_val;
+ if (dst_reg->umax_value > S64_MAX) {
+ /* Overflow possible, we know nothing */
+ dst_reg->smin_value = S64_MIN;
+ dst_reg->smax_value = S64_MAX;
+ } else {
+ dst_reg->smin_value = dst_reg->umin_value;
+ dst_reg->smax_value = dst_reg->umax_value;
+ }
+}
+
+static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ bool src_known = tnum_subreg_is_const(src_reg->var_off);
+ bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
+ struct tnum var32_off = tnum_subreg(dst_reg->var_off);
+ s32 smin_val = src_reg->s32_min_value;
+ u32 umax_val = src_reg->u32_max_value;
+
+ /* Assuming scalar_min_max_and will be called so it's safe
+ * to skip updating the register for the known 32-bit case.
+ */
+ if (src_known && dst_known)
+ return;
+
+ /* We get our minimum from the var_off, since that's inherently
+ * bitwise. Our maximum is the minimum of the operands' maxima.
+ */
+ dst_reg->u32_min_value = var32_off.value;
+ dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
+ if (dst_reg->s32_min_value < 0 || smin_val < 0) {
+ /* Lose signed bounds when ANDing negative numbers,
+ * ain't nobody got time for that.
+ */
+ dst_reg->s32_min_value = S32_MIN;
+ dst_reg->s32_max_value = S32_MAX;
+ } else {
+ /* ANDing two positives gives a positive, so safe to
+ * cast the result into s32.
+ */
+ dst_reg->s32_min_value = dst_reg->u32_min_value;
+ dst_reg->s32_max_value = dst_reg->u32_max_value;
+ }
+}
+
+static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ bool src_known = tnum_is_const(src_reg->var_off);
+ bool dst_known = tnum_is_const(dst_reg->var_off);
+ s64 smin_val = src_reg->smin_value;
+ u64 umax_val = src_reg->umax_value;
+
+ if (src_known && dst_known) {
+ __mark_reg_known(dst_reg, dst_reg->var_off.value &
+ src_reg->var_off.value);
+ return;
+ }
+
+ /* We get our minimum from the var_off, since that's inherently
+ * bitwise. Our maximum is the minimum of the operands' maxima.
+ */
+ dst_reg->umin_value = dst_reg->var_off.value;
+ dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
+ if (dst_reg->smin_value < 0 || smin_val < 0) {
+ /* Lose signed bounds when ANDing negative numbers,
+ * ain't nobody got time for that.
+ */
+ dst_reg->smin_value = S64_MIN;
+ dst_reg->smax_value = S64_MAX;
+ } else {
+ /* ANDing two positives gives a positive, so safe to
+ * cast result into s64.
+ */
+ dst_reg->smin_value = dst_reg->umin_value;
+ dst_reg->smax_value = dst_reg->umax_value;
+ }
+ /* We may learn something more from the var_off */
+ __update_reg_bounds(dst_reg);
+}
+
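
The upper bound used here rests on the identity x & y <= min(x, y) for unsigned values, since AND can only clear bits; the minimum comes from the tnum because only bits known set in both operands are guaranteed to survive. For instance:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Unsigned AND can only clear bits, so the result is bounded
             * above by both operands, hence by the smaller maximum. */
            uint64_t x = 0xf0f0, y = 0x0ff0;
            uint64_t r = x & y;  /* 0x00f0 */

            printf("%#llx <= min(%#llx, %#llx): %d\n",
                   (unsigned long long)r, (unsigned long long)x,
                   (unsigned long long)y, r <= (x < y ? x : y));
            return 0;
    }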
+static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ bool src_known = tnum_subreg_is_const(src_reg->var_off);
+ bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
+ struct tnum var32_off = tnum_subreg(dst_reg->var_off);
+ s32 smin_val = src_reg->s32_min_value;
+ u32 umin_val = src_reg->u32_min_value;
+
+ /* Assuming scalar_min_max_or will be called so it's safe
+ * to skip updating the register for the known case.
+ */
+ if (src_known && dst_known)
+ return;
+
+ /* We get our maximum from the var_off, and our minimum is the
+ * maximum of the operands' minima
+ */
+ dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
+ dst_reg->u32_max_value = var32_off.value | var32_off.mask;
+ if (dst_reg->s32_min_value < 0 || smin_val < 0) {
+ /* Lose signed bounds when ORing negative numbers,
+ * ain't nobody got time for that.
+ */
+ dst_reg->s32_min_value = S32_MIN;
+ dst_reg->s32_max_value = S32_MAX;
+ } else {
+ /* ORing two positives gives a positive, so safe to
+ * cast the result into s32.
+ */
+ dst_reg->s32_min_value = dst_reg->u32_min_value;
+ dst_reg->s32_max_value = dst_reg->u32_max_value;
+ }
+}
+
+static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ bool src_known = tnum_is_const(src_reg->var_off);
+ bool dst_known = tnum_is_const(dst_reg->var_off);
+ s64 smin_val = src_reg->smin_value;
+ u64 umin_val = src_reg->umin_value;
+
+ if (src_known && dst_known) {
+ __mark_reg_known(dst_reg, dst_reg->var_off.value |
+ src_reg->var_off.value);
+ return;
+ }
+
+ /* We get our maximum from the var_off, and our minimum is the
+ * maximum of the operands' minima
+ */
+ dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
+ dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
+ if (dst_reg->smin_value < 0 || smin_val < 0) {
+ /* Lose signed bounds when ORing negative numbers,
+ * ain't nobody got time for that.
+ */
+ dst_reg->smin_value = S64_MIN;
+ dst_reg->smax_value = S64_MAX;
+ } else {
+ /* ORing two positives gives a positive, so safe to
+ * cast result into s64.
+ */
+ dst_reg->smin_value = dst_reg->umin_value;
+ dst_reg->smax_value = dst_reg->umax_value;
+ }
+ /* We may learn something more from the var_off */
+ __update_reg_bounds(dst_reg);
+}
+
+static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
+ u64 umin_val, u64 umax_val)
+{
+ /* We lose all sign bit information (except what we can pick
+ * up from var_off)
+ */
+ dst_reg->s32_min_value = S32_MIN;
+ dst_reg->s32_max_value = S32_MAX;
+ /* If we might shift our top bit out, then we know nothing */
+ if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
+ dst_reg->u32_min_value = 0;
+ dst_reg->u32_max_value = U32_MAX;
+ } else {
+ dst_reg->u32_min_value <<= umin_val;
+ dst_reg->u32_max_value <<= umax_val;
+ }
+}
+
+static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ u32 umax_val = src_reg->u32_max_value;
+ u32 umin_val = src_reg->u32_min_value;
+ /* u32 alu operation will zext upper bits */
+ struct tnum subreg = tnum_subreg(dst_reg->var_off);
+
+ __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
+ dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
+ /* Not strictly required, but to be careful mark the reg64 bounds as
+ * unknown so that we are forced to pick them up from the tnum and
+ * zext later; if some path skips this step we are still safe.
+ */
+ __mark_reg64_unbounded(dst_reg);
+ __update_reg32_bounds(dst_reg);
+}
+
+static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
+ u64 umin_val, u64 umax_val)
+{
+ /* Special case <<32 because it is a common compiler pattern to sign
+ * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
+ * positive we know this shift will also be positive so we can track
+ * bounds correctly. Otherwise we lose all sign bit information except
+ * what we can pick up from var_off. Perhaps we can generalize this
+ * later to shifts of any length.
+ */
+ if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
+ dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
+ else
+ dst_reg->smax_value = S64_MAX;
+
+ if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
+ dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
+ else
+ dst_reg->smin_value = S64_MIN;
+
+ /* If we might shift our top bit out, then we know nothing */
+ if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
+ dst_reg->umin_value = 0;
+ dst_reg->umax_value = U64_MAX;
+ } else {
+ dst_reg->umin_value <<= umin_val;
+ dst_reg->umax_value <<= umax_val;
+ }
+}
+
+static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ u64 umax_val = src_reg->umax_value;
+ u64 umin_val = src_reg->umin_value;
+
+ /* scalar64 calc uses 32bit unshifted bounds so must be called first */
+ __scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
+ __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
+
+ dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
+ /* We may learn something more from the var_off */
+ __update_reg_bounds(dst_reg);
+}
+
+static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ struct tnum subreg = tnum_subreg(dst_reg->var_off);
+ u32 umax_val = src_reg->u32_max_value;
+ u32 umin_val = src_reg->u32_min_value;
+
+ /* BPF_RSH is an unsigned shift. If the value in dst_reg might
+ * be negative, then either:
+ * 1) src_reg might be zero, so the sign bit of the result is
+ * unknown, so we lose our signed bounds
+ * 2) it's known negative, thus the unsigned bounds capture the
+ * signed bounds
+ * 3) the signed bounds cross zero, so they tell us nothing
+ * about the result
+ * If the value in dst_reg is known nonnegative, then again the
+ * unsigned bounds capture the signed bounds.
+ * Thus, in all cases it suffices to blow away our signed bounds
+ * and rely on inferring new ones from the unsigned bounds and
+ * var_off of the result.
+ */
+ dst_reg->s32_min_value = S32_MIN;
+ dst_reg->s32_max_value = S32_MAX;
+
+ dst_reg->var_off = tnum_rshift(subreg, umin_val);
+ dst_reg->u32_min_value >>= umax_val;
+ dst_reg->u32_max_value >>= umin_val;
+
+ __mark_reg64_unbounded(dst_reg);
+ __update_reg32_bounds(dst_reg);
+}
+
+static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ u64 umax_val = src_reg->umax_value;
+ u64 umin_val = src_reg->umin_value;
+
+ /* BPF_RSH is an unsigned shift. If the value in dst_reg might
+ * be negative, then either:
+ * 1) src_reg might be zero, so the sign bit of the result is
+ * unknown, so we lose our signed bounds
+ * 2) it's known negative, thus the unsigned bounds capture the
+ * signed bounds
+ * 3) the signed bounds cross zero, so they tell us nothing
+ * about the result
+ * If the value in dst_reg is known nonnegative, then again the
+ * unsigned bounds capture the signed bounds.
+ * Thus, in all cases it suffices to blow away our signed bounds
+ * and rely on inferring new ones from the unsigned bounds and
+ * var_off of the result.
+ */
+ dst_reg->smin_value = S64_MIN;
+ dst_reg->smax_value = S64_MAX;
+ dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
+ dst_reg->umin_value >>= umax_val;
+ dst_reg->umax_value >>= umin_val;
+
+ /* It's not easy to operate on alu32 bounds here because it depends
+ * on bits being shifted in. Take the easy way out and mark unbounded
+ * so we can recalculate later from tnum.
+ */
+ __mark_reg32_unbounded(dst_reg);
+ __update_reg_bounds(dst_reg);
+}
+
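
Case (2) of the comment in concrete numbers: a value known negative as s32 occupies the top half of the u32 range, so the unsigned bounds already carry all the information, and the logical shift then acts on that unsigned view:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int32_t v = -8;            /* known negative */
            uint32_t u = (uint32_t)v;  /* 0xfffffff8: captured by unsigned bounds */

            /* BPF_RSH is a logical shift: it operates on the unsigned view. */
            printf("%#x >> 1 = %#x\n", u, u >> 1);  /* 0x7ffffffc, sign bit gone */
            return 0;
    }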
+static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ u64 umin_val = src_reg->u32_min_value;
+
+ /* Upon reaching here, src_known is true and
+ * umax_val is equal to umin_val.
+ */
+ dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
+ dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
+
+ dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
+
+ /* blow away the dst_reg umin_value/umax_value and rely on
+ * dst_reg var_off to refine the result.
+ */
+ dst_reg->u32_min_value = 0;
+ dst_reg->u32_max_value = U32_MAX;
+
+ __mark_reg64_unbounded(dst_reg);
+ __update_reg32_bounds(dst_reg);
+}
+
+static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ u64 umin_val = src_reg->umin_value;
+
+ /* Upon reaching here, src_known is true and umax_val is equal
+ * to umin_val.
+ */
+ dst_reg->smin_value >>= umin_val;
+ dst_reg->smax_value >>= umin_val;
+
+ dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
+
+ /* blow away the dst_reg umin_value/umax_value and rely on
+ * dst_reg var_off to refine the result.
+ */
+ dst_reg->umin_value = 0;
+ dst_reg->umax_value = U64_MAX;
+
+ /* It's not easy to operate on alu32 bounds here because it depends
+ * on bits being shifted in from the upper 32 bits. Take the easy way out
+ * and mark unbounded so we can recalculate later from tnum.
+ */
+ __mark_reg32_unbounded(dst_reg);
+ __update_reg_bounds(dst_reg);
+}
+
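
Because the shift amount is a known constant here, shifting both signed corners directly is safe: arithmetic shift preserves sign and order. A small illustration (C's >> on negative values is implementation-defined, but arithmetic on the compilers the kernel supports):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t smin = -64, smax = 48;

            /* Arithmetic shift keeps the sign, so shifted bounds stay ordered. */
            printf("[%lld, %lld]\n",
                   (long long)(smin >> 2), (long long)(smax >> 2));  /* [-16, 12] */
            return 0;
    }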
/* WARNING: This function does calculations on 64-bit values, but the actual
* execution may occur on 32-bit values. Therefore, things like bitshifts
* need extra checks in the 32-bit case.
@@ -4850,33 +5603,47 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
bool src_known, dst_known;
s64 smin_val, smax_val;
u64 umin_val, umax_val;
+ s32 s32_min_val, s32_max_val;
+ u32 u32_min_val, u32_max_val;
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
u32 dst = insn->dst_reg;
int ret;
-
- if (insn_bitness == 32) {
- /* Relevant for 32-bit RSH: Information can propagate towards
- * LSB, so it isn't sufficient to only truncate the output to
- * 32 bits.
- */
- coerce_reg_to_size(dst_reg, 4);
- coerce_reg_to_size(&src_reg, 4);
- }
+ bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
umin_val = src_reg.umin_value;
umax_val = src_reg.umax_value;
- src_known = tnum_is_const(src_reg.var_off);
- dst_known = tnum_is_const(dst_reg->var_off);
- if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
- smin_val > smax_val || umin_val > umax_val) {
- /* Taint dst register if offset had invalid bounds derived from
- * e.g. dead branches.
- */
- __mark_reg_unknown(env, dst_reg);
- return 0;
+ s32_min_val = src_reg.s32_min_value;
+ s32_max_val = src_reg.s32_max_value;
+ u32_min_val = src_reg.u32_min_value;
+ u32_max_val = src_reg.u32_max_value;
+
+ if (alu32) {
+ src_known = tnum_subreg_is_const(src_reg.var_off);
+ dst_known = tnum_subreg_is_const(dst_reg->var_off);
+ if ((src_known &&
+ (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
+ s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
+ /* Taint dst register if offset had invalid bounds
+ * derived from e.g. dead branches.
+ */
+ __mark_reg_unknown(env, dst_reg);
+ return 0;
+ }
+ } else {
+ src_known = tnum_is_const(src_reg.var_off);
+ dst_known = tnum_is_const(dst_reg->var_off);
+ if ((src_known &&
+ (smin_val != smax_val || umin_val != umax_val)) ||
+ smin_val > smax_val || umin_val > umax_val) {
+ /* Taint dst register if offset had invalid bounds
+ * derived from e.g. dead branches.
+ */
+ __mark_reg_unknown(env, dst_reg);
+ return 0;
+ }
}
if (!src_known &&
@@ -4885,6 +5652,20 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
return 0;
}
+ /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
+ * There are two classes of instructions: for the first class we track
+ * both alu32 and alu64 sign/unsigned bounds independently; this
+ * provides the greatest amount of precision when alu operations are
+ * mixed with jmp32 operations. These operations are BPF_ADD, BPF_SUB,
+ * BPF_MUL, BPF_AND, and BPF_OR. This is possible because these ops
+ * have fairly easy-to-understand and easy-to-calculate behavior in
+ * both 32-bit and 64-bit alu ops.
+ * See alu32 verifier tests for examples. The second class of
+ * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy
+ * with regards to tracking sign/unsigned bounds because the bits may
+ * cross subreg boundaries in the alu64 case. When this happens we mark
+ * the reg unbounded in the subreg bound space and use the resulting
+ * tnum to calculate an approximation of the sign/unsigned bounds.
+ */
switch (opcode) {
case BPF_ADD:
ret = sanitize_val_alu(env, insn);
@@ -4892,22 +5673,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
return ret;
}
- if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
- signed_add_overflows(dst_reg->smax_value, smax_val)) {
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
- } else {
- dst_reg->smin_value += smin_val;
- dst_reg->smax_value += smax_val;
- }
- if (dst_reg->umin_value + umin_val < umin_val ||
- dst_reg->umax_value + umax_val < umax_val) {
- dst_reg->umin_value = 0;
- dst_reg->umax_value = U64_MAX;
- } else {
- dst_reg->umin_value += umin_val;
- dst_reg->umax_value += umax_val;
- }
+ scalar32_min_max_add(dst_reg, &src_reg);
+ scalar_min_max_add(dst_reg, &src_reg);
dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
break;
case BPF_SUB:
@@ -4916,111 +5683,24 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
return ret;
}
- if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
- signed_sub_overflows(dst_reg->smax_value, smin_val)) {
- /* Overflow possible, we know nothing */
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
- } else {
- dst_reg->smin_value -= smax_val;
- dst_reg->smax_value -= smin_val;
- }
- if (dst_reg->umin_value < umax_val) {
- /* Overflow possible, we know nothing */
- dst_reg->umin_value = 0;
- dst_reg->umax_value = U64_MAX;
- } else {
- /* Cannot overflow (as long as bounds are consistent) */
- dst_reg->umin_value -= umax_val;
- dst_reg->umax_value -= umin_val;
- }
+ scalar32_min_max_sub(dst_reg, &src_reg);
+ scalar_min_max_sub(dst_reg, &src_reg);
dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
break;
case BPF_MUL:
dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
- if (smin_val < 0 || dst_reg->smin_value < 0) {
- /* Ain't nobody got time to multiply that sign */
- __mark_reg_unbounded(dst_reg);
- __update_reg_bounds(dst_reg);
- break;
- }
- /* Both values are positive, so we can work with unsigned and
- * copy the result to signed (unless it exceeds S64_MAX).
- */
- if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
- /* Potential overflow, we know nothing */
- __mark_reg_unbounded(dst_reg);
- /* (except what we can learn from the var_off) */
- __update_reg_bounds(dst_reg);
- break;
- }
- dst_reg->umin_value *= umin_val;
- dst_reg->umax_value *= umax_val;
- if (dst_reg->umax_value > S64_MAX) {
- /* Overflow possible, we know nothing */
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
- } else {
- dst_reg->smin_value = dst_reg->umin_value;
- dst_reg->smax_value = dst_reg->umax_value;
- }
+ scalar32_min_max_mul(dst_reg, &src_reg);
+ scalar_min_max_mul(dst_reg, &src_reg);
break;
case BPF_AND:
- if (src_known && dst_known) {
- __mark_reg_known(dst_reg, dst_reg->var_off.value &
- src_reg.var_off.value);
- break;
- }
- /* We get our minimum from the var_off, since that's inherently
- * bitwise. Our maximum is the minimum of the operands' maxima.
- */
dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
- dst_reg->umin_value = dst_reg->var_off.value;
- dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
- if (dst_reg->smin_value < 0 || smin_val < 0) {
- /* Lose signed bounds when ANDing negative numbers,
- * ain't nobody got time for that.
- */
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
- } else {
- /* ANDing two positives gives a positive, so safe to
- * cast result into s64.
- */
- dst_reg->smin_value = dst_reg->umin_value;
- dst_reg->smax_value = dst_reg->umax_value;
- }
- /* We may learn something more from the var_off */
- __update_reg_bounds(dst_reg);
+ scalar32_min_max_and(dst_reg, &src_reg);
+ scalar_min_max_and(dst_reg, &src_reg);
break;
case BPF_OR:
- if (src_known && dst_known) {
- __mark_reg_known(dst_reg, dst_reg->var_off.value |
- src_reg.var_off.value);
- break;
- }
- /* We get our maximum from the var_off, and our minimum is the
- * maximum of the operands' minima
- */
dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
- dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
- dst_reg->umax_value = dst_reg->var_off.value |
- dst_reg->var_off.mask;
- if (dst_reg->smin_value < 0 || smin_val < 0) {
- /* Lose signed bounds when ORing negative numbers,
- * ain't nobody got time for that.
- */
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
- } else {
- /* ORing two positives gives a positive, so safe to
- * cast result into s64.
- */
- dst_reg->smin_value = dst_reg->umin_value;
- dst_reg->smax_value = dst_reg->umax_value;
- }
- /* We may learn something more from the var_off */
- __update_reg_bounds(dst_reg);
+ scalar32_min_max_or(dst_reg, &src_reg);
+ scalar_min_max_or(dst_reg, &src_reg);
break;
case BPF_LSH:
if (umax_val >= insn_bitness) {
@@ -5030,22 +5710,10 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
- /* We lose all sign bit information (except what we can pick
- * up from var_off)
- */
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
- /* If we might shift our top bit out, then we know nothing */
- if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
- dst_reg->umin_value = 0;
- dst_reg->umax_value = U64_MAX;
- } else {
- dst_reg->umin_value <<= umin_val;
- dst_reg->umax_value <<= umax_val;
- }
- dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
- /* We may learn something more from the var_off */
- __update_reg_bounds(dst_reg);
+ if (alu32)
+ scalar32_min_max_lsh(dst_reg, &src_reg);
+ else
+ scalar_min_max_lsh(dst_reg, &src_reg);
break;
case BPF_RSH:
if (umax_val >= insn_bitness) {
@@ -5055,27 +5723,10 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
- /* BPF_RSH is an unsigned shift. If the value in dst_reg might
- * be negative, then either:
- * 1) src_reg might be zero, so the sign bit of the result is
- * unknown, so we lose our signed bounds
- * 2) it's known negative, thus the unsigned bounds capture the
- * signed bounds
- * 3) the signed bounds cross zero, so they tell us nothing
- * about the result
- * If the value in dst_reg is known nonnegative, then again the
- * unsigned bounts capture the signed bounds.
- * Thus, in all cases it suffices to blow away our signed bounds
- * and rely on inferring new ones from the unsigned bounds and
- * var_off of the result.
- */
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
- dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
- dst_reg->umin_value >>= umax_val;
- dst_reg->umax_value >>= umin_val;
- /* We may learn something more from the var_off */
- __update_reg_bounds(dst_reg);
+ if (alu32)
+ scalar32_min_max_rsh(dst_reg, &src_reg);
+ else
+ scalar_min_max_rsh(dst_reg, &src_reg);
break;
case BPF_ARSH:
if (umax_val >= insn_bitness) {
@@ -5085,38 +5736,21 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
-
- /* Upon reaching here, src_known is true and
- * umax_val is equal to umin_val.
- */
- if (insn_bitness == 32) {
- dst_reg->smin_value = (u32)(((s32)dst_reg->smin_value) >> umin_val);
- dst_reg->smax_value = (u32)(((s32)dst_reg->smax_value) >> umin_val);
- } else {
- dst_reg->smin_value >>= umin_val;
- dst_reg->smax_value >>= umin_val;
- }
-
- dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val,
- insn_bitness);
-
- /* blow away the dst_reg umin_value/umax_value and rely on
- * dst_reg var_off to refine the result.
- */
- dst_reg->umin_value = 0;
- dst_reg->umax_value = U64_MAX;
- __update_reg_bounds(dst_reg);
+ if (alu32)
+ scalar32_min_max_arsh(dst_reg, &src_reg);
+ else
+ scalar_min_max_arsh(dst_reg, &src_reg);
break;
default:
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
- if (BPF_CLASS(insn->code) != BPF_ALU64) {
- /* 32-bit ALU ops are (32,32)->32 */
- coerce_reg_to_size(dst_reg, 4);
- }
+ /* ALU32 ops are zero extended into 64bit register */
+ if (alu32)
+ zext_32_to_64(dst_reg);
+ __update_reg_bounds(dst_reg);
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;
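
The hunk above replaces the open-coded 64-bit bounds arithmetic with per-width helpers (scalar32_min_max_*() alongside scalar_min_max_*()) and models the ISA rule that 32-bit ALU results are zero-extended into the full register via zext_32_to_64() instead of the old coerce_reg_to_size(). A minimal userspace sketch of that zero-extension step (struct and names are illustrative, not the kernel's):

#include <inttypes.h>
#include <stdio.h>

/* Simplified register state: unsigned bounds only. */
struct bounds {
	uint64_t umin, umax;		/* 64-bit bounds */
	uint32_t u32_min, u32_max;	/* 32-bit subregister bounds */
};

/* An ALU32 result is zero-extended, so the 64-bit bounds collapse
 * to the (zero-extended) 32-bit bounds. */
static void zext_32_to_64(struct bounds *b)
{
	b->umin = b->u32_min;
	b->umax = b->u32_max;
}

int main(void)
{
	struct bounds b = { .umin = 0, .umax = UINT64_MAX,
			    .u32_min = 10, .u32_max = 200 };

	zext_32_to_64(&b);
	printf("umin=%" PRIu64 " umax=%" PRIu64 "\n", b.umin, b.umax);
	return 0;
}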
@@ -5290,7 +5924,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
mark_reg_unknown(env, regs,
insn->dst_reg);
}
- coerce_reg_to_size(dst_reg, 4);
+ zext_32_to_64(dst_reg);
}
} else {
/* case: R = imm
@@ -5460,55 +6094,83 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
new_range);
}
-/* compute branch direction of the expression "if (reg opcode val) goto target;"
- * and return:
- * 1 - branch will be taken and "goto target" will be executed
- * 0 - branch will not be taken and fall-through to next insn
- * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
- */
-static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
- bool is_jmp32)
+static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
{
- struct bpf_reg_state reg_lo;
- s64 sval;
+ struct tnum subreg = tnum_subreg(reg->var_off);
+ s32 sval = (s32)val;
- if (__is_pointer_value(false, reg))
- return -1;
+ switch (opcode) {
+ case BPF_JEQ:
+ if (tnum_is_const(subreg))
+ return !!tnum_equals_const(subreg, val);
+ break;
+ case BPF_JNE:
+ if (tnum_is_const(subreg))
+ return !tnum_equals_const(subreg, val);
+ break;
+ case BPF_JSET:
+ if ((~subreg.mask & subreg.value) & val)
+ return 1;
+ if (!((subreg.mask | subreg.value) & val))
+ return 0;
+ break;
+ case BPF_JGT:
+ if (reg->u32_min_value > val)
+ return 1;
+ else if (reg->u32_max_value <= val)
+ return 0;
+ break;
+ case BPF_JSGT:
+ if (reg->s32_min_value > sval)
+ return 1;
+ else if (reg->s32_max_value < sval)
+ return 0;
+ break;
+ case BPF_JLT:
+ if (reg->u32_max_value < val)
+ return 1;
+ else if (reg->u32_min_value >= val)
+ return 0;
+ break;
+ case BPF_JSLT:
+ if (reg->s32_max_value < sval)
+ return 1;
+ else if (reg->s32_min_value >= sval)
+ return 0;
+ break;
+ case BPF_JGE:
+ if (reg->u32_min_value >= val)
+ return 1;
+ else if (reg->u32_max_value < val)
+ return 0;
+ break;
+ case BPF_JSGE:
+ if (reg->s32_min_value >= sval)
+ return 1;
+ else if (reg->s32_max_value < sval)
+ return 0;
+ break;
+ case BPF_JLE:
+ if (reg->u32_max_value <= val)
+ return 1;
+ else if (reg->u32_min_value > val)
+ return 0;
+ break;
+ case BPF_JSLE:
+ if (reg->s32_max_value <= sval)
+ return 1;
+ else if (reg->s32_min_value > sval)
+ return 0;
+ break;
+ }
- if (is_jmp32) {
- reg_lo = *reg;
- reg = &reg_lo;
- /* For JMP32, only low 32 bits are compared, coerce_reg_to_size
- * could truncate high bits and update umin/umax according to
- * information of low bits.
- */
- coerce_reg_to_size(reg, 4);
- /* smin/smax need special handling. For example, after coerce,
- * if smin_value is 0x00000000ffffffffLL, the value is -1 when
- * used as operand to JMP32. It is a negative number from s32's
- * point of view, while it is a positive number when seen as
- * s64. The smin/smax are kept as s64, therefore, when used with
- * JMP32, they need to be transformed into s32, then sign
- * extended back to s64.
- *
- * Also, smin/smax were copied from umin/umax. If umin/umax has
- * different sign bit, then min/max relationship doesn't
- * maintain after casting into s32, for this case, set smin/smax
- * to safest range.
- */
- if ((reg->umax_value ^ reg->umin_value) &
- (1ULL << 31)) {
- reg->smin_value = S32_MIN;
- reg->smax_value = S32_MAX;
- }
- reg->smin_value = (s64)(s32)reg->smin_value;
- reg->smax_value = (s64)(s32)reg->smax_value;
+ return -1;
+}
- val = (u32)val;
- sval = (s64)(s32)val;
- } else {
- sval = (s64)val;
- }
+
+static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
+{
+ s64 sval = (s64)val;
switch (opcode) {
case BPF_JEQ:
@@ -5578,27 +6240,22 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
return -1;
}
-/* Generate min value of the high 32-bit from TNUM info. */
-static u64 gen_hi_min(struct tnum var)
-{
- return var.value & ~0xffffffffULL;
-}
-
-/* Generate max value of the high 32-bit from TNUM info. */
-static u64 gen_hi_max(struct tnum var)
-{
- return (var.value | var.mask) & ~0xffffffffULL;
-}
-
-/* Return true if VAL is compared with a s64 sign extended from s32, and they
- * are with the same signedness.
+/* compute branch direction of the expression "if (reg opcode val) goto target;"
+ * and return:
+ * 1 - branch will be taken and "goto target" will be executed
+ * 0 - branch will not be taken and fall-through to next insn
+ * -1 - unknown. Example: "if (reg < 5)" is unknown when the register
+ *      value range is [0,10]
*/
-static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
+static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
+ bool is_jmp32)
{
- return ((s32)sval >= 0 &&
- reg->smin_value >= 0 && reg->smax_value <= S32_MAX) ||
- ((s32)sval < 0 &&
- reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
+ if (__is_pointer_value(false, reg))
+ return -1;
+
+ if (is_jmp32)
+ return is_branch32_taken(reg, val, opcode);
+ return is_branch64_taken(reg, val, opcode);
}
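
With the split above, is_branch_taken() only dispatches on the compare width; each helper decides the branch purely from the tracked bounds. A standalone sketch of the unsigned BPF_JGT case (function name illustrative):

#include <stdint.h>

/* Decide "if (reg > val)" from unsigned bounds.
 * 1 = always taken, 0 = never taken, -1 = unknown. */
static int branch_jgt(uint64_t umin, uint64_t umax, uint64_t val)
{
	if (umin > val)
		return 1;	/* smallest possible value already wins */
	if (umax <= val)
		return 0;	/* largest possible value still loses */
	return -1;		/* bounds straddle val */
}

For reg in [0, 10] and val 5 this returns -1, matching the "unknown" example in the comment above.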
/* Adjusts the register min/max values in the case that the dst_reg is the
@@ -5607,10 +6264,16 @@ static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
* In JEQ/JNE cases we also adjust the var_off values.
*/
static void reg_set_min_max(struct bpf_reg_state *true_reg,
- struct bpf_reg_state *false_reg, u64 val,
+ struct bpf_reg_state *false_reg,
+ u64 val, u32 val32,
u8 opcode, bool is_jmp32)
{
- s64 sval;
+ struct tnum false_32off = tnum_subreg(false_reg->var_off);
+ struct tnum false_64off = false_reg->var_off;
+ struct tnum true_32off = tnum_subreg(true_reg->var_off);
+ struct tnum true_64off = true_reg->var_off;
+ s64 sval = (s64)val;
+ s32 sval32 = (s32)val32;
/* If the dst_reg is a pointer, we can't learn anything about its
* variable offset from the compare (unless src_reg were a pointer into
@@ -5621,9 +6284,6 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
if (__is_pointer_value(false, false_reg))
return;
- val = is_jmp32 ? (u32)val : val;
- sval = is_jmp32 ? (s64)(s32)val : (s64)val;
-
switch (opcode) {
case BPF_JEQ:
case BPF_JNE:
@@ -5635,211 +6295,150 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
* if it is true we know the value for sure. Likewise for
* BPF_JNE.
*/
- if (is_jmp32) {
- u64 old_v = reg->var_off.value;
- u64 hi_mask = ~0xffffffffULL;
-
- reg->var_off.value = (old_v & hi_mask) | val;
- reg->var_off.mask &= hi_mask;
- } else {
+ if (is_jmp32)
+ __mark_reg32_known(reg, val32);
+ else
__mark_reg_known(reg, val);
- }
break;
}
case BPF_JSET:
- false_reg->var_off = tnum_and(false_reg->var_off,
- tnum_const(~val));
- if (is_power_of_2(val))
- true_reg->var_off = tnum_or(true_reg->var_off,
- tnum_const(val));
+ if (is_jmp32) {
+ false_32off = tnum_and(false_32off, tnum_const(~val32));
+ if (is_power_of_2(val32))
+ true_32off = tnum_or(true_32off,
+ tnum_const(val32));
+ } else {
+ false_64off = tnum_and(false_64off, tnum_const(~val));
+ if (is_power_of_2(val))
+ true_64off = tnum_or(true_64off,
+ tnum_const(val));
+ }
break;
case BPF_JGE:
case BPF_JGT:
{
- u64 false_umax = opcode == BPF_JGT ? val : val - 1;
- u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
-
if (is_jmp32) {
- false_umax += gen_hi_max(false_reg->var_off);
- true_umin += gen_hi_min(true_reg->var_off);
+ u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1;
+ u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;
+
+ false_reg->u32_max_value = min(false_reg->u32_max_value,
+ false_umax);
+ true_reg->u32_min_value = max(true_reg->u32_min_value,
+ true_umin);
+ } else {
+ u64 false_umax = opcode == BPF_JGT ? val : val - 1;
+ u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
+
+ false_reg->umax_value = min(false_reg->umax_value, false_umax);
+ true_reg->umin_value = max(true_reg->umin_value, true_umin);
}
- false_reg->umax_value = min(false_reg->umax_value, false_umax);
- true_reg->umin_value = max(true_reg->umin_value, true_umin);
break;
}
case BPF_JSGE:
case BPF_JSGT:
{
- s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
- s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
+ if (is_jmp32) {
+ s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1;
+ s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;
- /* If the full s64 was not sign-extended from s32 then don't
- * deduct further info.
- */
- if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
- break;
- false_reg->smax_value = min(false_reg->smax_value, false_smax);
- true_reg->smin_value = max(true_reg->smin_value, true_smin);
+ false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
+ true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
+ } else {
+ s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
+ s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
+
+ false_reg->smax_value = min(false_reg->smax_value, false_smax);
+ true_reg->smin_value = max(true_reg->smin_value, true_smin);
+ }
break;
}
case BPF_JLE:
case BPF_JLT:
{
- u64 false_umin = opcode == BPF_JLT ? val : val + 1;
- u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
-
if (is_jmp32) {
- false_umin += gen_hi_min(false_reg->var_off);
- true_umax += gen_hi_max(true_reg->var_off);
+ u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1;
+ u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;
+
+ false_reg->u32_min_value = max(false_reg->u32_min_value,
+ false_umin);
+ true_reg->u32_max_value = min(true_reg->u32_max_value,
+ true_umax);
+ } else {
+ u64 false_umin = opcode == BPF_JLT ? val : val + 1;
+ u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
+
+ false_reg->umin_value = max(false_reg->umin_value, false_umin);
+ true_reg->umax_value = min(true_reg->umax_value, true_umax);
}
- false_reg->umin_value = max(false_reg->umin_value, false_umin);
- true_reg->umax_value = min(true_reg->umax_value, true_umax);
break;
}
case BPF_JSLE:
case BPF_JSLT:
{
- s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
- s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
+ if (is_jmp32) {
+ s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1;
+ s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;
- if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
- break;
- false_reg->smin_value = max(false_reg->smin_value, false_smin);
- true_reg->smax_value = min(true_reg->smax_value, true_smax);
+ false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
+ true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
+ } else {
+ s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
+ s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
+
+ false_reg->smin_value = max(false_reg->smin_value, false_smin);
+ true_reg->smax_value = min(true_reg->smax_value, true_smax);
+ }
break;
}
default:
- break;
+ return;
}
- __reg_deduce_bounds(false_reg);
- __reg_deduce_bounds(true_reg);
- /* We might have learned some bits from the bounds. */
- __reg_bound_offset(false_reg);
- __reg_bound_offset(true_reg);
if (is_jmp32) {
- __reg_bound_offset32(false_reg);
- __reg_bound_offset32(true_reg);
+ false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
+ tnum_subreg(false_32off));
+ true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
+ tnum_subreg(true_32off));
+ __reg_combine_32_into_64(false_reg);
+ __reg_combine_32_into_64(true_reg);
+ } else {
+ false_reg->var_off = false_64off;
+ true_reg->var_off = true_64off;
+ __reg_combine_64_into_32(false_reg);
+ __reg_combine_64_into_32(true_reg);
}
- /* Intersecting with the old var_off might have improved our bounds
- * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
- * then new var_off is (0; 0x7f...fc) which improves our umax.
- */
- __update_reg_bounds(false_reg);
- __update_reg_bounds(true_reg);
}
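
Worked example of the tightening above: for "if (r1 > 5)" with r1 in [0, 10], the true branch gets umin = max(0, 6) = 6 and the false branch gets umax = min(10, 5) = 5. For JMP32 only the low 32 bits are refined, so the updated subregister tnum is spliced back into the 64-bit var_off before the 32- and 64-bit views are reconciled. A sketch of that splice on the "known value" half of a tnum (the kernel does the same for the mask):

#include <stdint.h>

/* Keep the high half of the 64-bit state, replace the low half with
 * the refined 32-bit knowledge. */
static uint64_t combine_subreg(uint64_t full64, uint32_t sub32)
{
	return (full64 & 0xffffffff00000000ULL) | sub32;
}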
/* Same as above, but for the case that dst_reg holds a constant and src_reg is
* the variable reg.
*/
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
- struct bpf_reg_state *false_reg, u64 val,
+ struct bpf_reg_state *false_reg,
+ u64 val, u32 val32,
u8 opcode, bool is_jmp32)
{
- s64 sval;
-
- if (__is_pointer_value(false, false_reg))
- return;
-
- val = is_jmp32 ? (u32)val : val;
- sval = is_jmp32 ? (s64)(s32)val : (s64)val;
-
- switch (opcode) {
- case BPF_JEQ:
- case BPF_JNE:
- {
- struct bpf_reg_state *reg =
- opcode == BPF_JEQ ? true_reg : false_reg;
-
- if (is_jmp32) {
- u64 old_v = reg->var_off.value;
- u64 hi_mask = ~0xffffffffULL;
-
- reg->var_off.value = (old_v & hi_mask) | val;
- reg->var_off.mask &= hi_mask;
- } else {
- __mark_reg_known(reg, val);
- }
- break;
- }
- case BPF_JSET:
- false_reg->var_off = tnum_and(false_reg->var_off,
- tnum_const(~val));
- if (is_power_of_2(val))
- true_reg->var_off = tnum_or(true_reg->var_off,
- tnum_const(val));
- break;
- case BPF_JGE:
- case BPF_JGT:
- {
- u64 false_umin = opcode == BPF_JGT ? val : val + 1;
- u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
-
- if (is_jmp32) {
- false_umin += gen_hi_min(false_reg->var_off);
- true_umax += gen_hi_max(true_reg->var_off);
- }
- false_reg->umin_value = max(false_reg->umin_value, false_umin);
- true_reg->umax_value = min(true_reg->umax_value, true_umax);
- break;
- }
- case BPF_JSGE:
- case BPF_JSGT:
- {
- s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1;
- s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval;
-
- if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
- break;
- false_reg->smin_value = max(false_reg->smin_value, false_smin);
- true_reg->smax_value = min(true_reg->smax_value, true_smax);
- break;
- }
- case BPF_JLE:
- case BPF_JLT:
- {
- u64 false_umax = opcode == BPF_JLT ? val : val - 1;
- u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
-
- if (is_jmp32) {
- false_umax += gen_hi_max(false_reg->var_off);
- true_umin += gen_hi_min(true_reg->var_off);
- }
- false_reg->umax_value = min(false_reg->umax_value, false_umax);
- true_reg->umin_value = max(true_reg->umin_value, true_umin);
- break;
- }
- case BPF_JSLE:
- case BPF_JSLT:
- {
- s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1;
- s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval;
-
- if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
- break;
- false_reg->smax_value = min(false_reg->smax_value, false_smax);
- true_reg->smin_value = max(true_reg->smin_value, true_smin);
- break;
- }
- default:
- break;
- }
-
- __reg_deduce_bounds(false_reg);
- __reg_deduce_bounds(true_reg);
- /* We might have learned some bits from the bounds. */
- __reg_bound_offset(false_reg);
- __reg_bound_offset(true_reg);
- if (is_jmp32) {
- __reg_bound_offset32(false_reg);
- __reg_bound_offset32(true_reg);
- }
- /* Intersecting with the old var_off might have improved our bounds
- * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
- * then new var_off is (0; 0x7f...fc) which improves our umax.
+ /* How can we transform "a <op> b" into "b <op> a"? */
+ static const u8 opcode_flip[16] = {
+ /* these stay the same */
+ [BPF_JEQ >> 4] = BPF_JEQ,
+ [BPF_JNE >> 4] = BPF_JNE,
+ [BPF_JSET >> 4] = BPF_JSET,
+ /* these swap "lesser" and "greater" (L and G in the opcodes) */
+ [BPF_JGE >> 4] = BPF_JLE,
+ [BPF_JGT >> 4] = BPF_JLT,
+ [BPF_JLE >> 4] = BPF_JGE,
+ [BPF_JLT >> 4] = BPF_JGT,
+ [BPF_JSGE >> 4] = BPF_JSLE,
+ [BPF_JSGT >> 4] = BPF_JSLT,
+ [BPF_JSLE >> 4] = BPF_JSGE,
+ [BPF_JSLT >> 4] = BPF_JSGT
+ };
+ opcode = opcode_flip[opcode >> 4];
+ /* This uses zero as "not present in table"; luckily the zero opcode,
+ * BPF_JA, can't get here.
*/
- __update_reg_bounds(false_reg);
- __update_reg_bounds(true_reg);
+ if (opcode)
+ reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
}
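
reg_set_min_max_inv() is now a thin wrapper: since "const <op> reg" is equivalent to "reg <flipped op> const", flipping the opcode lets a single bound-adjustment routine serve both operand orders. BPF jump opcodes live in the high nibble of insn->code, hence the ">> 4" table index. A standalone sketch with illustrative values (the kernel uses the BPF_J* macros from the UAPI headers):

/* Illustrative subset of high-nibble-encoded jump opcodes. */
enum { JEQ = 0x10, JGT = 0x20, JGE = 0x30, JLT = 0xa0, JLE = 0xb0 };

static const unsigned char flip[16] = {
	[JEQ >> 4] = JEQ,	/* symmetric */
	[JGT >> 4] = JLT,	/* a > b   <=>  b < a  */
	[JGE >> 4] = JLE,	/* a >= b  <=>  b <= a */
	[JLT >> 4] = JGT,
	[JLE >> 4] = JGE,
};

/* flip[op >> 4] == 0 means "not in table"; 0x00 is BPF_JA, which never
 * reaches this path. */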
/* Regs are known to be equal, so intersect their min/max/var_off */
@@ -6128,13 +6727,22 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
dst_reg = &regs[insn->dst_reg];
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
- if (BPF_SRC(insn->code) == BPF_K)
- pred = is_branch_taken(dst_reg, insn->imm,
- opcode, is_jmp32);
- else if (src_reg->type == SCALAR_VALUE &&
- tnum_is_const(src_reg->var_off))
- pred = is_branch_taken(dst_reg, src_reg->var_off.value,
- opcode, is_jmp32);
+ if (BPF_SRC(insn->code) == BPF_K) {
+ pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
+ } else if (src_reg->type == SCALAR_VALUE &&
+ is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
+ pred = is_branch_taken(dst_reg,
+ tnum_subreg(src_reg->var_off).value,
+ opcode,
+ is_jmp32);
+ } else if (src_reg->type == SCALAR_VALUE &&
+ !is_jmp32 && tnum_is_const(src_reg->var_off)) {
+ pred = is_branch_taken(dst_reg,
+ src_reg->var_off.value,
+ opcode,
+ is_jmp32);
+ }
+
if (pred >= 0) {
err = mark_chain_precision(env, insn->dst_reg);
if (BPF_SRC(insn->code) == BPF_X && !err)
@@ -6168,32 +6776,24 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
*/
if (BPF_SRC(insn->code) == BPF_X) {
struct bpf_reg_state *src_reg = &regs[insn->src_reg];
- struct bpf_reg_state lo_reg0 = *dst_reg;
- struct bpf_reg_state lo_reg1 = *src_reg;
- struct bpf_reg_state *src_lo, *dst_lo;
-
- dst_lo = &lo_reg0;
- src_lo = &lo_reg1;
- coerce_reg_to_size(dst_lo, 4);
- coerce_reg_to_size(src_lo, 4);
if (dst_reg->type == SCALAR_VALUE &&
src_reg->type == SCALAR_VALUE) {
if (tnum_is_const(src_reg->var_off) ||
- (is_jmp32 && tnum_is_const(src_lo->var_off)))
+ (is_jmp32 &&
+ tnum_is_const(tnum_subreg(src_reg->var_off))))
reg_set_min_max(&other_branch_regs[insn->dst_reg],
dst_reg,
- is_jmp32
- ? src_lo->var_off.value
- : src_reg->var_off.value,
+ src_reg->var_off.value,
+ tnum_subreg(src_reg->var_off).value,
opcode, is_jmp32);
else if (tnum_is_const(dst_reg->var_off) ||
- (is_jmp32 && tnum_is_const(dst_lo->var_off)))
+ (is_jmp32 &&
+ tnum_is_const(tnum_subreg(dst_reg->var_off))))
reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
src_reg,
- is_jmp32
- ? dst_lo->var_off.value
- : dst_reg->var_off.value,
+ dst_reg->var_off.value,
+ tnum_subreg(dst_reg->var_off).value,
opcode, is_jmp32);
else if (!is_jmp32 &&
(opcode == BPF_JEQ || opcode == BPF_JNE))
@@ -6204,7 +6804,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
}
} else if (dst_reg->type == SCALAR_VALUE) {
reg_set_min_max(&other_branch_regs[insn->dst_reg],
- dst_reg, insn->imm, opcode, is_jmp32);
+ dst_reg, insn->imm, (u32)insn->imm,
+ opcode, is_jmp32);
}
/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
@@ -6405,8 +7006,9 @@ static int check_return_code(struct bpf_verifier_env *env)
struct tnum range = tnum_range(0, 1);
int err;
- /* The struct_ops func-ptr's return type could be "void" */
- if (env->prog->type == BPF_PROG_TYPE_STRUCT_OPS &&
+ /* LSM and struct_ops func-ptr's return type could be "void" */
+ if ((env->prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
+ env->prog->type == BPF_PROG_TYPE_LSM) &&
!prog->aux->attach_func_proto->type)
return 0;
@@ -8139,26 +8741,48 @@ static bool is_tracing_prog_type(enum bpf_prog_type type)
}
}
+static bool is_preallocated_map(struct bpf_map *map)
+{
+ if (!check_map_prealloc(map))
+ return false;
+ if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta))
+ return false;
+ return true;
+}
+
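
Context for the check below: BPF hash maps are preallocated unless the BPF_F_NO_PREALLOC flag opts into run-time allocation. A userspace sketch of creating the safe, preallocated variant (assumes a libbpf version that provides bpf_map_create(); map name and sizes are illustrative):

#include <bpf/bpf.h>

int create_prealloc_hash(void)
{
	/* No BPF_F_NO_PREALLOC in opts, so the map is preallocated. */
	return bpf_map_create(BPF_MAP_TYPE_HASH, "counters",
			      sizeof(__u32), sizeof(__u64), 1024, NULL);
}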
static int check_map_prog_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map,
struct bpf_prog *prog)
{
- /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
- * preallocated hash maps, since doing memory allocation
- * in overflow_handler can crash depending on where nmi got
- * triggered.
+ /*
+ * Validate that trace type programs use preallocated hash maps.
+ *
+ * For programs attached to PERF events this is mandatory as the
+ * perf NMI can hit any arbitrary code sequence.
+ *
+	 * All other trace types using run-time allocated hash maps are
+	 * unsafe as well because tracepoints or kprobes can be inside
+	 * locked regions of the memory allocator or at a place where a
+	 * recursion into the memory allocator would see inconsistent
+	 * state.
+ *
+ * On RT enabled kernels run-time allocation of all trace type
+ * programs is strictly prohibited due to lock type constraints. On
+ * !RT kernels it is allowed for backwards compatibility reasons for
+ * now, but warnings are emitted so developers are made aware of
+ * the unsafety and can fix their programs before this is enforced.
*/
- if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
- if (!check_map_prealloc(map)) {
+ if (is_tracing_prog_type(prog->type) && !is_preallocated_map(map)) {
+ if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
verbose(env, "perf_event programs can only use preallocated hash map\n");
return -EINVAL;
}
- if (map->inner_map_meta &&
- !check_map_prealloc(map->inner_map_meta)) {
- verbose(env, "perf_event programs can only use preallocated inner hash map\n");
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ verbose(env, "trace type programs can only use preallocated hash map\n");
return -EINVAL;
}
+ WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
+ verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
}
if ((is_tracing_prog_type(prog->type) ||
@@ -9774,6 +10398,26 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
return 0;
}
+#define SECURITY_PREFIX "security_"
+
+static int check_attach_modify_return(struct bpf_verifier_env *env)
+{
+ struct bpf_prog *prog = env->prog;
+ unsigned long addr = (unsigned long) prog->aux->trampoline->func.addr;
+
+ /* This is expected to be cleaned up in the future with the KRSI effort
+ * introducing the LSM_HOOK macro for cleaning up lsm_hooks.h.
+ */
+ if (within_error_injection_list(addr) ||
+ !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name,
+ sizeof(SECURITY_PREFIX) - 1))
+ return 0;
+
+ verbose(env, "fmod_ret attach_btf_id %u (%s) is not modifiable\n",
+ prog->aux->attach_btf_id, prog->aux->attach_func_name);
+
+ return -EINVAL;
+}
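
A sketch of a BPF_MODIFY_RETURN program that passes this check by targeting a security_*() hook (assumes vmlinux.h plus libbpf's SEC()/BPF_PROG() macros; names are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fmod_ret/security_socket_connect")
int BPF_PROG(deny_connect, struct socket *sock, struct sockaddr *address,
	     int addrlen)
{
	return -1;	/* non-zero (e.g. -EPERM) overrides the hook's return */
}

char LICENSE[] SEC("license") = "GPL";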
static int check_attach_btf_id(struct bpf_verifier_env *env)
{
@@ -9794,7 +10438,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
return check_struct_ops_btf_id(env);
- if (prog->type != BPF_PROG_TYPE_TRACING && !prog_extension)
+ if (prog->type != BPF_PROG_TYPE_TRACING &&
+ prog->type != BPF_PROG_TYPE_LSM &&
+ !prog_extension)
return 0;
if (!btf_id) {
@@ -9924,8 +10570,17 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
if (!prog_extension)
return -EINVAL;
/* fallthrough */
+ case BPF_MODIFY_RETURN:
+ case BPF_LSM_MAC:
case BPF_TRACE_FENTRY:
case BPF_TRACE_FEXIT:
+ prog->aux->attach_func_name = tname;
+ if (prog->type == BPF_PROG_TYPE_LSM) {
+ ret = bpf_lsm_verify_prog(&env->log, prog);
+ if (ret < 0)
+ return ret;
+ }
+
if (!btf_type_is_func(t)) {
verbose(env, "attach_btf_id %u is not a function\n",
btf_id);
@@ -9940,7 +10595,6 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
tr = bpf_trampoline_lookup(key);
if (!tr)
return -ENOMEM;
- prog->aux->attach_func_name = tname;
/* t is either vmlinux type or another program's type */
prog->aux->attach_func_proto = t;
mutex_lock(&tr->mutex);
@@ -9973,6 +10627,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
}
tr->func.addr = (void *)addr;
prog->aux->trampoline = tr;
+
+ if (prog->expected_attach_type == BPF_MODIFY_RETURN)
+ ret = check_attach_modify_return(env);
out:
mutex_unlock(&tr->mutex);
if (ret)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 3dead0416b91..915dda3f7f19 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -6303,27 +6303,58 @@ void cgroup_sk_free(struct sock_cgroup_data *skcd)
#endif /* CONFIG_SOCK_CGROUP_DATA */
#ifdef CONFIG_CGROUP_BPF
-int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
- struct bpf_prog *replace_prog, enum bpf_attach_type type,
+int cgroup_bpf_attach(struct cgroup *cgrp,
+ struct bpf_prog *prog, struct bpf_prog *replace_prog,
+ struct bpf_cgroup_link *link,
+ enum bpf_attach_type type,
u32 flags)
{
int ret;
mutex_lock(&cgroup_mutex);
- ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, type, flags);
+ ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags);
mutex_unlock(&cgroup_mutex);
return ret;
}
+
+int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *old_prog,
+ struct bpf_prog *new_prog)
+{
+ struct bpf_cgroup_link *cg_link;
+ int ret;
+
+ if (link->ops != &bpf_cgroup_link_lops)
+ return -EINVAL;
+
+ cg_link = container_of(link, struct bpf_cgroup_link, link);
+
+ mutex_lock(&cgroup_mutex);
+ /* link might have been auto-released by dying cgroup, so fail */
+ if (!cg_link->cgroup) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (old_prog && link->prog != old_prog) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+ ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
+out_unlock:
+ mutex_unlock(&cgroup_mutex);
+ return ret;
+}
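
From userspace this path is reached by updating an existing cgroup bpf_link; a sketch (assumes a libbpf version providing bpf_link_update(); function name is illustrative):

#include <bpf/bpf.h>

int swap_link_prog(int link_fd, int new_prog_fd)
{
	/* Atomically swap the program behind the link; the kernel side
	 * lands in cgroup_bpf_replace() above. */
	return bpf_link_update(link_fd, new_prog_fd, NULL);
}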
+
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type, u32 flags)
+ enum bpf_attach_type type)
{
int ret;
mutex_lock(&cgroup_mutex);
- ret = __cgroup_bpf_detach(cgrp, prog, type);
+ ret = __cgroup_bpf_detach(cgrp, prog, NULL, type);
mutex_unlock(&cgroup_mutex);
return ret;
}
+
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d22e4ba59dfa..318435c5bf0b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8417,23 +8417,22 @@ static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
enum perf_bpf_event_type type)
{
bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
- char sym[KSYM_NAME_LEN];
int i;
if (prog->aux->func_cnt == 0) {
- bpf_get_prog_name(prog, sym);
perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
(u64)(unsigned long)prog->bpf_func,
- prog->jited_len, unregister, sym);
+ prog->jited_len, unregister,
+ prog->aux->ksym.name);
} else {
for (i = 0; i < prog->aux->func_cnt; i++) {
struct bpf_prog *subprog = prog->aux->func[i];
- bpf_get_prog_name(subprog, sym);
perf_event_ksymbol(
PERF_RECORD_KSYMBOL_TYPE_BPF,
(u64)(unsigned long)subprog->bpf_func,
- subprog->jited_len, unregister, sym);
+ subprog->jited_len, unregister,
+ prog->aux->ksym.name);
}
}
}
@@ -9368,7 +9367,6 @@ static void bpf_overflow_handler(struct perf_event *event,
int ret = 0;
ctx.regs = perf_arch_bpf_user_pt_regs(regs);
- preempt_disable();
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
goto out;
rcu_read_lock();
@@ -9376,7 +9374,6 @@ static void bpf_overflow_handler(struct perf_event *event,
rcu_read_unlock();
out:
__this_cpu_dec(bpf_prog_active);
- preempt_enable();
if (!ret)
return;
diff --git a/kernel/extable.c b/kernel/extable.c
index a0024f27d3a1..7681f87e89dd 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -149,8 +149,6 @@ int kernel_text_address(unsigned long addr)
goto out;
if (is_bpf_text_address(addr))
goto out;
- if (is_bpf_image_address(addr))
- goto out;
ret = 0;
out:
if (no_rcu)
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index ec5c606bc3a1..55a6184f5990 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -268,16 +268,14 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
* All filters in the list are evaluated and the lowest BPF return
* value always takes priority (ignoring the DATA).
*/
- preempt_disable();
for (; f; f = f->prev) {
- u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
+ u32 cur_ret = bpf_prog_run_pin_on_cpu(f->prog, sd);
if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
ret = cur_ret;
*match = f;
}
}
- preempt_enable();
return ret;
}
#endif /* CONFIG_SECCOMP_FILTER */
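
bpf_prog_run_pin_on_cpu() keeps the program on one CPU without disabling preemption, which is why it can replace the preempt_disable()/preempt_enable() pair above. A simplified sketch of the idea (kernel-style pseudo-helper, not the exact implementation):

static inline u32 run_pinned(const struct bpf_prog *prog, const void *ctx)
{
	u32 ret;

	migrate_disable();	/* also valid on PREEMPT_RT */
	ret = BPF_PROG_RUN(prog, ctx);
	migrate_enable();
	return ret;
}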
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 68250d433bd7..ca1796747a77 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -83,7 +83,7 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
if (in_nmi()) /* not supported yet */
return 1;
- preempt_disable();
+ cant_sleep();
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
/*
@@ -115,11 +115,9 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
out:
__this_cpu_dec(bpf_prog_active);
- preempt_enable();
return ret;
}
-EXPORT_SYMBOL_GPL(trace_call_bpf);
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
@@ -781,8 +779,8 @@ static const struct bpf_func_proto bpf_send_signal_thread_proto = {
.arg1_type = ARG_ANYTHING,
};
-static const struct bpf_func_proto *
-tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+const struct bpf_func_proto *
+bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
@@ -843,6 +841,10 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_send_signal_proto;
case BPF_FUNC_send_signal_thread:
return &bpf_send_signal_thread_proto;
+ case BPF_FUNC_perf_event_read_value:
+ return &bpf_perf_event_read_value_proto;
+ case BPF_FUNC_get_ns_current_pid_tgid:
+ return &bpf_get_ns_current_pid_tgid_proto;
default:
return NULL;
}
@@ -858,14 +860,12 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_stackid_proto;
case BPF_FUNC_get_stack:
return &bpf_get_stack_proto;
- case BPF_FUNC_perf_event_read_value:
- return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
case BPF_FUNC_override_return:
return &bpf_override_return_proto;
#endif
default:
- return tracing_func_proto(func_id, prog);
+ return bpf_tracing_func_proto(func_id, prog);
}
}
@@ -975,7 +975,7 @@ tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_stack:
return &bpf_get_stack_proto_tp;
default:
- return tracing_func_proto(func_id, prog);
+ return bpf_tracing_func_proto(func_id, prog);
}
}
@@ -1028,6 +1028,45 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
.arg3_type = ARG_CONST_SIZE,
};
+BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
+ void *, buf, u32, size, u64, flags)
+{
+#ifndef CONFIG_X86
+ return -ENOENT;
+#else
+ static const u32 br_entry_size = sizeof(struct perf_branch_entry);
+ struct perf_branch_stack *br_stack = ctx->data->br_stack;
+ u32 to_copy;
+
+ if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
+ return -EINVAL;
+
+ if (unlikely(!br_stack))
+ return -EINVAL;
+
+ if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
+ return br_stack->nr * br_entry_size;
+
+ if (!buf || (size % br_entry_size != 0))
+ return -EINVAL;
+
+ to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
+ memcpy(buf, br_stack->entries, to_copy);
+
+ return to_copy;
+#endif
+}
+
+static const struct bpf_func_proto bpf_read_branch_records_proto = {
+ .func = bpf_read_branch_records,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg4_type = ARG_ANYTHING,
+};
+
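
A sketch of how a perf_event program might use the new helper (assumes libbpf headers and a kernel exposing bpf_read_branch_records(); names are illustrative):

#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <linux/bpf_perf_event.h>
#include <bpf/bpf_helpers.h>

struct perf_branch_entry entries[64];

SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
	/* Pass BPF_F_GET_BRANCH_RECORDS_SIZE as flags to query the size
	 * instead; with flags = 0 the records are copied. */
	int n = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);

	if (n < 0)
		return 0;
	/* n bytes of struct perf_branch_entry data are now valid. */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";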
static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -1040,8 +1079,10 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_stack_proto_tp;
case BPF_FUNC_perf_prog_read_value:
return &bpf_perf_prog_read_value_proto;
+ case BPF_FUNC_read_branch_records:
+ return &bpf_read_branch_records_proto;
default:
- return tracing_func_proto(func_id, prog);
+ return bpf_tracing_func_proto(func_id, prog);
}
}
@@ -1104,6 +1145,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
};
extern const struct bpf_func_proto bpf_skb_output_proto;
+extern const struct bpf_func_proto bpf_xdp_output_proto;
BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
struct bpf_map *, map, u64, flags)
@@ -1168,7 +1210,7 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_stack:
return &bpf_get_stack_proto_raw_tp;
default:
- return tracing_func_proto(func_id, prog);
+ return bpf_tracing_func_proto(func_id, prog);
}
}
@@ -1179,6 +1221,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
#ifdef CONFIG_NET
case BPF_FUNC_skb_output:
return &bpf_skb_output_proto;
+ case BPF_FUNC_xdp_output:
+ return &bpf_xdp_output_proto;
#endif
default:
return raw_tp_prog_func_proto(func_id, prog);
@@ -1213,6 +1257,13 @@ static bool tracing_prog_is_valid_access(int off, int size,
return btf_ctx_access(off, size, type, prog, info);
}
+int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
.get_func_proto = raw_tp_prog_func_proto,
.is_valid_access = raw_tp_prog_is_valid_access,
@@ -1227,6 +1278,7 @@ const struct bpf_verifier_ops tracing_verifier_ops = {
};
const struct bpf_prog_ops tracing_prog_ops = {
+ .test_run = bpf_prog_test_run_tracing,
};
static bool raw_tp_writable_prog_is_valid_access(int off, int size,
@@ -1475,10 +1527,9 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
+ cant_sleep();
rcu_read_lock();
- preempt_disable();
(void) BPF_PROG_RUN(prog, args);
- preempt_enable();
rcu_read_unlock();
}
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 18d16f3ef980..2a8e8e9c1c75 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1333,8 +1333,15 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
int size, esize;
int rctx;
- if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
- return;
+ if (bpf_prog_array_valid(call)) {
+ u32 ret;
+
+ preempt_disable();
+ ret = trace_call_bpf(call, regs);
+ preempt_enable();
+ if (!ret)
+ return;
+ }
esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
diff --git a/lib/objagg.c b/lib/objagg.c
index 55621fb82e0a..5e1676ccdadd 100644
--- a/lib/objagg.c
+++ b/lib/objagg.c
@@ -28,7 +28,7 @@ struct objagg_hints_node {
struct objagg_hints_node *parent;
unsigned int root_id;
struct objagg_obj_stats_info stats_info;
- unsigned long obj[0];
+ unsigned long obj[];
};
static struct objagg_hints_node *
@@ -66,7 +66,7 @@ struct objagg_obj {
* including nested objects
*/
struct objagg_obj_stats stats;
- unsigned long obj[0];
+ unsigned long obj[];
};
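
These objagg conversions, like the same change in the batman-adv and Bluetooth headers below, move from the old GNU zero-length array to the C99 flexible array member; the [] form lets the compiler diagnose misuse such as taking sizeof of the member or placing it anywhere but last. The pattern, sketched:

struct example {
	size_t len;
	unsigned char data[];	/* flexible array member, must be last */
};

/* allocate with: malloc(sizeof(struct example) + n) */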
static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index cecb230833be..a5fddf9ebcb7 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -6660,14 +6660,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
u64 start, finish;
int ret = 0, i;
- preempt_disable();
+ migrate_disable();
start = ktime_get_ns();
for (i = 0; i < runs; i++)
ret = BPF_PROG_RUN(fp, data);
finish = ktime_get_ns();
- preempt_enable();
+ migrate_enable();
*duration = finish - start;
do_div(*duration, runs);
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 40ab2aea7b31..4492e8d7ad20 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -30,7 +30,7 @@ static struct datalink_proto *find_snap_client(const unsigned char *desc)
{
struct datalink_proto *proto = NULL, *p;
- list_for_each_entry_rcu(p, &snap_list, node) {
+ list_for_each_entry_rcu(p, &snap_list, node, lockdep_is_held(&snap_lock)) {
if (!memcmp(p->type, desc, 5)) {
proto = p;
break;
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index a313165e7a67..78ec2e1b14d1 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -359,9 +359,8 @@ static void __vlan_vid_del(struct vlan_info *vlan_info,
int err;
err = vlan_kill_rx_filter_info(dev, proto, vid);
- if (err)
- pr_warn("failed to kill vid %04x/%d for device %s\n",
- proto, vid, dev->name);
+ if (err && dev->reg_state != NETREG_UNREGISTERING)
+ netdev_warn(dev, "failed to kill vid %04x/%d\n", proto, vid);
list_del(&vid_info->list);
kfree(vid_info);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 3d21dd83f8cc..b85da4b7a77b 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -88,7 +88,7 @@ struct batadv_dhcp_packet {
__u8 sname[64];
__u8 file[128];
__be32 magic;
- __u8 options[0];
+ __u8 options[];
};
#define BATADV_DHCP_YIADDR_LEN sizeof(((struct batadv_dhcp_packet *)0)->yiaddr)
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 692306df7b6f..2a234d0ad445 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -13,7 +13,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2020.0"
+#define BATADV_SOURCE_VERSION "2020.1"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 852932838ddc..a9635c882fe0 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -862,7 +862,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
u8 *tt_change_ptr;
spin_lock_bh(&orig_node->vlan_list_lock);
- hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+ hlist_for_each_entry(vlan, &orig_node->vlan_list, list) {
num_vlan++;
num_entries += atomic_read(&vlan->tt.num_entries);
}
@@ -888,7 +888,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
(*tt_data)->num_vlan = htons(num_vlan);
tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
- hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+ hlist_for_each_entry(vlan, &orig_node->vlan_list, list) {
tt_vlan->vid = htons(vlan->vid);
tt_vlan->crc = htonl(vlan->tt.crc);
@@ -937,7 +937,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
int change_offset;
spin_lock_bh(&bat_priv->softif_vlan_list_lock);
- hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+ hlist_for_each_entry(vlan, &bat_priv->softif_vlan_list, list) {
vlan_entries = atomic_read(&vlan->tt.num_entries);
if (vlan_entries < 1)
continue;
@@ -967,7 +967,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
(*tt_data)->num_vlan = htons(num_vlan);
tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
- hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+ hlist_for_each_entry(vlan, &bat_priv->softif_vlan_list, list) {
vlan_entries = atomic_read(&vlan->tt.num_entries);
if (vlan_entries < 1)
continue;
diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h
index 0029d5119be6..2fd253a61a2a 100644
--- a/net/bluetooth/a2mp.h
+++ b/net/bluetooth/a2mp.h
@@ -36,14 +36,14 @@ struct a2mp_cmd {
__u8 code;
__u8 ident;
__le16 len;
- __u8 data[0];
+ __u8 data[];
} __packed;
/* A2MP command codes */
#define A2MP_COMMAND_REJ 0x01
struct a2mp_cmd_rej {
__le16 reason;
- __u8 data[0];
+ __u8 data[];
} __packed;
#define A2MP_DISCOVER_REQ 0x02
@@ -62,7 +62,7 @@ struct a2mp_cl {
struct a2mp_discov_rsp {
__le16 mtu;
__le16 ext_feat;
- struct a2mp_cl cl[0];
+ struct a2mp_cl cl[];
} __packed;
#define A2MP_CHANGE_NOTIFY 0x04
@@ -93,7 +93,7 @@ struct a2mp_amp_assoc_req {
struct a2mp_amp_assoc_rsp {
__u8 id;
__u8 status;
- __u8 amp_assoc[0];
+ __u8 amp_assoc[];
} __packed;
#define A2MP_CREATEPHYSLINK_REQ 0x0A
@@ -101,7 +101,7 @@ struct a2mp_amp_assoc_rsp {
struct a2mp_physlink_req {
__u8 local_id;
__u8 remote_id;
- __u8 amp_assoc[0];
+ __u8 amp_assoc[];
} __packed;
#define A2MP_CREATEPHYSLINK_RSP 0x0B
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 24f18b133959..9680473ed7ef 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -74,14 +74,14 @@ struct bnep_setup_conn_req {
__u8 type;
__u8 ctrl;
__u8 uuid_size;
- __u8 service[0];
+ __u8 service[];
} __packed;
struct bnep_set_filter_req {
__u8 type;
__u8 ctrl;
__be16 len;
- __u8 list[0];
+ __u8 list[];
} __packed;
struct bnep_control_rsp {
@@ -93,7 +93,7 @@ struct bnep_control_rsp {
struct bnep_ext_hdr {
__u8 type;
__u8 len;
- __u8 data[0];
+ __u8 data[];
} __packed;
/* BNEP ioctl defines */
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 87691404d0c6..e245bc155cc2 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -467,6 +467,23 @@ static void hci_conn_auto_accept(struct work_struct *work)
&conn->dst);
}
+static void le_disable_advertising(struct hci_dev *hdev)
+{
+ if (ext_adv_capable(hdev)) {
+ struct hci_cp_le_set_ext_adv_enable cp;
+
+ cp.enable = 0x00;
+ cp.num_of_sets = 0x00;
+
+ hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
+ &cp);
+ } else {
+ u8 enable = 0x00;
+ hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
+ &enable);
+ }
+}
+
static void le_conn_timeout(struct work_struct *work)
{
struct hci_conn *conn = container_of(work, struct hci_conn,
@@ -481,9 +498,8 @@ static void le_conn_timeout(struct work_struct *work)
* (which doesn't have a timeout of its own).
*/
if (conn->role == HCI_ROLE_SLAVE) {
- u8 enable = 0x00;
- hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
- &enable);
+ /* Disable LE Advertising */
+ le_disable_advertising(hdev);
hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
return;
}
@@ -898,6 +914,16 @@ static void hci_req_directed_advertising(struct hci_request *req,
cp.peer_addr_type = conn->dst_type;
bacpy(&cp.peer_addr, &conn->dst);
+	/* As per Core Spec 5.2, Vol 2, Part E, Sec 7.8.53, the
+	 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND does not
+	 * support advertising data when the advertising set already
+	 * contains some; the controller shall return the error code
+	 * 'Invalid HCI Command Parameters' (0x12).
+	 * So it is required to remove the adv set for handle 0x00, since
+	 * we use instance 0 for directed adv.
+	 */
+ hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(cp.handle), &cp.handle);
+
hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
if (own_addr_type == ADDR_LE_DEV_RANDOM &&
@@ -1029,11 +1055,8 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
* anyway have to disable it in order to start directed
* advertising.
*/
- if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
- u8 enable = 0x00;
- hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
- &enable);
- }
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV))
+ __hci_req_disable_advertising(&req);
/* If requested to connect as slave use directed advertising */
if (conn->role == HCI_ROLE_SLAVE) {
@@ -1725,3 +1748,110 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
return hchan;
}
+
+u32 hci_conn_get_phy(struct hci_conn *conn)
+{
+ u32 phys = 0;
+
+ hci_dev_lock(conn->hdev);
+
+ /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
+ * Table 6.2: Packets defined for synchronous, asynchronous, and
+ * CSB logical transport types.
+ */
+ switch (conn->type) {
+ case SCO_LINK:
+ /* SCO logical transport (1 Mb/s):
+ * HV1, HV2, HV3 and DV.
+ */
+ phys |= BT_PHY_BR_1M_1SLOT;
+
+ break;
+
+ case ACL_LINK:
+ /* ACL logical transport (1 Mb/s) ptt=0:
+ * DH1, DM3, DH3, DM5 and DH5.
+ */
+ phys |= BT_PHY_BR_1M_1SLOT;
+
+ if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
+ phys |= BT_PHY_BR_1M_3SLOT;
+
+ if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
+ phys |= BT_PHY_BR_1M_5SLOT;
+
+ /* ACL logical transport (2 Mb/s) ptt=1:
+ * 2-DH1, 2-DH3 and 2-DH5.
+ */
+ if (!(conn->pkt_type & HCI_2DH1))
+ phys |= BT_PHY_EDR_2M_1SLOT;
+
+ if (!(conn->pkt_type & HCI_2DH3))
+ phys |= BT_PHY_EDR_2M_3SLOT;
+
+ if (!(conn->pkt_type & HCI_2DH5))
+ phys |= BT_PHY_EDR_2M_5SLOT;
+
+ /* ACL logical transport (3 Mb/s) ptt=1:
+ * 3-DH1, 3-DH3 and 3-DH5.
+ */
+ if (!(conn->pkt_type & HCI_3DH1))
+ phys |= BT_PHY_EDR_3M_1SLOT;
+
+ if (!(conn->pkt_type & HCI_3DH3))
+ phys |= BT_PHY_EDR_3M_3SLOT;
+
+ if (!(conn->pkt_type & HCI_3DH5))
+ phys |= BT_PHY_EDR_3M_5SLOT;
+
+ break;
+
+ case ESCO_LINK:
+ /* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
+ phys |= BT_PHY_BR_1M_1SLOT;
+
+ if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
+ phys |= BT_PHY_BR_1M_3SLOT;
+
+ /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
+ if (!(conn->pkt_type & ESCO_2EV3))
+ phys |= BT_PHY_EDR_2M_1SLOT;
+
+ if (!(conn->pkt_type & ESCO_2EV5))
+ phys |= BT_PHY_EDR_2M_3SLOT;
+
+ /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
+ if (!(conn->pkt_type & ESCO_3EV3))
+ phys |= BT_PHY_EDR_3M_1SLOT;
+
+ if (!(conn->pkt_type & ESCO_3EV5))
+ phys |= BT_PHY_EDR_3M_3SLOT;
+
+ break;
+
+ case LE_LINK:
+ if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
+ phys |= BT_PHY_LE_1M_TX;
+
+ if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
+ phys |= BT_PHY_LE_1M_RX;
+
+ if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
+ phys |= BT_PHY_LE_2M_TX;
+
+ if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
+ phys |= BT_PHY_LE_2M_RX;
+
+ if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
+ phys |= BT_PHY_LE_CODED_TX;
+
+ if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
+ phys |= BT_PHY_LE_CODED_RX;
+
+ break;
+ }
+
+ hci_dev_unlock(conn->hdev);
+
+ return phys;
+}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index cbbc34a006d1..2e7bc2da8371 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -31,6 +31,8 @@
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
+#include <linux/suspend.h>
+#include <linux/wait.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -603,6 +605,9 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
if (hdev->commands[8] & 0x01)
hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
+ if (hdev->commands[18] & 0x04)
+ hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
+
/* Some older Broadcom based Bluetooth 1.2 controllers do not
* support the Read Page Scan Type command. Check support for
* this command in the bit mask of supported commands.
@@ -838,6 +843,26 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt)
sizeof(support), &support);
}
+	/* If supported, set the erroneous data reporting mode to match
+	 * the wideband speech setting value.
+	 */
+ if (hdev->commands[18] & 0x08) {
+ bool enabled = hci_dev_test_flag(hdev,
+ HCI_WIDEBAND_SPEECH_ENABLED);
+
+ if (enabled !=
+ (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
+ struct hci_cp_write_def_err_data_reporting cp;
+
+ cp.err_data_reporting = enabled ?
+ ERR_DATA_REPORTING_ENABLED :
+ ERR_DATA_REPORTING_DISABLED;
+
+ hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
+ sizeof(cp), &cp);
+ }
+ }
+
/* Set Suggested Default Data Length to maximum if supported */
if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
struct hci_cp_le_write_def_data_len cp;
@@ -1764,6 +1789,9 @@ int hci_dev_do_close(struct hci_dev *hdev)
clear_bit(HCI_RUNNING, &hdev->flags);
hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
+ if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
+ wake_up(&hdev->suspend_wait_q);
+
/* After this point our queues are empty
* and no tasks are scheduled. */
hdev->close(hdev);
@@ -2285,7 +2313,7 @@ void hci_link_keys_clear(struct hci_dev *hdev)
{
struct link_key *key;
- list_for_each_entry_rcu(key, &hdev->link_keys, list) {
+ list_for_each_entry(key, &hdev->link_keys, list) {
list_del_rcu(&key->list);
kfree_rcu(key, rcu);
}
@@ -2295,7 +2323,7 @@ void hci_smp_ltks_clear(struct hci_dev *hdev)
{
struct smp_ltk *k;
- list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
+ list_for_each_entry(k, &hdev->long_term_keys, list) {
list_del_rcu(&k->list);
kfree_rcu(k, rcu);
}
@@ -2305,7 +2333,7 @@ void hci_smp_irks_clear(struct hci_dev *hdev)
{
struct smp_irk *k;
- list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
+ list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
list_del_rcu(&k->list);
kfree_rcu(k, rcu);
}
@@ -2315,7 +2343,7 @@ void hci_blocked_keys_clear(struct hci_dev *hdev)
{
struct blocked_key *b;
- list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
+ list_for_each_entry(b, &hdev->blocked_keys, list) {
list_del_rcu(&b->list);
kfree_rcu(b, rcu);
}
@@ -2327,7 +2355,7 @@ bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
struct blocked_key *b;
rcu_read_lock();
- list_for_each_entry(b, &hdev->blocked_keys, list) {
+ list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
blocked = true;
break;
@@ -3241,6 +3269,94 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
}
}
+static int hci_suspend_wait_event(struct hci_dev *hdev)
+{
+#define WAKE_COND \
+ (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
+ __SUSPEND_NUM_TASKS)
+
+ int i;
+ int ret = wait_event_timeout(hdev->suspend_wait_q,
+ WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
+
+ if (ret == 0) {
+ bt_dev_dbg(hdev, "Timed out waiting for suspend");
+ for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
+ if (test_bit(i, hdev->suspend_tasks))
+ bt_dev_dbg(hdev, "Bit %d is set", i);
+ clear_bit(i, hdev->suspend_tasks);
+ }
+
+ ret = -ETIMEDOUT;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void hci_prepare_suspend(struct work_struct *work)
+{
+ struct hci_dev *hdev =
+ container_of(work, struct hci_dev, suspend_prepare);
+
+ hci_dev_lock(hdev);
+ hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
+ hci_dev_unlock(hdev);
+}
+
+static int hci_change_suspend_state(struct hci_dev *hdev,
+ enum suspended_state next)
+{
+ hdev->suspend_state_next = next;
+ set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
+ queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
+ return hci_suspend_wait_event(hdev);
+}
+
+static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct hci_dev *hdev =
+ container_of(nb, struct hci_dev, suspend_notifier);
+ int ret = 0;
+
+ /* If powering down, wait for completion. */
+ if (mgmt_powering_down(hdev)) {
+ set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
+ ret = hci_suspend_wait_event(hdev);
+ if (ret)
+ goto done;
+ }
+
+ /* Suspend notifier should only act on events when powered. */
+ if (!hdev_is_powered(hdev))
+ goto done;
+
+ if (action == PM_SUSPEND_PREPARE) {
+ /* Suspend consists of two actions:
+ * - First, disconnect everything and make the controller not
+ * connectable (disabling scanning)
+ * - Second, program event filter/whitelist and enable scan
+ */
+ ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
+
+ /* Only configure whitelist if disconnect succeeded */
+ if (!ret)
+ ret = hci_change_suspend_state(hdev,
+ BT_SUSPEND_COMPLETE);
+ } else if (action == PM_POST_SUSPEND) {
+ ret = hci_change_suspend_state(hdev, BT_RUNNING);
+ }
+
+ /* If suspend failed, restore it to running */
+ if (ret && action == PM_SUSPEND_PREPARE)
+ hci_change_suspend_state(hdev, BT_RUNNING);
+
+done:
+ return ret ? notifier_from_errno(-EBUSY) : NOTIFY_STOP;
+}
+
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
@@ -3299,6 +3415,7 @@ struct hci_dev *hci_alloc_dev(void)
INIT_LIST_HEAD(&hdev->mgmt_pending);
INIT_LIST_HEAD(&hdev->blacklist);
INIT_LIST_HEAD(&hdev->whitelist);
+ INIT_LIST_HEAD(&hdev->wakeable);
INIT_LIST_HEAD(&hdev->uuids);
INIT_LIST_HEAD(&hdev->link_keys);
INIT_LIST_HEAD(&hdev->long_term_keys);
@@ -3318,6 +3435,7 @@ struct hci_dev *hci_alloc_dev(void)
INIT_WORK(&hdev->tx_work, hci_tx_work);
INIT_WORK(&hdev->power_on, hci_power_on);
INIT_WORK(&hdev->error_reset, hci_error_reset);
+ INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
@@ -3326,6 +3444,7 @@ struct hci_dev *hci_alloc_dev(void)
skb_queue_head_init(&hdev->raw_q);
init_waitqueue_head(&hdev->req_wait_q);
+ init_waitqueue_head(&hdev->suspend_wait_q);
INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
@@ -3437,6 +3556,11 @@ int hci_register_dev(struct hci_dev *hdev)
hci_sock_dev_event(hdev, HCI_DEV_REG);
hci_dev_hold(hdev);
+ hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
+ error = register_pm_notifier(&hdev->suspend_notifier);
+ if (error)
+ goto err_wqueue;
+
queue_work(hdev->req_workqueue, &hdev->power_on);
return id;
@@ -3470,6 +3594,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_dev_do_close(hdev);
+ unregister_pm_notifier(&hdev->suspend_notifier);
+
if (!test_bit(HCI_INIT, &hdev->flags) &&
!hci_dev_test_flag(hdev, HCI_SETUP) &&
!hci_dev_test_flag(hdev, HCI_CONFIG)) {
@@ -4387,13 +4513,16 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_sco_hdr *hdr = (void *) skb->data;
struct hci_conn *conn;
- __u16 handle;
+ __u16 handle, flags;
skb_pull(skb, HCI_SCO_HDR_SIZE);
handle = __le16_to_cpu(hdr->handle);
+ flags = hci_flags(handle);
+ handle = hci_handle(handle);
- BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
+ BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
+ handle, flags);
hdev->stat.sco_rx++;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 6ddc4a74a5e4..0a591be8b0ae 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -901,6 +901,37 @@ static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
hdev->inq_tx_power = rp->tx_power;
}
+static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+ if (rp->status)
+ return;
+
+ hdev->err_data_reporting = rp->err_data_reporting;
+}
+
+static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
+ struct sk_buff *skb)
+{
+ __u8 status = *((__u8 *)skb->data);
+ struct hci_cp_write_def_err_data_reporting *cp;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
+ if (!cp)
+ return;
+
+ hdev->err_data_reporting = cp->err_data_reporting;
+}
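
For reference, the command and response consumed by the two handlers above are single-byte payloads. A sketch matching the fields used here; the opcodes are assumed to be the standard OGF 0x03, OCF 0x5a/0x5b encodings:

#define HCI_OP_READ_DEF_ERR_DATA_REPORTING	0x0c5a
struct hci_rp_read_def_err_data_reporting {
	__u8 status;
	__u8 err_data_reporting;
} __packed;

#define HCI_OP_WRITE_DEF_ERR_DATA_REPORTING	0x0c5b
struct hci_cp_write_def_err_data_reporting {
	__u8 err_data_reporting;
} __packed;
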
+
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_pin_code_reply *rp = (void *) skb->data;
@@ -2202,10 +2233,22 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
- if (conn)
+ if (conn) {
+ u8 type = conn->type;
+
mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
conn->dst_type, status);
+ /* If the disconnection failed for any reason, the upper layer
+ * does not retry the disconnection in the current implementation.
+ * Hence, we need to do some basic cleanup here and re-enable
+ * advertising if necessary.
+ */
+ hci_conn_del(conn);
+ if (type == LE_LINK)
+ hci_req_reenable_advertising(hdev);
+ }
+
hci_dev_unlock(hdev);
}
@@ -2474,6 +2517,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_complete *ev = (void *) skb->data;
+ struct inquiry_entry *ie;
struct hci_conn *conn;
BT_DBG("%s", hdev->name);
@@ -2482,14 +2526,30 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
- if (ev->link_type != SCO_LINK)
- goto unlock;
+ /* Connection may not exist if auto-connected. Check the inquiry
+ * cache to see if we've already discovered this bdaddr before.
+ * If found and the link is of ACL type, create the connection
+ * automatically.
+ */
+ ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
+ if (ie && ev->link_type == ACL_LINK) {
+ conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
+ HCI_ROLE_SLAVE);
+ if (!conn) {
+ bt_dev_err(hdev, "no memory for new conn");
+ goto unlock;
+ }
+ } else {
+ if (ev->link_type != SCO_LINK)
+ goto unlock;
- conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
- if (!conn)
- goto unlock;
+ conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
+ &ev->bdaddr);
+ if (!conn)
+ goto unlock;
- conn->type = SCO_LINK;
+ conn->type = SCO_LINK;
+ }
}
if (!ev->status) {
@@ -2743,6 +2803,14 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_disconn_cfm(conn, ev->reason);
hci_conn_del(conn);
+ /* The suspend notifier is waiting for all devices to disconnect so
+ * clear the bit from pending tasks and inform the wait queue.
+ */
+ if (list_empty(&hdev->conn_hash.list) &&
+ test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
+ wake_up(&hdev->suspend_wait_q);
+ }
+
/* Re-enable advertising if necessary, since it might
* have been disabled by the connection. From the
* HCI_LE_Set_Advertise_Enable command description in
@@ -2895,14 +2963,14 @@ static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
if (!conn)
goto unlock;
- /* If we fail to read the encryption key size, assume maximum
- * (which is the same we do also when this HCI command isn't
- * supported.
+ /* While unexpected, the read_enc_key_size command may fail. The most
+ * secure approach is to then assume the key size is 0 to force a
+ * disconnection.
*/
if (rp->status) {
bt_dev_err(hdev, "failed to read key size for handle %u",
handle);
- conn->enc_key_size = HCI_LINK_KEY_SIZE;
+ conn->enc_key_size = 0;
} else {
conn->enc_key_size = rp->key_size;
}
@@ -3302,6 +3370,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_read_inq_rsp_tx_power(hdev, skb);
break;
+ case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
+ hci_cc_read_def_err_data_reporting(hdev, skb);
+ break;
+
+ case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
+ hci_cc_write_def_err_data_reporting(hdev, skb);
+ break;
+
case HCI_OP_PIN_CODE_REPLY:
hci_cc_pin_code_reply(hdev, skb);
break;
@@ -4557,6 +4633,16 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
goto confirm;
}
+ /* If a link key already exists on the local host, leave the
+ * decision to user space, since the remote device could be
+ * either legitimate or malicious.
+ */
+ if (hci_find_link_key(hdev, &ev->bdaddr)) {
+ bt_dev_dbg(hdev, "Local host already has link key");
+ confirm_hint = 1;
+ goto confirm;
+ }
+
BT_DBG("Auto-accept of user confirmation with %ums delay",
hdev->auto_accept_delay);
@@ -5858,6 +5944,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
u8 status = 0, event = hdr->evt, req_evt = 0;
u16 opcode = HCI_OP_NOP;
+ if (!event) {
+ bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
+ goto done;
+ }
+
if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
opcode = __le16_to_cpu(cmd_hdr->opcode);
@@ -6069,6 +6160,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
req_complete_skb(hdev, status, opcode, orig_skb);
}
+done:
kfree_skb(orig_skb);
kfree_skb(skb);
hdev->stat.evt_rx++;
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 2a1b64dbf76e..649e1e5ed446 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -34,6 +34,9 @@
#define HCI_REQ_PEND 1
#define HCI_REQ_CANCELED 2
+#define LE_SUSPEND_SCAN_WINDOW 0x0012
+#define LE_SUSPEND_SCAN_INTERVAL 0x0060
+
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
skb_queue_head_init(&req->cmd_q);
@@ -654,6 +657,11 @@ void hci_req_add_le_scan_disable(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
+ if (hdev->scanning_paused) {
+ bt_dev_dbg(hdev, "Scanning is paused for suspend");
+ return;
+ }
+
if (use_ext_scan(hdev)) {
struct hci_cp_le_set_ext_scan_enable cp;
@@ -670,15 +678,55 @@ void hci_req_add_le_scan_disable(struct hci_request *req)
}
}
-static void add_to_white_list(struct hci_request *req,
- struct hci_conn_params *params)
+static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
+ u8 bdaddr_type)
+{
+ struct hci_cp_le_del_from_white_list cp;
+
+ cp.bdaddr_type = bdaddr_type;
+ bacpy(&cp.bdaddr, bdaddr);
+
+ bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
+ cp.bdaddr_type);
+ hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
+}
+
+/* Adds connection to white list if needed. On error, returns -1. */
+static int add_to_white_list(struct hci_request *req,
+ struct hci_conn_params *params, u8 *num_entries,
+ bool allow_rpa)
{
struct hci_cp_le_add_to_white_list cp;
+ struct hci_dev *hdev = req->hdev;
+
+ /* Already in white list */
+ if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
+ params->addr_type))
+ return 0;
+
+ /* White list is full: the caller falls back to accepting all advertising */
+ if (*num_entries >= hdev->le_white_list_size)
+ return -1;
+
+ /* White list can not be used with RPAs */
+ if (!allow_rpa &&
+ hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
+ return -1;
+ }
+ /* During suspend, only wakeable devices can be in whitelist */
+ if (hdev->suspended && !params->wakeable)
+ return 0;
+
+ *num_entries += 1;
cp.bdaddr_type = params->addr_type;
bacpy(&cp.bdaddr, &params->addr);
+ bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
+ cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
+
+ return 0;
}
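
Note the return convention: add_to_white_list() returns 0 both when the entry was queued and when it was deliberately skipped (a non-wakeable device during suspend), while -1 tells the caller to give up and select filter policy 0x00. The command it queues carries a seven-byte payload; sketch of the standard layout from hci.h:

/* HCI LE Add Device To White List (OGF 0x08, OCF 0x0011) */
struct hci_cp_le_add_to_white_list {
	__u8     bdaddr_type;	/* 0x00 public, 0x01 random */
	bdaddr_t bdaddr;
} __packed;
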
static u8 update_white_list(struct hci_request *req)
@@ -686,7 +734,14 @@ static u8 update_white_list(struct hci_request *req)
struct hci_dev *hdev = req->hdev;
struct hci_conn_params *params;
struct bdaddr_list *b;
- uint8_t white_list_entries = 0;
+ u8 num_entries = 0;
+ bool pend_conn, pend_report;
+ /* We allow whitelisting even with RPAs in suspend. In the worst case,
+ * we won't be able to wake from devices that use the privacy 1.2
+ * features. Additionally, once we support privacy 1.2 and IRK
+ * offloading, we can update this to also check for those conditions.
+ */
+ bool allow_rpa = hdev->suspended;
/* Go through the current white list programmed into the
* controller one by one and check if that address is still
@@ -695,29 +750,28 @@ static u8 update_white_list(struct hci_request *req)
* command to remove it from the controller.
*/
list_for_each_entry(b, &hdev->le_white_list, list) {
- /* If the device is neither in pend_le_conns nor
- * pend_le_reports then remove it from the whitelist.
+ pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
+ &b->bdaddr,
+ b->bdaddr_type);
+ pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
+ &b->bdaddr,
+ b->bdaddr_type);
+
+ /* If the device is not likely to connect or report,
+ * remove it from the whitelist.
*/
- if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
- &b->bdaddr, b->bdaddr_type) &&
- !hci_pend_le_action_lookup(&hdev->pend_le_reports,
- &b->bdaddr, b->bdaddr_type)) {
- struct hci_cp_le_del_from_white_list cp;
-
- cp.bdaddr_type = b->bdaddr_type;
- bacpy(&cp.bdaddr, &b->bdaddr);
-
- hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
- sizeof(cp), &cp);
+ if (!pend_conn && !pend_report) {
+ del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
continue;
}
- if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
- /* White list can not be used with RPAs */
+ /* White list can not be used with RPAs */
+ if (!allow_rpa &&
+ hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
return 0x00;
}
- white_list_entries++;
+ num_entries++;
}
/* Since all no longer valid white list entries have been
@@ -731,47 +785,17 @@ static u8 update_white_list(struct hci_request *req)
* white list.
*/
list_for_each_entry(params, &hdev->pend_le_conns, action) {
- if (hci_bdaddr_list_lookup(&hdev->le_white_list,
- &params->addr, params->addr_type))
- continue;
-
- if (white_list_entries >= hdev->le_white_list_size) {
- /* Select filter policy to accept all advertising */
+ if (add_to_white_list(req, params, &num_entries, allow_rpa))
return 0x00;
- }
-
- if (hci_find_irk_by_addr(hdev, &params->addr,
- params->addr_type)) {
- /* White list can not be used with RPAs */
- return 0x00;
- }
-
- white_list_entries++;
- add_to_white_list(req, params);
}
/* After adding all new pending connections, walk through
* the list of pending reports and also add these to the
- * white list if there is still space.
+ * white list if there is still space. Abort if space runs out.
*/
list_for_each_entry(params, &hdev->pend_le_reports, action) {
- if (hci_bdaddr_list_lookup(&hdev->le_white_list,
- &params->addr, params->addr_type))
- continue;
-
- if (white_list_entries >= hdev->le_white_list_size) {
- /* Select filter policy to accept all advertising */
- return 0x00;
- }
-
- if (hci_find_irk_by_addr(hdev, &params->addr,
- params->addr_type)) {
- /* White list can not be used with RPAs */
+ if (add_to_white_list(req, params, &num_entries, allow_rpa))
return 0x00;
- }
-
- white_list_entries++;
- add_to_white_list(req, params);
}
/* Select filter policy to use white list */
@@ -866,6 +890,12 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
struct hci_dev *hdev = req->hdev;
u8 own_addr_type;
u8 filter_policy;
+ u16 window, interval;
+
+ if (hdev->scanning_paused) {
+ bt_dev_dbg(hdev, "Scanning is paused for suspend");
+ return;
+ }
/* Set require_privacy to false since no SCAN_REQ are send
* during passive scanning. Not using an non-resolvable address
@@ -896,8 +926,17 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
(hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
filter_policy |= 0x02;
- hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
- hdev->le_scan_window, own_addr_type, filter_policy);
+ if (hdev->suspended) {
+ window = LE_SUSPEND_SCAN_WINDOW;
+ interval = LE_SUSPEND_SCAN_INTERVAL;
+ } else {
+ window = hdev->le_scan_window;
+ interval = hdev->le_scan_interval;
+ }
+
+ bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
+ hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
+ own_addr_type, filter_policy);
}
static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
@@ -918,6 +957,187 @@ static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
return adv_instance->scan_rsp_len;
}
+static void hci_req_clear_event_filter(struct hci_request *req)
+{
+ struct hci_cp_set_event_filter f;
+
+ memset(&f, 0, sizeof(f));
+ f.flt_type = HCI_FLT_CLEAR_ALL;
+ hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
+
+ /* Update page scan state (since we may have modified it when setting
+ * the event filter).
+ */
+ __hci_req_update_scan(req);
+}
+
+static void hci_req_set_event_filter(struct hci_request *req)
+{
+ struct bdaddr_list *b;
+ struct hci_cp_set_event_filter f;
+ struct hci_dev *hdev = req->hdev;
+ u8 scan;
+
+ /* Always clear event filter when starting */
+ hci_req_clear_event_filter(req);
+
+ list_for_each_entry(b, &hdev->wakeable, list) {
+ memset(&f, 0, sizeof(f));
+ bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
+ f.flt_type = HCI_FLT_CONN_SETUP;
+ f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
+ f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
+
+ bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
+ hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
+ }
+
+ scan = !list_empty(&hdev->wakeable) ? SCAN_PAGE : SCAN_DISABLED;
+ hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+}
+
+static void hci_req_config_le_suspend_scan(struct hci_request *req)
+{
+ /* Can't change params without disabling first */
+ hci_req_add_le_scan_disable(req);
+
+ /* Configure params and enable scanning */
+ hci_req_add_le_passive_scan(req);
+
+ /* Block suspend notifier on response */
+ set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
+}
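
The suspend scan parameters defined above are in the usual 0.625 ms units, which works out to a much lower duty cycle than foreground scanning:

/*
 * window     = 0x0012 * 0.625 ms = 11.25 ms
 * interval   = 0x0060 * 0.625 ms = 60.00 ms
 * duty cycle = 11.25 / 60       = 18.75%
 */
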
+
+static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+{
+ bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
+ status);
+ if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
+ test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
+ wake_up(&hdev->suspend_wait_q);
+ }
+}
+
+/* Call with hci_dev_lock */
+void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
+{
+ int old_state;
+ struct hci_conn *conn;
+ struct hci_request req;
+ u8 page_scan;
+ int disconnect_counter;
+
+ if (next == hdev->suspend_state) {
+ bt_dev_dbg(hdev, "Same state before and after: %d", next);
+ goto done;
+ }
+
+ hdev->suspend_state = next;
+ hci_req_init(&req, hdev);
+
+ if (next == BT_SUSPEND_DISCONNECT) {
+ /* Mark device as suspended */
+ hdev->suspended = true;
+
+ /* Pause discovery if not already stopped */
+ old_state = hdev->discovery.state;
+ if (old_state != DISCOVERY_STOPPED) {
+ set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+ queue_work(hdev->req_workqueue, &hdev->discov_update);
+ }
+
+ hdev->discovery_paused = true;
+ hdev->discovery_old_state = old_state;
+
+ /* Stop advertising */
+ old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
+ if (old_state) {
+ set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
+ cancel_delayed_work(&hdev->discov_off);
+ queue_delayed_work(hdev->req_workqueue,
+ &hdev->discov_off, 0);
+ }
+
+ hdev->advertising_paused = true;
+ hdev->advertising_old_state = old_state;
+ /* Disable page scan */
+ page_scan = SCAN_DISABLED;
+ hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
+
+ /* Disable LE passive scan */
+ hci_req_add_le_scan_disable(&req);
+
+ /* Mark task needing completion */
+ set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
+
+ /* Prevent disconnects from causing scanning to be re-enabled */
+ hdev->scanning_paused = true;
+
+ /* Run commands before disconnecting */
+ hci_req_run(&req, suspend_req_complete);
+
+ disconnect_counter = 0;
+ /* Soft disconnect everything (power off) */
+ list_for_each_entry(conn, &hdev->conn_hash.list, list) {
+ hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
+ disconnect_counter++;
+ }
+
+ if (disconnect_counter > 0) {
+ bt_dev_dbg(hdev,
+ "Had %d disconnects. Will wait on them",
+ disconnect_counter);
+ set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
+ }
+ } else if (next == BT_SUSPEND_COMPLETE) {
+ /* Unpause to take care of updating scanning params */
+ hdev->scanning_paused = false;
+ /* Enable event filter for paired devices */
+ hci_req_set_event_filter(&req);
+ /* Enable passive scan at lower duty cycle */
+ hci_req_config_le_suspend_scan(&req);
+ /* Pause scan changes again. */
+ hdev->scanning_paused = true;
+ hci_req_run(&req, suspend_req_complete);
+ } else {
+ hdev->suspended = false;
+ hdev->scanning_paused = false;
+
+ hci_req_clear_event_filter(&req);
+ /* Reset passive/background scanning to normal */
+ hci_req_config_le_suspend_scan(&req);
+
+ /* Unpause advertising */
+ hdev->advertising_paused = false;
+ if (hdev->advertising_old_state) {
+ set_bit(SUSPEND_UNPAUSE_ADVERTISING,
+ hdev->suspend_tasks);
+ hci_dev_set_flag(hdev, HCI_ADVERTISING);
+ queue_work(hdev->req_workqueue,
+ &hdev->discoverable_update);
+ hdev->advertising_old_state = 0;
+ }
+
+ /* Unpause discovery */
+ hdev->discovery_paused = false;
+ if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
+ hdev->discovery_old_state != DISCOVERY_STOPPING) {
+ set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
+ hci_discovery_set_state(hdev, DISCOVERY_STARTING);
+ queue_work(hdev->req_workqueue, &hdev->discov_update);
+ }
+
+ hci_req_run(&req, suspend_req_complete);
+ }
+
+done:
+ clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
+ wake_up(&hdev->suspend_wait_q);
+}
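
hci_suspend_wait_event(), added earlier in this patch alongside the notifier, is what each set_bit() above pairs with. A minimal sketch, assuming the helper simply waits for the suspend_tasks bitmap to drain; the 2-second bound is illustrative, not taken from this patch:

static int hci_suspend_wait_event(struct hci_dev *hdev)
{
	long timeout = msecs_to_jiffies(2000);	/* illustrative bound */

	if (!wait_event_timeout(hdev->suspend_wait_q,
				bitmap_empty(hdev->suspend_tasks,
					     __SUSPEND_NUM_TASKS),
				timeout))
		return -ETIMEDOUT;	/* notifier aborts the suspend */

	return 0;
}
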
+
static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
u8 instance = hdev->cur_adv_instance;
@@ -1499,7 +1719,7 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
if (err < 0) {
- BT_ERR("%s failed to generate new RPA", hdev->name);
+ bt_dev_err(hdev, "failed to generate new RPA");
return err;
}
@@ -2015,6 +2235,9 @@ void __hci_req_update_scan(struct hci_request *req)
if (mgmt_powering_down(hdev))
return;
+ if (hdev->scanning_paused)
+ return;
+
if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
disconnected_whitelist_entries(hdev))
scan = SCAN_PAGE;
@@ -2504,23 +2727,6 @@ static int active_scan(struct hci_request *req, unsigned long opt)
BT_DBG("%s", hdev->name);
- if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
- hci_dev_lock(hdev);
-
- /* Don't let discovery abort an outgoing connection attempt
- * that's using directed advertising.
- */
- if (hci_lookup_le_connect(hdev)) {
- hci_dev_unlock(hdev);
- return -EBUSY;
- }
-
- cancel_adv_timeout(hdev);
- hci_dev_unlock(hdev);
-
- __hci_req_disable_advertising(req);
- }
-
/* If controller is scanning, it means the background scanning is
* running. Thus, we should temporarily stop it in order to set the
* discovery scanning parameters.
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index a7019fbeadd3..0e81614d235e 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -68,6 +68,8 @@ void __hci_req_update_eir(struct hci_request *req);
void hci_req_add_le_scan_disable(struct hci_request *req);
void hci_req_add_le_passive_scan(struct hci_request *req);
+void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next);
+
void hci_req_reenable_advertising(struct hci_dev *hdev);
void __hci_req_enable_advertising(struct hci_request *req);
void __hci_req_disable_advertising(struct hci_request *req);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index bef84b95e2c4..3b4fa27a44e6 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1279,7 +1279,7 @@ static int hidp_session_thread(void *arg)
add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
/* This memory barrier is paired with wq_has_sleeper(). See
* sock_poll_wait() for more information why this is needed. */
- smp_mb();
+ smp_mb__before_atomic();
/* notify synchronous startup that we're ready */
atomic_inc(&session->state);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 195459a1e53e..fd9d0d08f9c9 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -45,6 +45,7 @@
#define LE_FLOWCTL_MAX_CREDITS 65535
bool disable_ertm;
+bool enable_ecred;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
@@ -419,6 +420,9 @@ static void l2cap_chan_timeout(struct work_struct *work)
BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
mutex_lock(&conn->chan_lock);
+ /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
+ * this work. No need to call l2cap_chan_hold(chan) here again.
+ */
l2cap_chan_lock(chan);
if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
@@ -431,12 +435,12 @@ static void l2cap_chan_timeout(struct work_struct *work)
l2cap_chan_close(chan, reason);
- l2cap_chan_unlock(chan);
-
chan->ops->close(chan);
- mutex_unlock(&conn->chan_lock);
+ l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
+
+ mutex_unlock(&conn->chan_lock);
}
struct l2cap_chan *l2cap_chan_create(void)
@@ -532,6 +536,17 @@ static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
skb_queue_head_init(&chan->tx_q);
}
+static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
+{
+ l2cap_le_flowctl_init(chan, tx_credits);
+
+ /* L2CAP implementations shall support a minimum MPS of 64 octets */
+ if (chan->mps < L2CAP_ECRED_MIN_MPS) {
+ chan->mps = L2CAP_ECRED_MIN_MPS;
+ chan->rx_credits = (chan->imtu / chan->mps) + 1;
+ }
+}
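
Worked example of the clamp above, taking imtu = 512 with a configured MPS below the 64-octet floor:

/*
 * chan->mps        = L2CAP_ECRED_MIN_MPS = 64
 * chan->rx_credits = (512 / 64) + 1      = 9
 *
 * i.e. enough credits for the peer to send one maximally fragmented
 * SDU plus one spare K-frame.
 */
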
+
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
@@ -638,6 +653,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
break;
case L2CAP_MODE_LE_FLOWCTL:
+ case L2CAP_MODE_EXT_FLOWCTL:
skb_queue_purge(&chan->tx_q);
break;
@@ -662,6 +678,29 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
+static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
+ void *data)
+{
+ struct l2cap_chan *chan;
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ func(chan, data);
+ }
+}
+
+void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
+ void *data)
+{
+ if (!conn)
+ return;
+
+ mutex_lock(&conn->chan_lock);
+ __l2cap_chan_list(conn, func, data);
+ mutex_unlock(&conn->chan_lock);
+}
+EXPORT_SYMBOL_GPL(l2cap_chan_list);
+
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
@@ -704,6 +743,27 @@ static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
&rsp);
}
+static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct l2cap_ecred_conn_rsp rsp;
+ u16 result;
+
+ if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
+ result = L2CAP_CR_LE_AUTHORIZATION;
+ else
+ result = L2CAP_CR_LE_BAD_PSM;
+
+ l2cap_state_change(chan, BT_DISCONN);
+
+ memset(&rsp, 0, sizeof(rsp));
+
+ rsp.result = cpu_to_le16(result);
+
+ l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
+ &rsp);
+}
+
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
@@ -749,8 +809,16 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
if (conn->hcon->type == ACL_LINK)
l2cap_chan_connect_reject(chan);
- else if (conn->hcon->type == LE_LINK)
- l2cap_chan_le_connect_reject(chan);
+ else if (conn->hcon->type == LE_LINK) {
+ switch (chan->mode) {
+ case L2CAP_MODE_LE_FLOWCTL:
+ l2cap_chan_le_connect_reject(chan);
+ break;
+ case L2CAP_MODE_EXT_FLOWCTL:
+ l2cap_chan_ecred_connect_reject(chan);
+ break;
+ }
+ }
}
l2cap_chan_del(chan, reason);
@@ -1273,8 +1341,13 @@ static void l2cap_chan_ready(struct l2cap_chan *chan)
chan->conf_state = 0;
__clear_chan_timer(chan);
- if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
- chan->ops->suspend(chan);
+ switch (chan->mode) {
+ case L2CAP_MODE_LE_FLOWCTL:
+ case L2CAP_MODE_EXT_FLOWCTL:
+ if (!chan->tx_credits)
+ chan->ops->suspend(chan);
+ break;
+ }
chan->state = BT_CONNECTED;
@@ -1306,6 +1379,81 @@ static void l2cap_le_connect(struct l2cap_chan *chan)
sizeof(req), &req);
}
+struct l2cap_ecred_conn_data {
+ struct {
+ struct l2cap_ecred_conn_req req;
+ __le16 scid[5];
+ } __packed pdu;
+ struct l2cap_chan *chan;
+ struct pid *pid;
+ int count;
+};
+
+static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
+{
+ struct l2cap_ecred_conn_data *conn = data;
+ struct pid *pid;
+
+ if (chan == conn->chan)
+ return;
+
+ if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
+ return;
+
+ pid = chan->ops->get_peer_pid(chan);
+
+ /* Only add deferred channels with the same PID/PSM */
+ if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
+ chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
+ return;
+
+ if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
+ return;
+
+ l2cap_ecred_init(chan, 0);
+
+ /* Set the same ident so we can match on the rsp */
+ chan->ident = conn->chan->ident;
+
+ /* Include all channels deferred */
+ conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
+
+ conn->count++;
+}
+
+static void l2cap_ecred_connect(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct l2cap_ecred_conn_data data;
+
+ if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
+ return;
+
+ if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
+ return;
+
+ l2cap_ecred_init(chan, 0);
+
+ data.pdu.req.psm = chan->psm;
+ data.pdu.req.mtu = cpu_to_le16(chan->imtu);
+ data.pdu.req.mps = cpu_to_le16(chan->mps);
+ data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
+ data.pdu.scid[0] = cpu_to_le16(chan->scid);
+
+ chan->ident = l2cap_get_ident(conn);
+
+ data.count = 1;
+ data.chan = chan;
+ data.pid = chan->ops->get_peer_pid(chan);
+
+ __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
+
+ l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
+ sizeof(data.pdu.req) + data.count * sizeof(__le16),
+ &data.pdu);
+}
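
The request built above is the Enhanced Credit Based Connection Request PDU: the fixed part is 8 bytes and each deferred channel appends one SCID, which is why the send length is sizeof(req) plus count * sizeof(__le16). Sketch of the wire layout assumed here:

struct l2cap_ecred_conn_req {
	__le16 psm;
	__le16 mtu;
	__le16 mps;
	__le16 credits;
	__le16 scid[];	/* 1..5 source CIDs, 10..18 byte PDU total */
} __packed;
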
+
static void l2cap_le_start(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
@@ -1318,8 +1466,12 @@ static void l2cap_le_start(struct l2cap_chan *chan)
return;
}
- if (chan->state == BT_CONNECT)
- l2cap_le_connect(chan);
+ if (chan->state == BT_CONNECT) {
+ if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
+ l2cap_ecred_connect(chan);
+ else
+ l2cap_le_connect(chan);
+ }
}
static void l2cap_start_connection(struct l2cap_chan *chan)
@@ -1737,9 +1889,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
l2cap_chan_del(chan, err);
- l2cap_chan_unlock(chan);
-
chan->ops->close(chan);
+
+ l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
}
@@ -2505,6 +2657,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
switch (chan->mode) {
case L2CAP_MODE_LE_FLOWCTL:
+ case L2CAP_MODE_EXT_FLOWCTL:
/* Check outgoing MTU */
if (len > chan->omtu)
return -EMSGSIZE;
@@ -3773,6 +3926,45 @@ void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
&rsp);
}
+void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
+{
+ struct {
+ struct l2cap_ecred_conn_rsp rsp;
+ __le16 dcid[5];
+ } __packed pdu;
+ struct l2cap_conn *conn = chan->conn;
+ u16 ident = chan->ident;
+ int i = 0;
+
+ if (!ident)
+ return;
+
+ BT_DBG("chan %p ident %d", chan, ident);
+
+ pdu.rsp.mtu = cpu_to_le16(chan->imtu);
+ pdu.rsp.mps = cpu_to_le16(chan->mps);
+ pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
+ pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
+
+ mutex_lock(&conn->chan_lock);
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ if (chan->ident != ident)
+ continue;
+
+ /* Reset ident so only one response is sent */
+ chan->ident = 0;
+
+ /* Include all channels pending with the same ident */
+ pdu.dcid[i++] = cpu_to_le16(chan->scid);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+
+ l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
+ sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
+}
+
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
struct l2cap_conn_rsp rsp;
@@ -4181,7 +4373,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
return 0;
}
- if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
+ if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
+ chan->state != BT_CONNECTED) {
cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
chan->dcid);
goto unlock;
@@ -4405,6 +4598,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
return 0;
}
+ l2cap_chan_hold(chan);
l2cap_chan_lock(chan);
rsp.dcid = cpu_to_le16(chan->scid);
@@ -4413,12 +4607,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
chan->ops->set_shutdown(chan);
- l2cap_chan_hold(chan);
l2cap_chan_del(chan, ECONNRESET);
- l2cap_chan_unlock(chan);
-
chan->ops->close(chan);
+
+ l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -4450,20 +4643,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
return 0;
}
+ l2cap_chan_hold(chan);
l2cap_chan_lock(chan);
if (chan->state != BT_DISCONN) {
l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
return 0;
}
- l2cap_chan_hold(chan);
l2cap_chan_del(chan, 0);
- l2cap_chan_unlock(chan);
-
chan->ops->close(chan);
+
+ l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -5714,6 +5908,356 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
return 0;
}
+static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_ecred_conn_req *req = (void *) data;
+ struct {
+ struct l2cap_ecred_conn_rsp rsp;
+ __le16 dcid[5];
+ } __packed pdu;
+ struct l2cap_chan *chan, *pchan;
+ u16 mtu, mps;
+ __le16 psm;
+ u8 result, len = 0;
+ int i, num_scid;
+ bool defer = false;
+
+ if (!enable_ecred)
+ return -EINVAL;
+
+ memset(&pdu, 0, sizeof(pdu));
+
+ if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
+ result = L2CAP_CR_LE_INVALID_PARAMS;
+ goto response;
+ }
+
+ mtu = __le16_to_cpu(req->mtu);
+ mps = __le16_to_cpu(req->mps);
+
+ if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
+ result = L2CAP_CR_LE_UNACCEPT_PARAMS;
+ goto response;
+ }
+
+ psm = req->psm;
+
+ BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
+
+ /* Check if we have socket listening on psm */
+ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
+ &conn->hcon->dst, LE_LINK);
+ if (!pchan) {
+ result = L2CAP_CR_LE_BAD_PSM;
+ goto response;
+ }
+
+ mutex_lock(&conn->chan_lock);
+ l2cap_chan_lock(pchan);
+
+ if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
+ SMP_ALLOW_STK)) {
+ result = L2CAP_CR_LE_AUTHENTICATION;
+ goto unlock;
+ }
+
+ result = L2CAP_CR_LE_SUCCESS;
+ cmd_len -= sizeof(*req);
+ num_scid = cmd_len / sizeof(u16);
+
+ /* pdu.dcid[] has room for L2CAP_ECRED_CONN_SCID_MAX entries only */
+ if (num_scid > L2CAP_ECRED_CONN_SCID_MAX) {
+ result = L2CAP_CR_LE_INVALID_PARAMS;
+ goto unlock;
+ }
+
+ for (i = 0; i < num_scid; i++) {
+ u16 scid = __le16_to_cpu(req->scid[i]);
+
+ BT_DBG("scid[%d] 0x%4.4x", i, scid);
+
+ pdu.dcid[i] = 0x0000;
+ len += sizeof(*pdu.dcid);
+
+ /* Check for valid dynamic CID range */
+ if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
+ result = L2CAP_CR_LE_INVALID_SCID;
+ continue;
+ }
+
+ /* Check if we already have channel with that dcid */
+ if (__l2cap_get_chan_by_dcid(conn, scid)) {
+ result = L2CAP_CR_LE_SCID_IN_USE;
+ continue;
+ }
+
+ chan = pchan->ops->new_connection(pchan);
+ if (!chan) {
+ result = L2CAP_CR_LE_NO_MEM;
+ continue;
+ }
+
+ bacpy(&chan->src, &conn->hcon->src);
+ bacpy(&chan->dst, &conn->hcon->dst);
+ chan->src_type = bdaddr_src_type(conn->hcon);
+ chan->dst_type = bdaddr_dst_type(conn->hcon);
+ chan->psm = psm;
+ chan->dcid = scid;
+ chan->omtu = mtu;
+ chan->remote_mps = mps;
+
+ __l2cap_chan_add(conn, chan);
+
+ l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
+
+ /* Init response */
+ if (!pdu.rsp.credits) {
+ pdu.rsp.mtu = cpu_to_le16(chan->imtu);
+ pdu.rsp.mps = cpu_to_le16(chan->mps);
+ pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
+ }
+
+ pdu.dcid[i] = cpu_to_le16(chan->scid);
+
+ __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
+
+ chan->ident = cmd->ident;
+
+ if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
+ l2cap_state_change(chan, BT_CONNECT2);
+ defer = true;
+ chan->ops->defer(chan);
+ } else {
+ l2cap_chan_ready(chan);
+ }
+ }
+
+unlock:
+ l2cap_chan_unlock(pchan);
+ mutex_unlock(&conn->chan_lock);
+ l2cap_chan_put(pchan);
+
+response:
+ pdu.rsp.result = cpu_to_le16(result);
+
+ if (defer)
+ return 0;
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
+ sizeof(pdu.rsp) + len, &pdu);
+
+ return 0;
+}
+
+static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_ecred_conn_rsp *rsp = (void *) data;
+ struct hci_conn *hcon = conn->hcon;
+ u16 mtu, mps, credits, result;
+ struct l2cap_chan *chan;
+ int err = 0, sec_level;
+ int i = 0;
+
+ if (cmd_len < sizeof(*rsp))
+ return -EPROTO;
+
+ mtu = __le16_to_cpu(rsp->mtu);
+ mps = __le16_to_cpu(rsp->mps);
+ credits = __le16_to_cpu(rsp->credits);
+ result = __le16_to_cpu(rsp->result);
+
+ BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
+ result);
+
+ mutex_lock(&conn->chan_lock);
+
+ cmd_len -= sizeof(*rsp);
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ u16 dcid;
+
+ if (chan->ident != cmd->ident ||
+ chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
+ chan->state == BT_CONNECTED)
+ continue;
+
+ l2cap_chan_lock(chan);
+
+ /* Check that there is a dcid for each pending channel */
+ if (cmd_len < sizeof(dcid)) {
+ l2cap_chan_del(chan, ECONNREFUSED);
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
+ dcid = __le16_to_cpu(rsp->dcid[i++]);
+ cmd_len -= sizeof(u16);
+
+ BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
+
+ /* Check if dcid is already in use */
+ if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
+ /* If a device receives a
+ * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
+ * already-assigned Destination CID, then both the
+ * original channel and the new channel shall be
+ * immediately discarded and not used.
+ */
+ l2cap_chan_del(chan, ECONNREFUSED);
+ l2cap_chan_unlock(chan);
+ chan = __l2cap_get_chan_by_dcid(conn, dcid);
+ l2cap_chan_lock(chan);
+ l2cap_chan_del(chan, ECONNRESET);
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
+ switch (result) {
+ case L2CAP_CR_LE_AUTHENTICATION:
+ case L2CAP_CR_LE_ENCRYPTION:
+ /* If we already have MITM protection we can't do
+ * anything.
+ */
+ if (hcon->sec_level > BT_SECURITY_MEDIUM) {
+ l2cap_chan_del(chan, ECONNREFUSED);
+ break;
+ }
+
+ sec_level = hcon->sec_level + 1;
+ if (chan->sec_level < sec_level)
+ chan->sec_level = sec_level;
+
+ /* We'll need to send a new Connect Request */
+ clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
+
+ smp_conn_security(hcon, chan->sec_level);
+ break;
+
+ case L2CAP_CR_LE_BAD_PSM:
+ l2cap_chan_del(chan, ECONNREFUSED);
+ break;
+
+ default:
+ /* If dcid was not set it means the channel was refused */
+ if (!dcid) {
+ l2cap_chan_del(chan, ECONNREFUSED);
+ break;
+ }
+
+ chan->ident = 0;
+ chan->dcid = dcid;
+ chan->omtu = mtu;
+ chan->remote_mps = mps;
+ chan->tx_credits = credits;
+ l2cap_chan_ready(chan);
+ break;
+ }
+
+ l2cap_chan_unlock(chan);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+
+ return err;
+}
+
+static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_ecred_reconf_req *req = (void *) data;
+ struct l2cap_ecred_reconf_rsp rsp;
+ u16 mtu, mps, result;
+ struct l2cap_chan *chan;
+ int i, num_scid;
+
+ if (!enable_ecred)
+ return -EINVAL;
+
+ if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
+ result = L2CAP_CR_LE_INVALID_PARAMS;
+ goto respond;
+ }
+
+ mtu = __le16_to_cpu(req->mtu);
+ mps = __le16_to_cpu(req->mps);
+
+ BT_DBG("mtu %u mps %u", mtu, mps);
+
+ if (mtu < L2CAP_ECRED_MIN_MTU) {
+ result = L2CAP_RECONF_INVALID_MTU;
+ goto respond;
+ }
+
+ if (mps < L2CAP_ECRED_MIN_MPS) {
+ result = L2CAP_RECONF_INVALID_MPS;
+ goto respond;
+ }
+
+ cmd_len -= sizeof(*req);
+ num_scid = cmd_len / sizeof(u16);
+ result = L2CAP_RECONF_SUCCESS;
+
+ for (i = 0; i < num_scid; i++) {
+ u16 scid;
+
+ scid = __le16_to_cpu(req->scid[i]);
+ if (!scid)
+ return -EPROTO;
+
+ chan = __l2cap_get_chan_by_dcid(conn, scid);
+ if (!chan)
+ continue;
+
+ /* If the MTU value is decreased for any of the included
+ * channels, then the receiver shall disconnect all
+ * included channels.
+ */
+ if (chan->omtu > mtu) {
+ BT_ERR("chan %p decreased MTU %u -> %u", chan,
+ chan->omtu, mtu);
+ result = L2CAP_RECONF_INVALID_MTU;
+ }
+
+ chan->omtu = mtu;
+ chan->remote_mps = mps;
+ }
+
+respond:
+ rsp.result = cpu_to_le16(result);
+
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
+ &rsp);
+
+ return 0;
+}
+
+static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_chan *chan;
+ struct l2cap_ecred_conn_rsp *rsp = (void *) data;
+ u16 result;
+
+ if (cmd_len < sizeof(*rsp))
+ return -EPROTO;
+
+ result = __le16_to_cpu(rsp->result);
+
+ BT_DBG("result 0x%4.4x", result);
+
+ if (!result)
+ return 0;
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ if (chan->ident != cmd->ident)
+ continue;
+
+ l2cap_chan_del(chan, ECONNRESET);
+ }
+
+ return 0;
+}
+
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u16 cmd_len,
u8 *data)
@@ -5769,6 +6313,22 @@ static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
err = l2cap_le_credits(conn, cmd, cmd_len, data);
break;
+ case L2CAP_ECRED_CONN_REQ:
+ err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_ECRED_CONN_RSP:
+ err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_ECRED_RECONF_REQ:
+ err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_ECRED_RECONF_RSP:
+ err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
+ break;
+
case L2CAP_DISCONN_REQ:
err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
break;
@@ -5831,9 +6391,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
struct sk_buff *skb)
{
struct hci_conn *hcon = conn->hcon;
- u8 *data = skb->data;
- int len = skb->len;
- struct l2cap_cmd_hdr cmd;
+ struct l2cap_cmd_hdr *cmd;
int err;
l2cap_raw_recv(conn, skb);
@@ -5841,35 +6399,34 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
if (hcon->type != ACL_LINK)
goto drop;
- while (len >= L2CAP_CMD_HDR_SIZE) {
- u16 cmd_len;
- memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
- data += L2CAP_CMD_HDR_SIZE;
- len -= L2CAP_CMD_HDR_SIZE;
+ while (skb->len >= L2CAP_CMD_HDR_SIZE) {
+ u16 len;
+
+ cmd = (void *) skb->data;
+ skb_pull(skb, L2CAP_CMD_HDR_SIZE);
- cmd_len = le16_to_cpu(cmd.len);
+ len = le16_to_cpu(cmd->len);
- BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
- cmd.ident);
+ BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
+ cmd->ident);
- if (cmd_len > len || !cmd.ident) {
+ if (len > skb->len || !cmd->ident) {
BT_DBG("corrupted command");
break;
}
- err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
+ err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
if (err) {
struct l2cap_cmd_rej_unk rej;
BT_ERR("Wrong link type (%d)", err);
rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
- l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
sizeof(rej), &rej);
}
- data += cmd_len;
- len -= cmd_len;
+ skb_pull(skb, len);
}
drop:
@@ -6814,11 +7371,13 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
struct l2cap_le_credits pkt;
u16 return_credits;
- return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits;
+ return_credits = (chan->imtu / chan->mps) + 1;
- if (!return_credits)
+ if (chan->rx_credits >= return_credits)
return;
+ return_credits -= chan->rx_credits;
+
BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
chan->rx_credits += return_credits;
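
The reworked refill keeps the receive window at (imtu / mps) + 1 credits and only sends a top-up once credits have actually been consumed. Worked example:

/*
 * imtu = 512, mps = 64  ->  window = (512 / 64) + 1 = 9 credits
 *
 * rx_credits = 9: 9 >= 9, nothing to return
 * rx_credits = 3: return_credits = 9 - 3 = 6; rx_credits becomes 9
 *                 and an L2CAP_LE_CREDITS PDU carrying 6 is sent.
 */
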
@@ -6831,7 +7390,7 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
-static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
+static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
int err;
@@ -6846,7 +7405,7 @@ static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
return err;
}
-static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
+static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
int err;
@@ -6894,7 +7453,7 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
}
if (skb->len == sdu_len)
- return l2cap_le_recv(chan, skb);
+ return l2cap_ecred_recv(chan, skb);
chan->sdu = skb;
chan->sdu_len = sdu_len;
@@ -6926,7 +7485,7 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
skb = NULL;
if (chan->sdu->len == chan->sdu_len) {
- err = l2cap_le_recv(chan, chan->sdu);
+ err = l2cap_ecred_recv(chan, chan->sdu);
if (!err) {
chan->sdu = NULL;
chan->sdu_last_frag = NULL;
@@ -6987,7 +7546,8 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
switch (chan->mode) {
case L2CAP_MODE_LE_FLOWCTL:
- if (l2cap_le_data_rcv(chan, skb) < 0)
+ case L2CAP_MODE_EXT_FLOWCTL:
+ if (l2cap_ecred_data_rcv(chan, skb) < 0)
goto drop;
goto done;
@@ -7206,6 +7766,33 @@ static bool is_valid_psm(u16 psm, u8 dst_type) {
return ((psm & 0x0101) == 0x0001);
}
+struct l2cap_chan_data {
+ struct l2cap_chan *chan;
+ struct pid *pid;
+ int count;
+};
+
+static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
+{
+ struct l2cap_chan_data *d = data;
+ struct pid *pid;
+
+ if (chan == d->chan)
+ return;
+
+ if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
+ return;
+
+ pid = chan->ops->get_peer_pid(chan);
+
+ /* Only count deferred channels with the same PID/PSM */
+ if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
+ chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
+ return;
+
+ d->count++;
+}
+
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
bdaddr_t *dst, u8 dst_type)
{
@@ -7214,8 +7801,8 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
struct hci_dev *hdev;
int err;
- BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
- dst_type, __le16_to_cpu(psm));
+ BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
+ dst, dst_type, __le16_to_cpu(psm), chan->mode);
hdev = hci_get_route(dst, &chan->src, chan->src_type);
if (!hdev)
@@ -7244,6 +7831,12 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
break;
case L2CAP_MODE_LE_FLOWCTL:
break;
+ case L2CAP_MODE_EXT_FLOWCTL:
+ if (!enable_ecred) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+ break;
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
if (!disable_ertm)
@@ -7319,6 +7912,23 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
goto done;
}
+ if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
+ struct l2cap_chan_data data;
+
+ data.chan = chan;
+ data.pid = chan->ops->get_peer_pid(chan);
+ data.count = 1;
+
+ l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
+
+ /* Check that there aren't too many channels being connected */
+ if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
+ hci_conn_drop(hcon);
+ err = -EPROTO;
+ goto done;
+ }
+ }
+
mutex_lock(&conn->chan_lock);
l2cap_chan_lock(chan);
@@ -7368,6 +7978,38 @@ done:
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
+static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
+{
+ struct l2cap_conn *conn = chan->conn;
+ struct {
+ struct l2cap_ecred_reconf_req req;
+ __le16 scid;
+ } pdu;
+
+ pdu.req.mtu = cpu_to_le16(chan->imtu);
+ pdu.req.mps = cpu_to_le16(chan->mps);
+ pdu.scid = cpu_to_le16(chan->scid);
+
+ chan->ident = l2cap_get_ident(conn);
+
+ l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
+ sizeof(pdu), &pdu);
+}
+
+int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
+{
+ if (chan->imtu > mtu)
+ return -EINVAL;
+
+ BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
+
+ chan->imtu = mtu;
+
+ l2cap_ecred_reconfigure(chan);
+
+ return 0;
+}
+
/* ---- L2CAP interface with lower layer (HCI) ---- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
@@ -7579,7 +8221,8 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
else
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
} else if (chan->state == BT_CONNECT2 &&
- chan->mode != L2CAP_MODE_LE_FLOWCTL) {
+ !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
+ chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
struct l2cap_conn_rsp rsp;
__u16 res, stat;
@@ -7787,3 +8430,6 @@ void l2cap_exit(void)
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
+
+module_param(enable_ecred, bool, 0644);
+MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index a7be8b59b3c2..117ba20ea194 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -232,7 +232,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
return -EINVAL;
}
- if (chan->psm && bdaddr_type_is_le(chan->src_type))
+ if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode)
chan->mode = L2CAP_MODE_LE_FLOWCTL;
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
@@ -274,6 +274,12 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
case L2CAP_MODE_BASIC:
case L2CAP_MODE_LE_FLOWCTL:
break;
+ case L2CAP_MODE_EXT_FLOWCTL:
+ if (!enable_ecred) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+ break;
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
if (!disable_ertm)
@@ -427,6 +433,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
opts.max_tx = chan->max_tx;
opts.txwin_size = chan->tx_win;
+ BT_DBG("mode 0x%2.2x", chan->mode);
+
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
err = -EFAULT;
@@ -499,6 +507,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct bt_security sec;
struct bt_power pwr;
+ u32 phys;
int len, err = 0;
BT_DBG("sk %p", sk);
@@ -603,6 +612,18 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
err = -EFAULT;
break;
+ case BT_PHY:
+ if (sk->sk_state != BT_CONNECTED) {
+ err = -ENOTCONN;
+ break;
+ }
+
+ phys = hci_conn_get_phy(chan->conn->hcon);
+
+ if (put_user(phys, (u32 __user *) optval))
+ err = -EFAULT;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
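
A hypothetical userspace consumer of the new BT_PHY option; the header exporting BT_PHY is an assumption, while the option name and the ENOTCONN behaviour come from the code above:

#include <stdio.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>	/* SOL_BLUETOOTH; BT_PHY once exported */

static void print_phys(int sk)
{
	unsigned int phys = 0;
	socklen_t len = sizeof(phys);

	if (getsockopt(sk, SOL_BLUETOOTH, BT_PHY, &phys, &len) < 0) {
		perror("BT_PHY");	/* ENOTCONN until the link is up */
		return;
	}

	printf("PHYs in use: 0x%08x\n", phys);
}
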
@@ -694,6 +715,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
break;
}
+ BT_DBG("mode 0x%2.2x", chan->mode);
+
chan->imtu = opts.imtu;
chan->omtu = opts.omtu;
chan->fcs = opts.fcs;
@@ -926,7 +949,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (sk->sk_state == BT_CONNECTED) {
+ if (chan->mode == L2CAP_MODE_LE_FLOWCTL &&
+ sk->sk_state == BT_CONNECTED) {
err = -EISCONN;
break;
}
@@ -936,7 +960,12 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- chan->imtu = opt;
+ if (chan->mode == L2CAP_MODE_EXT_FLOWCTL &&
+ sk->sk_state == BT_CONNECTED)
+ err = l2cap_chan_reconfigure(chan, opt);
+ else
+ chan->imtu = opt;
+
break;
default:
@@ -991,7 +1020,11 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP,
&bt_sk(sk)->flags)) {
- if (bdaddr_type_is_le(pi->chan->src_type)) {
+ if (pi->chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
+ sk->sk_state = BT_CONNECTED;
+ pi->chan->state = BT_CONNECTED;
+ __l2cap_ecred_conn_rsp_defer(pi->chan);
+ } else if (bdaddr_type_is_le(pi->chan->src_type)) {
sk->sk_state = BT_CONNECTED;
pi->chan->state = BT_CONNECTED;
__l2cap_le_connect_rsp_defer(pi->chan);
@@ -1042,7 +1075,7 @@ done:
}
/* Kill socket (only if zapped and orphan)
- * Must be called on unlocked socket.
+ * Must be called on unlocked socket, with l2cap channel lock.
*/
static void l2cap_sock_kill(struct sock *sk)
{
@@ -1193,6 +1226,7 @@ static int l2cap_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err;
+ struct l2cap_chan *chan;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -1202,9 +1236,17 @@ static int l2cap_sock_release(struct socket *sock)
bt_sock_unlink(&l2cap_sk_list, sk);
err = l2cap_sock_shutdown(sock, 2);
+ chan = l2cap_pi(sk)->chan;
+
+ l2cap_chan_hold(chan);
+ l2cap_chan_lock(chan);
sock_orphan(sk);
l2cap_sock_kill(sk);
+
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+
return err;
}
@@ -1222,12 +1264,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
BT_DBG("child chan %p state %s", chan,
state_to_string(chan->state));
+ l2cap_chan_hold(chan);
l2cap_chan_lock(chan);
+
__clear_chan_timer(chan);
l2cap_chan_close(chan, ECONNRESET);
- l2cap_chan_unlock(chan);
-
l2cap_sock_kill(sk);
+
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
}
}
@@ -1459,6 +1504,13 @@ static long l2cap_sock_get_sndtimeo_cb(struct l2cap_chan *chan)
return sk->sk_sndtimeo;
}
+static struct pid *l2cap_sock_get_peer_pid_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+
+ return sk->sk_peer_pid;
+}
+
static void l2cap_sock_suspend_cb(struct l2cap_chan *chan)
{
struct sock *sk = chan->data;
@@ -1480,6 +1532,7 @@ static const struct l2cap_ops l2cap_chan_ops = {
.suspend = l2cap_sock_suspend_cb,
.set_shutdown = l2cap_sock_set_shutdown_cb,
.get_sndtimeo = l2cap_sock_get_sndtimeo_cb,
+ .get_peer_pid = l2cap_sock_get_peer_pid_cb,
.alloc_skb = l2cap_sock_alloc_skb_cb,
};
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 3074363c68df..6552003a170e 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -38,7 +38,7 @@
#include "mgmt_util.h"
#define MGMT_VERSION 1
-#define MGMT_REVISION 15
+#define MGMT_REVISION 16
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -107,6 +107,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_READ_EXT_INFO,
MGMT_OP_SET_APPEARANCE,
MGMT_OP_SET_BLOCKED_KEYS,
+ MGMT_OP_SET_WIDEBAND_SPEECH,
};
static const u16 mgmt_events[] = {
@@ -762,6 +763,10 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (lmp_sc_capable(hdev))
settings |= MGMT_SETTING_SECURE_CONN;
+
+ if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
+ &hdev->quirks))
+ settings |= MGMT_SETTING_WIDEBAND_SPEECH;
}
if (lmp_le_capable(hdev)) {
@@ -846,6 +851,9 @@ static u32 get_current_settings(struct hci_dev *hdev)
settings |= MGMT_SETTING_STATIC_ADDRESS;
}
+ if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
+ settings |= MGMT_SETTING_WIDEBAND_SPEECH;
+
return settings;
}
@@ -1382,6 +1390,12 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
+ if (hdev->advertising_paused) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
+ MGMT_STATUS_BUSY);
+ goto failed;
+ }
+
if (!hdev_is_powered(hdev)) {
bool changed = false;
@@ -3589,6 +3603,62 @@ static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
err, NULL, 0);
}
+static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_mode *cp = data;
+ int err;
+ bool changed = false;
+
+ BT_DBG("request for %s", hdev->name);
+
+ if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_WIDEBAND_SPEECH,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ if (cp->val != 0x00 && cp->val != 0x01)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_WIDEBAND_SPEECH,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ hci_dev_lock(hdev);
+
+ if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
+ err = mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_WIDEBAND_SPEECH,
+ MGMT_STATUS_BUSY);
+ goto unlock;
+ }
+
+ if (hdev_is_powered(hdev) &&
+ !!cp->val != hci_dev_test_flag(hdev,
+ HCI_WIDEBAND_SPEECH_ENABLED)) {
+ err = mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_WIDEBAND_SPEECH,
+ MGMT_STATUS_REJECTED);
+ goto unlock;
+ }
+
+ if (cp->val)
+ changed = !hci_dev_test_and_set_flag(hdev,
+ HCI_WIDEBAND_SPEECH_ENABLED);
+ else
+ changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_WIDEBAND_SPEECH_ENABLED);
+
+ err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
+ if (err < 0)
+ goto unlock;
+
+ if (changed)
+ err = new_settings(hdev, sk);
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
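
The handler takes the standard one-byte mode payload, which is why it is registered below with MGMT_SETTING_SIZE; sketch of the existing struct from mgmt.h:

struct mgmt_mode {
	__u8 val;	/* 0x00 disable, 0x01 enable wideband speech */
} __packed;
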
+
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb)
{
@@ -3865,6 +3935,13 @@ void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
}
hci_dev_unlock(hdev);
+
+ /* Handle suspend notifier */
+ if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
+ hdev->suspend_tasks)) {
+ bt_dev_dbg(hdev, "Unpaused discovery");
+ wake_up(&hdev->suspend_wait_q);
+ }
}
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
@@ -3926,6 +4003,13 @@ static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
+ /* Can't start discovery when it is paused */
+ if (hdev->discovery_paused) {
+ err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
+ &cp->type, sizeof(cp->type));
+ goto failed;
+ }
+
/* Clear the discovery filter first to free any previously
* allocated memory for the UUID list.
*/
@@ -4093,6 +4177,12 @@ void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
}
hci_dev_unlock(hdev);
+
+ /* Handle suspend notifier */
+ if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
+ bt_dev_dbg(hdev, "Paused discovery");
+ wake_up(&hdev->suspend_wait_q);
+ }
}
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -4324,6 +4414,17 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status,
if (match.sk)
sock_put(match.sk);
+ /* Handle suspend notifier */
+ if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
+ hdev->suspend_tasks)) {
+ bt_dev_dbg(hdev, "Paused advertising");
+ wake_up(&hdev->suspend_wait_q);
+ } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
+ hdev->suspend_tasks)) {
+ bt_dev_dbg(hdev, "Unpaused advertising");
+ wake_up(&hdev->suspend_wait_q);
+ }
+
/* If "Set Advertising" was just disabled and instance advertising was
* set up earlier, then re-enable multi-instance advertising.
*/
@@ -4375,6 +4476,10 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
+ if (hdev->advertising_paused)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+ MGMT_STATUS_BUSY);
+
hci_dev_lock(hdev);
val = !!cp->val;
@@ -6743,8 +6848,11 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
if (!err)
err = hci_req_run(&req, add_advertising_complete);
- if (err < 0)
+ if (err < 0) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+ MGMT_STATUS_FAILED);
mgmt_pending_remove(cmd);
+ }
unlock:
hci_dev_unlock(hdev);
@@ -6990,6 +7098,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
{ set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
{ set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
HCI_MGMT_VAR_LEN },
+ { set_wideband_speech, MGMT_SETTING_SIZE },
};
void mgmt_index_added(struct hci_dev *hdev)
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 3a9e9d9670be..2e20af317cea 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -40,7 +40,6 @@
static bool disable_cfc;
static bool l2cap_ertm;
static int channel_mtu = -1;
-static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
static struct task_struct *rfcomm_thread;
@@ -73,8 +72,6 @@ static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s);
/* ---- RFCOMM frame parsing macros ---- */
#define __get_dlci(b) ((b & 0xfc) >> 2)
-#define __get_channel(b) ((b & 0xf8) >> 3)
-#define __get_dir(b) ((b & 0x04) >> 2)
#define __get_type(b) ((b & 0xef))
#define __test_ea(b) ((b & 0x01))
@@ -87,7 +84,6 @@ static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s);
#define __ctrl(type, pf) (((type & 0xef) | (pf << 4)))
#define __dlci(dir, chn) (((chn & 0x1f) << 1) | dir)
#define __srv_channel(dlci) (dlci >> 1)
-#define __dir(dlci) (dlci & 0x01)
#define __len8(len) (((len) << 1) | 1)
#define __len16(len) ((len) << 1)
@@ -752,7 +748,8 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
/* Set L2CAP options */
sk = sock->sk;
lock_sock(sk);
- l2cap_pi(sk)->chan->imtu = l2cap_mtu;
+ /* Set MTU to 0 so L2CAP can auto select the MTU */
+ l2cap_pi(sk)->chan->imtu = 0;
l2cap_pi(sk)->chan->sec_level = sec_level;
if (l2cap_ertm)
l2cap_pi(sk)->chan->mode = L2CAP_MODE_ERTM;
@@ -2039,7 +2036,8 @@ static int rfcomm_add_listener(bdaddr_t *ba)
/* Set L2CAP options */
sk = sock->sk;
lock_sock(sk);
- l2cap_pi(sk)->chan->imtu = l2cap_mtu;
+ /* Set MTU to 0 so L2CAP can auto select the MTU */
+ l2cap_pi(sk)->chan->imtu = 0;
release_sock(sk);
/* Start listening on the socket */
@@ -2237,9 +2235,6 @@ MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control");
module_param(channel_mtu, int, 0644);
MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel");
-module_param(l2cap_mtu, uint, 0644);
-MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection");
-
module_param(l2cap_ertm, bool, 0644);
MODULE_PARM_DESC(l2cap_ertm, "Use L2CAP ERTM mode for connection");
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 0c7d31c6c18c..a58584949a95 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -413,10 +413,8 @@ static int __rfcomm_create_dev(struct sock *sk, void __user *arg)
dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel);
if (IS_ERR(dlc))
return PTR_ERR(dlc);
- else if (dlc) {
- rfcomm_dlc_put(dlc);
+ if (dlc)
return -EBUSY;
- }
dlc = rfcomm_dlc_alloc(GFP_KERNEL);
if (!dlc)
return -ENOMEM;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index b91d6b440fdf..c8c3d38cdc7b 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -922,6 +922,7 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
struct sock *sk = sock->sk;
int len, err = 0;
struct bt_voice voice;
+ u32 phys;
BT_DBG("sk %p", sk);
@@ -956,6 +957,18 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
break;
+ case BT_PHY:
+ if (sk->sk_state != BT_CONNECTED) {
+ err = -ENOTCONN;
+ break;
+ }
+
+ phys = hci_conn_get_phy(sco_pi(sk)->conn->hcon);
+
+ if (put_user(phys, (u32 __user *) optval))
+ err = -EFAULT;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 204f14f8b507..1476a91ce935 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -1145,7 +1145,7 @@ static void sc_generate_link_key(struct smp_chan *smp)
return;
if (test_bit(SMP_FLAG_CT2, &smp->flags)) {
- /* SALT = 0x00000000000000000000000000000000746D7031 */
+ /* SALT = 0x000000000000000000000000746D7031 */
const u8 salt[16] = { 0x31, 0x70, 0x6d, 0x74 };
if (smp_h7(smp->tfm_cmac, smp->tk, salt, smp->link_key)) {
@@ -1203,7 +1203,7 @@ static void sc_generate_ltk(struct smp_chan *smp)
set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags);
if (test_bit(SMP_FLAG_CT2, &smp->flags)) {
- /* SALT = 0x00000000000000000000000000000000746D7032 */
+ /* SALT = 0x000000000000000000000000746D7032 */
const u8 salt[16] = { 0x32, 0x70, 0x6d, 0x74 };
if (smp_h7(smp->tfm_cmac, key->val, salt, smp->tk))
@@ -2115,7 +2115,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
struct l2cap_chan *chan = conn->smp;
struct smp_chan *smp = chan->data;
struct hci_conn *hcon = conn->hcon;
- u8 *pkax, *pkbx, *na, *nb;
+ u8 *pkax, *pkbx, *na, *nb, confirm_hint;
u32 passkey;
int err;
@@ -2168,6 +2168,24 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
smp->prnd);
SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
+
+ /* Only Just-Works pairing requires extra checks */
+ if (smp->method != JUST_WORKS)
+ goto mackey_and_ltk;
+
+ /* If a long term key already exists in the local host, leave
+ * the decision to user space, since the remote device could
+ * be either legitimate or malicious.
+ */
+ if (hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
+ hcon->role)) {
+ /* Set passkey to 0. The value can be any number since
+ * it'll be ignored anyway.
+ */
+ passkey = 0;
+ confirm_hint = 1;
+ goto confirm;
+ }
}
mackey_and_ltk:
@@ -2188,8 +2206,11 @@ mackey_and_ltk:
if (err)
return SMP_UNSPECIFIED;
+ confirm_hint = 0;
+
+confirm:
err = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type,
- hcon->dst_type, passkey, 0);
+ hcon->dst_type, passkey, confirm_hint);
if (err)
return SMP_UNSPECIFIED;
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index d555c0d8657d..29dbdd4c29f6 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -10,6 +10,7 @@
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
+#include <linux/error-injection.h>
#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>
@@ -37,7 +38,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
repeat = 1;
rcu_read_lock();
- preempt_disable();
+ migrate_disable();
time_start = ktime_get_ns();
for (i = 0; i < repeat; i++) {
bpf_cgroup_storage_set(storage);
@@ -54,18 +55,18 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
if (need_resched()) {
time_spent += ktime_get_ns() - time_start;
- preempt_enable();
+ migrate_enable();
rcu_read_unlock();
cond_resched();
rcu_read_lock();
- preempt_disable();
+ migrate_disable();
time_start = ktime_get_ns();
}
}
time_spent += ktime_get_ns() - time_start;
- preempt_enable();
+ migrate_enable();
rcu_read_unlock();
do_div(time_spent, repeat);
@@ -113,6 +114,9 @@ out:
* architecture dependent calling conventions. 7+ can be supported in the
* future.
*/
+__diag_push();
+__diag_ignore(GCC, 8, "-Wmissing-prototypes",
+ "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
return a + 1;
@@ -143,6 +147,15 @@ int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
return a + (long)b + c + d + (long)e + f;
}
+int noinline bpf_modify_return_test(int a, int *b)
+{
+ *b += 1;
+ return a + *b;
+}
+__diag_pop();
+
+ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);
+
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
u32 headroom, u32 tailroom)
{
@@ -160,18 +173,48 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
kfree(data);
return ERR_PTR(-EFAULT);
}
- if (bpf_fentry_test1(1) != 2 ||
- bpf_fentry_test2(2, 3) != 5 ||
- bpf_fentry_test3(4, 5, 6) != 15 ||
- bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
- bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
- bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111) {
- kfree(data);
- return ERR_PTR(-EFAULT);
- }
+
return data;
}
+int bpf_prog_test_run_tracing(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ u16 side_effect = 0, ret = 0;
+ int b = 2, err = -EFAULT;
+ u32 retval = 0;
+
+ switch (prog->expected_attach_type) {
+ case BPF_TRACE_FENTRY:
+ case BPF_TRACE_FEXIT:
+ if (bpf_fentry_test1(1) != 2 ||
+ bpf_fentry_test2(2, 3) != 5 ||
+ bpf_fentry_test3(4, 5, 6) != 15 ||
+ bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
+ bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
+ bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111)
+ goto out;
+ break;
+ case BPF_MODIFY_RETURN:
+ ret = bpf_modify_return_test(1, &b);
+ if (b != 2)
+ side_effect = 1;
+ break;
+ default:
+ goto out;
+ }
+
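+ /* Pack both results into one u32 for user space: the observed
+ * side effect in the upper 16 bits and the test's return value
+ * in the lower 16 (e.g. side_effect=1, ret=3 -> 0x00010003).
+ */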
+ retval = ((u32)side_effect << 16) | ret;
+ if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
+ goto out;
+
+ err = 0;
+out:
+ trace_bpf_test_finish(&err);
+ return err;
+}
+
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
@@ -277,6 +320,12 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
/* gso_segs is allowed */
if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
+ offsetof(struct __sk_buff, gso_size)))
+ return -EINVAL;
+
+ /* gso_size is allowed */
+
+ if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
sizeof(struct __sk_buff)))
return -EINVAL;
@@ -297,6 +346,7 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
if (__skb->gso_segs > GSO_MAX_SEGS)
return -EINVAL;
skb_shinfo(skb)->gso_segs = __skb->gso_segs;
+ skb_shinfo(skb)->gso_size = __skb->gso_size;
return 0;
}
diff --git a/net/bpfilter/main.c b/net/bpfilter/main.c
index efea4874743e..05e1cfc1e5cd 100644
--- a/net/bpfilter/main.c
+++ b/net/bpfilter/main.c
@@ -35,7 +35,6 @@ static void loop(void)
struct mbox_reply reply;
int n;
- fprintf(debug_f, "testing the buffer\n");
n = read(0, &req, sizeof(req));
if (n != sizeof(req)) {
fprintf(debug_f, "invalid request %d\n", n);
diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c
index afee292fb004..162998e2f039 100644
--- a/net/bridge/br_netlink_tunnel.c
+++ b/net/bridge/br_netlink_tunnel.c
@@ -26,8 +26,8 @@ static size_t __get_vlan_tinfo_size(void)
nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_VLAN_TUNNEL_FLAGS */
}
-static bool vlan_tunid_inrange(struct net_bridge_vlan *v_curr,
- struct net_bridge_vlan *v_last)
+bool vlan_tunid_inrange(const struct net_bridge_vlan *v_curr,
+ const struct net_bridge_vlan *v_last)
{
__be32 tunid_curr = tunnel_id_to_key32(v_curr->tinfo.tunnel_id);
__be32 tunid_last = tunnel_id_to_key32(v_last->tinfo.tunnel_id);
@@ -193,8 +193,8 @@ static const struct nla_policy vlan_tunnel_policy[IFLA_BRIDGE_VLAN_TUNNEL_MAX +
[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS] = { .type = NLA_U16 },
};
-static int br_vlan_tunnel_info(struct net_bridge_port *p, int cmd,
- u16 vid, u32 tun_id, bool *changed)
+int br_vlan_tunnel_info(const struct net_bridge_port *p, int cmd,
+ u16 vid, u32 tun_id, bool *changed)
{
int err = 0;
@@ -250,8 +250,8 @@ int br_parse_vlan_tunnel_info(struct nlattr *attr,
return 0;
}
-int br_process_vlan_tunnel_info(struct net_bridge *br,
- struct net_bridge_port *p, int cmd,
+int br_process_vlan_tunnel_info(const struct net_bridge *br,
+ const struct net_bridge_port *p, int cmd,
struct vtunnel_info *tinfo_curr,
struct vtunnel_info *tinfo_last,
bool *changed)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 5153ffe79a01..1f97703a52ff 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -1199,8 +1199,8 @@ static inline void br_vlan_notify(const struct net_bridge *br,
/* br_vlan_options.c */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
-bool br_vlan_opts_eq(const struct net_bridge_vlan *v1,
- const struct net_bridge_vlan *v2);
+bool br_vlan_opts_eq_range(const struct net_bridge_vlan *v_curr,
+ const struct net_bridge_vlan *range_end);
bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v);
size_t br_vlan_opts_nl_size(void);
int br_vlan_process_options(const struct net_bridge *br,
diff --git a/net/bridge/br_private_tunnel.h b/net/bridge/br_private_tunnel.h
index 2bdef2ea3420..c54cc26211d7 100644
--- a/net/bridge/br_private_tunnel.h
+++ b/net/bridge/br_private_tunnel.h
@@ -18,8 +18,8 @@ struct vtunnel_info {
/* br_netlink_tunnel.c */
int br_parse_vlan_tunnel_info(struct nlattr *attr,
struct vtunnel_info *tinfo);
-int br_process_vlan_tunnel_info(struct net_bridge *br,
- struct net_bridge_port *p,
+int br_process_vlan_tunnel_info(const struct net_bridge *br,
+ const struct net_bridge_port *p,
int cmd,
struct vtunnel_info *tinfo_curr,
struct vtunnel_info *tinfo_last,
@@ -32,8 +32,9 @@ int br_fill_vlan_tunnel_info(struct sk_buff *skb,
/* br_vlan_tunnel.c */
int vlan_tunnel_init(struct net_bridge_vlan_group *vg);
void vlan_tunnel_deinit(struct net_bridge_vlan_group *vg);
-int nbp_vlan_tunnel_info_delete(struct net_bridge_port *port, u16 vid);
-int nbp_vlan_tunnel_info_add(struct net_bridge_port *port, u16 vid, u32 tun_id);
+int nbp_vlan_tunnel_info_delete(const struct net_bridge_port *port, u16 vid);
+int nbp_vlan_tunnel_info_add(const struct net_bridge_port *port, u16 vid,
+ u32 tun_id);
void nbp_vlan_tunnel_info_flush(struct net_bridge_port *port);
void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan);
@@ -42,19 +43,23 @@ int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
struct net_bridge_vlan_group *vg);
int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
struct net_bridge_vlan *vlan);
+bool vlan_tunid_inrange(const struct net_bridge_vlan *v_curr,
+ const struct net_bridge_vlan *v_last);
+int br_vlan_tunnel_info(const struct net_bridge_port *p, int cmd,
+ u16 vid, u32 tun_id, bool *changed);
#else
static inline int vlan_tunnel_init(struct net_bridge_vlan_group *vg)
{
return 0;
}
-static inline int nbp_vlan_tunnel_info_delete(struct net_bridge_port *port,
+static inline int nbp_vlan_tunnel_info_delete(const struct net_bridge_port *port,
u16 vid)
{
return 0;
}
-static inline int nbp_vlan_tunnel_info_add(struct net_bridge_port *port,
+static inline int nbp_vlan_tunnel_info_add(const struct net_bridge_port *port,
u16 vid, u32 tun_id)
{
return 0;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 6b5deca08b89..f9092c71225f 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -1569,10 +1569,41 @@ void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
}
}
+static bool br_vlan_stats_fill(struct sk_buff *skb,
+ const struct net_bridge_vlan *v)
+{
+ struct br_vlan_stats stats;
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
+ if (!nest)
+ return false;
+
+ br_vlan_get_stats(v, &stats);
+ if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
+ BRIDGE_VLANDB_STATS_PAD) ||
+ nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
+ stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
+ nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
+ BRIDGE_VLANDB_STATS_PAD) ||
+ nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
+ stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
+ goto out_err;
+
+ nla_nest_end(skb, nest);
+
+ return true;
+
+out_err:
+ nla_nest_cancel(skb, nest);
+ return false;
+}
+
/* v_opts is used to dump the options which must be equal in the whole range */
static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
const struct net_bridge_vlan *v_opts,
- u16 flags)
+ u16 flags,
+ bool dump_stats)
{
struct bridge_vlan_info info;
struct nlattr *nest;
@@ -1596,8 +1627,13 @@ static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
goto out_err;
- if (v_opts && !br_vlan_opts_fill(skb, v_opts))
- goto out_err;
+ if (v_opts) {
+ if (!br_vlan_opts_fill(skb, v_opts))
+ goto out_err;
+
+ if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
+ goto out_err;
+ }
nla_nest_end(skb, nest);
@@ -1675,7 +1711,7 @@ void br_vlan_notify(const struct net_bridge *br,
goto out_kfree;
}
- if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags))
+ if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
goto out_err;
nlmsg_end(skb, nlh);
@@ -1694,14 +1730,16 @@ bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
{
return v_curr->vid - range_end->vid == 1 &&
range_end->flags == v_curr->flags &&
- br_vlan_opts_eq(v_curr, range_end);
+ br_vlan_opts_eq_range(v_curr, range_end);
}
static int br_vlan_dump_dev(const struct net_device *dev,
struct sk_buff *skb,
- struct netlink_callback *cb)
+ struct netlink_callback *cb,
+ u32 dump_flags)
{
struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
+ bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
struct net_bridge_vlan_group *vg;
int idx = 0, s_idx = cb->args[1];
struct nlmsghdr *nlh = NULL;
@@ -1754,12 +1792,13 @@ static int br_vlan_dump_dev(const struct net_device *dev,
continue;
}
- if (v->vid == pvid || !br_vlan_can_enter_range(v, range_end)) {
- u16 flags = br_vlan_flags(range_start, pvid);
+ if (dump_stats || v->vid == pvid ||
+ !br_vlan_can_enter_range(v, range_end)) {
+ u16 vlan_flags = br_vlan_flags(range_start, pvid);
if (!br_vlan_fill_vids(skb, range_start->vid,
range_end->vid, range_start,
- flags)) {
+ vlan_flags, dump_stats)) {
err = -EMSGSIZE;
break;
}
@@ -1778,7 +1817,8 @@ static int br_vlan_dump_dev(const struct net_device *dev,
*/
if (!err && range_start &&
!br_vlan_fill_vids(skb, range_start->vid, range_end->vid,
- range_start, br_vlan_flags(range_start, pvid)))
+ range_start, br_vlan_flags(range_start, pvid),
+ dump_stats))
err = -EMSGSIZE;
cb->args[1] = err ? idx : 0;
@@ -1788,18 +1828,27 @@ static int br_vlan_dump_dev(const struct net_device *dev,
return err;
}
+static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
+ [BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
+};
+
static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
int idx = 0, err = 0, s_idx = cb->args[0];
struct net *net = sock_net(skb->sk);
struct br_vlan_msg *bvm;
struct net_device *dev;
+ u32 dump_flags = 0;
- err = nlmsg_parse(cb->nlh, sizeof(*bvm), NULL, 0, NULL, cb->extack);
+ err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
+ br_vlan_db_dump_pol, cb->extack);
if (err < 0)
return err;
bvm = nlmsg_data(cb->nlh);
+ if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
+ dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);
rcu_read_lock();
if (bvm->ifindex) {
@@ -1808,7 +1857,7 @@ static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
err = -ENODEV;
goto out_err;
}
- err = br_vlan_dump_dev(dev, skb, cb);
+ err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
if (err && err != -EMSGSIZE)
goto out_err;
} else {
@@ -1816,7 +1865,7 @@ static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (idx < s_idx)
goto skip;
- err = br_vlan_dump_dev(dev, skb, cb);
+ err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
if (err == -EMSGSIZE)
break;
skip:
@@ -1839,6 +1888,7 @@ static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] =
.len = sizeof(struct bridge_vlan_info) },
[BRIDGE_VLANDB_ENTRY_RANGE] = { .type = NLA_U16 },
[BRIDGE_VLANDB_ENTRY_STATE] = { .type = NLA_U8 },
+ [BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
};
static int br_vlan_rtm_process_one(struct net_device *dev,
diff --git a/net/bridge/br_vlan_options.c b/net/bridge/br_vlan_options.c
index cd2eb194eb98..b4add9ea8964 100644
--- a/net/bridge/br_vlan_options.c
+++ b/net/bridge/br_vlan_options.c
@@ -4,25 +4,58 @@
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
+#include <net/ip_tunnels.h>
#include "br_private.h"
+#include "br_private_tunnel.h"
-/* check if the options between two vlans are equal */
-bool br_vlan_opts_eq(const struct net_bridge_vlan *v1,
- const struct net_bridge_vlan *v2)
+static bool __vlan_tun_put(struct sk_buff *skb, const struct net_bridge_vlan *v)
{
- return v1->state == v2->state;
+ __be32 tid = tunnel_id_to_key32(v->tinfo.tunnel_id);
+ struct nlattr *nest;
+
+ if (!v->tinfo.tunnel_dst)
+ return true;
+
+ nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_TUNNEL_INFO);
+ if (!nest)
+ return false;
+ if (nla_put_u32(skb, BRIDGE_VLANDB_TINFO_ID, be32_to_cpu(tid))) {
+ nla_nest_cancel(skb, nest);
+ return false;
+ }
+ nla_nest_end(skb, nest);
+
+ return true;
+}
+
+static bool __vlan_tun_can_enter_range(const struct net_bridge_vlan *v_curr,
+ const struct net_bridge_vlan *range_end)
+{
+ return (!v_curr->tinfo.tunnel_dst && !range_end->tinfo.tunnel_dst) ||
+ vlan_tunid_inrange(v_curr, range_end);
+}
+
+/* check if the option state of v_curr allows it to enter the range */
+bool br_vlan_opts_eq_range(const struct net_bridge_vlan *v_curr,
+ const struct net_bridge_vlan *range_end)
+{
+ return v_curr->state == range_end->state &&
+ __vlan_tun_can_enter_range(v_curr, range_end);
}
bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v)
{
return !nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE,
- br_vlan_get_state(v));
+ br_vlan_get_state(v)) &&
+ __vlan_tun_put(skb, v);
}
size_t br_vlan_opts_nl_size(void)
{
- return nla_total_size(sizeof(u8)); /* BRIDGE_VLANDB_ENTRY_STATE */
+ return nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_STATE */
+ + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY_TUNNEL_INFO */
+ + nla_total_size(sizeof(u32)); /* BRIDGE_VLANDB_TINFO_ID */
}
static int br_vlan_modify_state(struct net_bridge_vlan_group *vg,
@@ -62,6 +95,68 @@ static int br_vlan_modify_state(struct net_bridge_vlan_group *vg,
return 0;
}
+static const struct nla_policy br_vlandb_tinfo_pol[BRIDGE_VLANDB_TINFO_MAX + 1] = {
+ [BRIDGE_VLANDB_TINFO_ID] = { .type = NLA_U32 },
+ [BRIDGE_VLANDB_TINFO_CMD] = { .type = NLA_U32 },
+};
+
+static int br_vlan_modify_tunnel(const struct net_bridge_port *p,
+ struct net_bridge_vlan *v,
+ struct nlattr **tb,
+ bool *changed,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tun_tb[BRIDGE_VLANDB_TINFO_MAX + 1], *attr;
+ struct bridge_vlan_info *vinfo;
+ u32 tun_id = 0;
+ int cmd, err;
+
+ if (!p) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't modify tunnel mapping of non-port vlans");
+ return -EINVAL;
+ }
+ if (!(p->flags & BR_VLAN_TUNNEL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Port doesn't have tunnel flag set");
+ return -EINVAL;
+ }
+
+ attr = tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO];
+ err = nla_parse_nested(tun_tb, BRIDGE_VLANDB_TINFO_MAX, attr,
+ br_vlandb_tinfo_pol, extack);
+ if (err)
+ return err;
+
+ if (!tun_tb[BRIDGE_VLANDB_TINFO_CMD]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing tunnel command attribute");
+ return -ENOENT;
+ }
+ cmd = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_CMD]);
+ switch (cmd) {
+ case RTM_SETLINK:
+ if (!tun_tb[BRIDGE_VLANDB_TINFO_ID]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing tunnel id attribute");
+ return -ENOENT;
+ }
+ /* when working on vlan ranges this is the starting tunnel id */
+ tun_id = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_ID]);
+ /* vlan info attr is guaranteed by br_vlan_rtm_process_one */
+ vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
+ /* tunnel ids are mapped to each vlan in increasing order,
+ * the starting vlan is in BRIDGE_VLANDB_ENTRY_INFO and v is the
+ * current vlan, so we compute: tun_id + v - vinfo->vid
+ */
+ tun_id += v->vid - vinfo->vid;
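+ /* e.g. a range starting at vid 10 with tunnel id 1000
+ * maps vid 12 to 1000 + (12 - 10) = 1002.
+ */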
+ break;
+ case RTM_DELLINK:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel command");
+ return -EINVAL;
+ }
+
+ return br_vlan_tunnel_info(p, cmd, v->vid, tun_id, changed);
+}
+
static int br_vlan_process_one_opts(const struct net_bridge *br,
const struct net_bridge_port *p,
struct net_bridge_vlan_group *vg,
@@ -80,6 +175,11 @@ static int br_vlan_process_one_opts(const struct net_bridge *br,
if (err)
return err;
}
+ if (tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO]) {
+ err = br_vlan_modify_tunnel(p, v, tb, changed, extack);
+ if (err)
+ return err;
+ }
return 0;
}
diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c
index d13d2080f527..169e005fbda2 100644
--- a/net/bridge/br_vlan_tunnel.c
+++ b/net/bridge/br_vlan_tunnel.c
@@ -89,7 +89,8 @@ out:
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
*/
-int nbp_vlan_tunnel_info_add(struct net_bridge_port *port, u16 vid, u32 tun_id)
+int nbp_vlan_tunnel_info_add(const struct net_bridge_port *port, u16 vid,
+ u32 tun_id)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *vlan;
@@ -107,7 +108,7 @@ int nbp_vlan_tunnel_info_add(struct net_bridge_port *port, u16 vid, u32 tun_id)
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
*/
-int nbp_vlan_tunnel_info_delete(struct net_bridge_port *port, u16 vid)
+int nbp_vlan_tunnel_info_delete(const struct net_bridge_port *port, u16 vid)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index e1256e03a9a8..78db58c7aec2 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1561,7 +1561,7 @@ struct compat_ebt_entry_mwt {
compat_uptr_t ptr;
} u;
compat_uint_t match_size;
- compat_uint_t data[0] __attribute__ ((aligned (__alignof__(struct compat_ebt_replace))));
+ compat_uint_t data[] __aligned(__alignof__(struct compat_ebt_replace));
};
/* account for possible padding between match_size and ->data */
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 3ab23f698221..756b63b6f7b3 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -8,6 +8,7 @@
#include <linux/bpf.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
+#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
static atomic_t cache_idx;
@@ -60,7 +61,7 @@ struct bpf_sk_storage_data {
* the number of cachelines access during the cache hit case.
*/
struct bpf_sk_storage_map __rcu *smap;
- u8 data[0] __aligned(8);
+ u8 data[] __aligned(8);
};
/* Linked to bpf_sk_storage and bpf_sk_storage_map */
@@ -606,6 +607,14 @@ static void bpf_sk_storage_map_free(struct bpf_map *map)
kfree(map);
}
+/* U16_MAX is much more than enough for sk local storage
+ * considering a tcp_sock is ~2k.
+ */
+#define MAX_VALUE_SIZE \
+ min_t(u32, \
+ (KMALLOC_MAX_SIZE - MAX_BPF_STACK - sizeof(struct bpf_sk_storage_elem)), \
+ (U16_MAX - sizeof(struct bpf_sk_storage_elem)))
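+
+/* Rough bound for illustration: assuming KMALLOC_MAX_SIZE is in the
+ * megabytes (it is arch- and allocator-dependent), the U16_MAX term is
+ * the effective cap, so a value can be just under 64 KiB.
+ */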
+
static int bpf_sk_storage_map_alloc_check(union bpf_attr *attr)
{
if (attr->map_flags & ~SK_STORAGE_CREATE_FLAG_MASK ||
@@ -619,12 +628,7 @@ static int bpf_sk_storage_map_alloc_check(union bpf_attr *attr)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (attr->value_size >= KMALLOC_MAX_SIZE -
- MAX_BPF_STACK - sizeof(struct bpf_sk_storage_elem) ||
- /* U16_MAX is much more than enough for sk local storage
- * considering a tcp_sock is ~2k.
- */
- attr->value_size > U16_MAX - sizeof(struct bpf_sk_storage_elem))
+ if (attr->value_size > MAX_VALUE_SIZE)
return -E2BIG;
return 0;
@@ -910,3 +914,270 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_SOCKET,
};
+
+struct bpf_sk_storage_diag {
+ u32 nr_maps;
+ struct bpf_map *maps[];
+};
+
+/* The reply will be like:
+ * INET_DIAG_BPF_SK_STORAGES (nla_nest)
+ * SK_DIAG_BPF_STORAGE (nla_nest)
+ * SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
+ * SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
+ * SK_DIAG_BPF_STORAGE (nla_nest)
+ * SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
+ * SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
+ * ....
+ */
+static int nla_value_size(u32 value_size)
+{
+ /* SK_DIAG_BPF_STORAGE (nla_nest)
+ * SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
+ * SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
+ */
+ return nla_total_size(0) + nla_total_size(sizeof(u32)) +
+ nla_total_size_64bit(value_size);
+}
+
+void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
+{
+ u32 i;
+
+ if (!diag)
+ return;
+
+ for (i = 0; i < diag->nr_maps; i++)
+ bpf_map_put(diag->maps[i]);
+
+ kfree(diag);
+}
+EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);
+
+static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
+ const struct bpf_map *map)
+{
+ u32 i;
+
+ for (i = 0; i < diag->nr_maps; i++) {
+ if (diag->maps[i] == map)
+ return true;
+ }
+
+ return false;
+}
+
+struct bpf_sk_storage_diag *
+bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
+{
+ struct bpf_sk_storage_diag *diag;
+ struct nlattr *nla;
+ u32 nr_maps = 0;
+ int rem, err;
+
+ /* bpf_sk_storage_map is currently limited to CAP_SYS_ADMIN,
+ * matching the capability check done on the map_alloc_check() side.
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+
+ nla_for_each_nested(nla, nla_stgs, rem) {
+ if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
+ nr_maps++;
+ }
+
+ diag = kzalloc(sizeof(*diag) + sizeof(diag->maps[0]) * nr_maps,
+ GFP_KERNEL);
+ if (!diag)
+ return ERR_PTR(-ENOMEM);
+
+ nla_for_each_nested(nla, nla_stgs, rem) {
+ struct bpf_map *map;
+ int map_fd;
+
+ if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
+ continue;
+
+ map_fd = nla_get_u32(nla);
+ map = bpf_map_get(map_fd);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto err_free;
+ }
+ if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
+ bpf_map_put(map);
+ err = -EINVAL;
+ goto err_free;
+ }
+ if (diag_check_dup(diag, map)) {
+ bpf_map_put(map);
+ err = -EEXIST;
+ goto err_free;
+ }
+ diag->maps[diag->nr_maps++] = map;
+ }
+
+ return diag;
+
+err_free:
+ bpf_sk_storage_diag_free(diag);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);
+
+static int diag_get(struct bpf_sk_storage_data *sdata, struct sk_buff *skb)
+{
+ struct nlattr *nla_stg, *nla_value;
+ struct bpf_sk_storage_map *smap;
+
+ /* It cannot exceed the max nlattr payload */
+ BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < MAX_VALUE_SIZE);
+
+ nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
+ if (!nla_stg)
+ return -EMSGSIZE;
+
+ smap = rcu_dereference(sdata->smap);
+ if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
+ goto errout;
+
+ nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
+ smap->map.value_size,
+ SK_DIAG_BPF_STORAGE_PAD);
+ if (!nla_value)
+ goto errout;
+
+ if (map_value_has_spin_lock(&smap->map))
+ copy_map_value_locked(&smap->map, nla_data(nla_value),
+ sdata->data, true);
+ else
+ copy_map_value(&smap->map, nla_data(nla_value), sdata->data);
+
+ nla_nest_end(skb, nla_stg);
+ return 0;
+
+errout:
+ nla_nest_cancel(skb, nla_stg);
+ return -EMSGSIZE;
+}
+
+static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
+ int stg_array_type,
+ unsigned int *res_diag_size)
+{
+ /* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
+ unsigned int diag_size = nla_total_size(0);
+ struct bpf_sk_storage *sk_storage;
+ struct bpf_sk_storage_elem *selem;
+ struct bpf_sk_storage_map *smap;
+ struct nlattr *nla_stgs;
+ unsigned int saved_len;
+ int err = 0;
+
+ rcu_read_lock();
+
+ sk_storage = rcu_dereference(sk->sk_bpf_storage);
+ if (!sk_storage || hlist_empty(&sk_storage->list)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ nla_stgs = nla_nest_start(skb, stg_array_type);
+ if (!nla_stgs)
+ /* Continue to learn diag_size */
+ err = -EMSGSIZE;
+
+ saved_len = skb->len;
+ hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
+ smap = rcu_dereference(SDATA(selem)->smap);
+ diag_size += nla_value_size(smap->map.value_size);
+
+ if (nla_stgs && diag_get(SDATA(selem), skb))
+ /* Continue to learn diag_size */
+ err = -EMSGSIZE;
+ }
+
+ rcu_read_unlock();
+
+ if (nla_stgs) {
+ if (saved_len == skb->len)
+ nla_nest_cancel(skb, nla_stgs);
+ else
+ nla_nest_end(skb, nla_stgs);
+ }
+
+ if (diag_size == nla_total_size(0)) {
+ *res_diag_size = 0;
+ return 0;
+ }
+
+ *res_diag_size = diag_size;
+ return err;
+}
+
+int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
+ struct sock *sk, struct sk_buff *skb,
+ int stg_array_type,
+ unsigned int *res_diag_size)
+{
+ /* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
+ unsigned int diag_size = nla_total_size(0);
+ struct bpf_sk_storage *sk_storage;
+ struct bpf_sk_storage_data *sdata;
+ struct nlattr *nla_stgs;
+ unsigned int saved_len;
+ int err = 0;
+ u32 i;
+
+ *res_diag_size = 0;
+
+ /* No map has been specified. Dump all. */
+ if (!diag->nr_maps)
+ return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
+ res_diag_size);
+
+ rcu_read_lock();
+ sk_storage = rcu_dereference(sk->sk_bpf_storage);
+ if (!sk_storage || hlist_empty(&sk_storage->list)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ nla_stgs = nla_nest_start(skb, stg_array_type);
+ if (!nla_stgs)
+ /* Continue to learn diag_size */
+ err = -EMSGSIZE;
+
+ saved_len = skb->len;
+ for (i = 0; i < diag->nr_maps; i++) {
+ sdata = __sk_storage_lookup(sk_storage,
+ (struct bpf_sk_storage_map *)diag->maps[i],
+ false);
+
+ if (!sdata)
+ continue;
+
+ diag_size += nla_value_size(diag->maps[i]->value_size);
+
+ if (nla_stgs && diag_get(sdata, skb))
+ /* Continue to learn diag_size */
+ err = -EMSGSIZE;
+ }
+ rcu_read_unlock();
+
+ if (nla_stgs) {
+ if (saved_len == skb->len)
+ nla_nest_cancel(skb, nla_stgs);
+ else
+ nla_nest_end(skb, nla_stgs);
+ }
+
+ if (diag_size == nla_total_size(0)) {
+ *res_diag_size = 0;
+ return 0;
+ }
+
+ *res_diag_size = diag_size;
+ return err;
+}
+EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index a78e7f864c1e..639745d4f3b9 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -51,6 +51,7 @@
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
+#include <linux/indirect_call_wrapper.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
@@ -166,8 +167,6 @@ done:
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
struct sk_buff_head *queue,
unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
int *off, int *err,
struct sk_buff **last)
{
@@ -198,8 +197,6 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
refcount_inc(&skb->users);
} else {
__skb_unlink(skb, queue);
- if (destructor)
- destructor(sk, skb);
}
*off = _off;
return skb;
@@ -212,7 +209,6 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
* @sk: socket
* @queue: socket queue from which to receive
* @flags: MSG\_ flags
- * @destructor: invoked under the receive lock on successful dequeue
* @off: an offset in bytes to peek skb from. Returns an offset
* within an skb where data actually starts
* @err: error code returned
@@ -245,10 +241,7 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
*/
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
struct sk_buff_head *queue,
- unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
- int *off, int *err,
+ unsigned int flags, int *off, int *err,
struct sk_buff **last)
{
struct sk_buff *skb;
@@ -269,8 +262,8 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
* However, this function was correct in any case. 8)
*/
spin_lock_irqsave(&queue->lock, cpu_flags);
- skb = __skb_try_recv_from_queue(sk, queue, flags, destructor,
- off, &error, last);
+ skb = __skb_try_recv_from_queue(sk, queue, flags, off, &error,
+ last);
spin_unlock_irqrestore(&queue->lock, cpu_flags);
if (error)
goto no_packet;
@@ -293,10 +286,7 @@ EXPORT_SYMBOL(__skb_try_recv_datagram);
struct sk_buff *__skb_recv_datagram(struct sock *sk,
struct sk_buff_head *sk_queue,
- unsigned int flags,
- void (*destructor)(struct sock *sk,
- struct sk_buff *skb),
- int *off, int *err)
+ unsigned int flags, int *off, int *err)
{
struct sk_buff *skb, *last;
long timeo;
@@ -304,8 +294,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk,
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
- skb = __skb_try_recv_datagram(sk, sk_queue, flags, destructor,
- off, err, &last);
+ skb = __skb_try_recv_datagram(sk, sk_queue, flags, off, err,
+ &last);
if (skb)
return skb;
@@ -326,7 +316,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
return __skb_recv_datagram(sk, &sk->sk_receive_queue,
flags | (noblock ? MSG_DONTWAIT : 0),
- NULL, &off, err);
+ &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
@@ -414,6 +404,11 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
}
EXPORT_SYMBOL(skb_kill_datagram);
+INDIRECT_CALLABLE_DECLARE(static size_t simple_copy_to_iter(const void *addr,
+ size_t bytes,
+ void *data __always_unused,
+ struct iov_iter *i));
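+
+/* INDIRECT_CALL_1(cb, simple_copy_to_iter, ...) below expands to
+ * roughly "cb == simple_copy_to_iter ? simple_copy_to_iter(...) : cb(...)",
+ * trading a compare for an indirect call (expensive under retpolines)
+ * on the common copy-to-iter path.
+ */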
+
static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len, bool fault_short,
size_t (*cb)(const void *, size_t, void *,
@@ -427,7 +422,8 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
if (copy > 0) {
if (copy > len)
copy = len;
- n = cb(skb->data + offset, copy, data, to);
+ n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
+ skb->data + offset, copy, data, to);
offset += n;
if (n != copy)
goto short_copy;
@@ -449,8 +445,9 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
if (copy > len)
copy = len;
- n = cb(vaddr + skb_frag_off(frag) + offset - start,
- copy, data, to);
+ n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
+ vaddr + skb_frag_off(frag) + offset - start,
+ copy, data, to);
kunmap(page);
offset += n;
if (n != copy)
diff --git a/net/core/dev.c b/net/core/dev.c
index 500bba8874b0..9c9e763bfe0e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3266,7 +3266,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
* It may return NULL if the skb requires no segmentation. This is
* only possible when GSO is used for verifying header integrity.
*
- * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
+ * Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
*/
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path)
@@ -3295,7 +3295,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
features &= ~NETIF_F_GSO_PARTIAL;
}
- BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
+ BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
@@ -4638,7 +4638,6 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
kfree_skb(skb);
}
}
-EXPORT_SYMBOL_GPL(generic_xdp_tx);
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
@@ -4849,7 +4848,8 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
skb->tc_at_ingress = 1;
mini_qdisc_bstats_cpu_update(miniq, skb);
- switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
+ switch (tcf_classify_ingress(skb, miniq->block, miniq->filter_list,
+ &cl_res, false)) {
case TC_ACT_OK:
case TC_ACT_RECLASSIFY:
skb->tc_index = TC_H_MIN(cl_res.classid);
@@ -8655,15 +8655,17 @@ static void dev_xdp_uninstall(struct net_device *dev)
* @dev: device
* @extack: netlink extended ack
* @fd: new program fd or negative value to clear
+ * @expected_fd: old program fd that userspace expects to replace or clear
* @flags: xdp-related flags
*
* Set or clear a bpf program for a device
*/
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
- int fd, u32 flags)
+ int fd, int expected_fd, u32 flags)
{
const struct net_device_ops *ops = dev->netdev_ops;
enum bpf_netdev_command query;
+ u32 prog_id, expected_id = 0;
struct bpf_prog *prog = NULL;
bpf_op_t bpf_op, bpf_chk;
bool offload;
@@ -8684,15 +8686,29 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
if (bpf_op == bpf_chk)
bpf_chk = generic_xdp_install;
- if (fd >= 0) {
- u32 prog_id;
+ prog_id = __dev_xdp_query(dev, bpf_op, query);
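+ /* With XDP_FLAGS_REPLACE, user space passes the fd of the program
+ * it believes is currently attached: e.g. a loader swapping prog A
+ * for prog B passes expected_fd for A, and the update fails with
+ * -EEXIST if some other program was attached in the meantime,
+ * rather than silently clobbering it.
+ */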
+ if (flags & XDP_FLAGS_REPLACE) {
+ if (expected_fd >= 0) {
+ prog = bpf_prog_get_type_dev(expected_fd,
+ BPF_PROG_TYPE_XDP,
+ bpf_op == ops->ndo_bpf);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+ expected_id = prog->aux->id;
+ bpf_prog_put(prog);
+ }
+ if (prog_id != expected_id) {
+ NL_SET_ERR_MSG(extack, "Active program does not match expected");
+ return -EEXIST;
+ }
+ }
+ if (fd >= 0) {
if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) {
NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time");
return -EEXIST;
}
- prog_id = __dev_xdp_query(dev, bpf_op, query);
if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && prog_id) {
NL_SET_ERR_MSG(extack, "XDP program already attached");
return -EBUSY;
@@ -8715,7 +8731,7 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
return 0;
}
} else {
- if (!__dev_xdp_query(dev, bpf_op, query))
+ if (!prog_id)
return 0;
}
@@ -9283,6 +9299,10 @@ int register_netdevice(struct net_device *dev)
BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
BUG_ON(!net);
+ ret = ethtool_check_ops(dev->ethtool_ops);
+ if (ret)
+ return ret;
+
spin_lock_init(&dev->addr_list_lock);
lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
@@ -10006,6 +10026,7 @@ EXPORT_SYMBOL(unregister_netdev);
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
+ struct net *net_old = dev_net(dev);
int err, new_nsid, new_ifindex;
ASSERT_RTNL();
@@ -10021,7 +10042,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/* Get out if there is nothing to do */
err = 0;
- if (net_eq(dev_net(dev), net))
+ if (net_eq(net_old, net))
goto out;
/* Pick the destination device name, and ensure
@@ -10097,6 +10118,12 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
err = device_rename(&dev->dev, dev->name);
WARN_ON(err);
+ /* Adapt the owner in case the owning user namespace of the
+ * target network namespace differs from the original one.
+ */
+ err = netdev_change_owner(dev, net_old, net);
+ WARN_ON(err);
+
/* Add the device back in the hashes */
list_netdevice(dev);
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index dbaebbe573f0..547b587c1950 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -190,6 +190,9 @@ static int net_hwtstamp_validate(struct ifreq *ifr)
case HWTSTAMP_TX_ONESTEP_P2P:
tx_type_valid = 1;
break;
+ case __HWTSTAMP_TX_CNT:
+ /* not a real value */
+ break;
}
switch (rx_filter) {
@@ -211,6 +214,9 @@ static int net_hwtstamp_validate(struct ifreq *ifr)
case HWTSTAMP_FILTER_NTP_ALL:
rx_filter_valid = 1;
break;
+ case __HWTSTAMP_FILTER_CNT:
+ /* not a real value */
+ break;
}
if (!tx_type_valid || !rx_filter_valid)
diff --git a/net/core/devlink.c b/net/core/devlink.c
index b831c5545d6a..80f97722f31f 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -344,7 +344,7 @@ devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
struct devlink_region {
struct devlink *devlink;
struct list_head list;
- const char *name;
+ const struct devlink_region_ops *ops;
struct list_head snapshot_list;
u32 max_snapshots;
u32 cur_snapshots;
@@ -354,7 +354,6 @@ struct devlink_region {
struct devlink_snapshot {
struct list_head list;
struct devlink_region *region;
- devlink_snapshot_data_dest_t *data_destructor;
u8 *data;
u32 id;
};
@@ -365,7 +364,7 @@ devlink_region_get_by_name(struct devlink *devlink, const char *region_name)
struct devlink_region *region;
list_for_each_entry(region, &devlink->region_list, list)
- if (!strcmp(region->name, region_name))
+ if (!strcmp(region->ops->name, region_name))
return region;
return NULL;
@@ -545,6 +544,7 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg,
case DEVLINK_PORT_FLAVOUR_PHYSICAL:
case DEVLINK_PORT_FLAVOUR_CPU:
case DEVLINK_PORT_FLAVOUR_DSA:
+ case DEVLINK_PORT_FLAVOUR_VIRTUAL:
if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
attrs->phys.port_number))
return -EMSGSIZE;
@@ -2709,7 +2709,7 @@ static struct net *devlink_netns_get(struct sk_buff *skb,
struct net *net;
if (!!netns_pid_attr + !!netns_fd_attr + !!netns_id_attr > 1) {
- NL_SET_ERR_MSG(info->extack, "multiple netns identifying attributes specified");
+ NL_SET_ERR_MSG_MOD(info->extack, "multiple netns identifying attributes specified");
return ERR_PTR(-EINVAL);
}
@@ -2727,7 +2727,7 @@ static struct net *devlink_netns_get(struct sk_buff *skb,
net = ERR_PTR(-EINVAL);
}
if (IS_ERR(net)) {
- NL_SET_ERR_MSG(info->extack, "Unknown network namespace");
+ NL_SET_ERR_MSG_MOD(info->extack, "Unknown network namespace");
return ERR_PTR(-EINVAL);
}
if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
@@ -3694,7 +3694,7 @@ static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink,
if (err)
goto nla_put_failure;
- err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->name);
+ err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->ops->name);
if (err)
goto nla_put_failure;
@@ -3740,7 +3740,7 @@ static void devlink_nl_region_notify(struct devlink_region *region,
goto out_cancel_msg;
err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME,
- region->name);
+ region->ops->name);
if (err)
goto out_cancel_msg;
@@ -3768,13 +3768,201 @@ out_free_msg:
nlmsg_free(msg);
}
+/**
+ * __devlink_snapshot_id_increment - Increment number of snapshots using an id
+ * @devlink: devlink instance
+ * @id: the snapshot id
+ *
+ * Track when a new snapshot begins using an id. Load the count for the
+ * given id from the snapshot xarray, increment it, and store it back.
+ *
+ * Called when a new snapshot is created with the given id.
+ *
+ * The id *must* have been previously allocated by
+ * devlink_region_snapshot_id_get().
+ *
+ * Returns 0 on success, or an error on failure.
+ */
+static int __devlink_snapshot_id_increment(struct devlink *devlink, u32 id)
+{
+ unsigned long count;
+ void *p;
+
+ lockdep_assert_held(&devlink->lock);
+
+ p = xa_load(&devlink->snapshot_ids, id);
+ if (WARN_ON(!p))
+ return -EINVAL;
+
+ if (WARN_ON(!xa_is_value(p)))
+ return -EINVAL;
+
+ count = xa_to_value(p);
+ count++;
+
+ return xa_err(xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
+ GFP_KERNEL));
+}
+
+/**
+ * __devlink_snapshot_id_decrement - Decrease number of snapshots using an id
+ * @devlink: devlink instance
+ * @id: the snapshot id
+ *
+ * Track when a snapshot is deleted and stops using an id. Load the count
+ * for the given id from the snapshot xarray, decrement it, and store it
+ * back.
+ *
+ * If the count reaches zero, erase this id from the xarray, freeing it
+ * up for future re-use by devlink_region_snapshot_id_get().
+ *
+ * Called when a snapshot using the given id is deleted, and when the
+ * initial allocator of the id is finished using it.
+ */
+static void __devlink_snapshot_id_decrement(struct devlink *devlink, u32 id)
+{
+ unsigned long count;
+ void *p;
+
+ lockdep_assert_held(&devlink->lock);
+
+ p = xa_load(&devlink->snapshot_ids, id);
+ if (WARN_ON(!p))
+ return;
+
+ if (WARN_ON(!xa_is_value(p)))
+ return;
+
+ count = xa_to_value(p);
+
+ if (count > 1) {
+ count--;
+ xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
+ GFP_KERNEL);
+ } else {
+ /* If this was the last user, we can erase this id */
+ xa_erase(&devlink->snapshot_ids, id);
+ }
+}
+
+/**
+ * __devlink_snapshot_id_insert - Insert a specific snapshot ID
+ * @devlink: devlink instance
+ * @id: the snapshot id
+ *
+ * Mark the given snapshot id as used by inserting a zero value into the
+ * snapshot xarray.
+ *
+ * This must be called while holding the devlink instance lock. Unlike
+ * devlink_region_snapshot_id_get(), the initial reference count is zero, not one.
+ * It is expected that the id will immediately be used before
+ * releasing the devlink instance lock.
+ *
+ * Returns zero on success, or an error code if the snapshot id could not
+ * be inserted.
+ */
+static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
+{
+ lockdep_assert_held(&devlink->lock);
+
+ if (WARN_ON(xa_load(&devlink->snapshot_ids, id)))
+ return -EEXIST;
+
+ return xa_err(xa_store(&devlink->snapshot_ids, id, xa_mk_value(0),
+ GFP_KERNEL));
+}
+
+/**
+ * __devlink_region_snapshot_id_get - get snapshot ID
+ * @devlink: devlink instance
+ * @id: storage to return snapshot id
+ *
+ * Allocates a new snapshot id. Returns zero on success, or a negative
+ * error on failure. Must be called while holding the devlink instance
+ * lock.
+ *
+ * Snapshot IDs are tracked using an xarray which stores the number of
+ * users of the snapshot id.
+ *
+ * Note that the caller of this function counts as a 'user', in order to
+ * avoid race conditions. The caller must release its hold on the
+ * snapshot by using devlink_region_snapshot_id_put.
+ */
+static int __devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
+{
+ lockdep_assert_held(&devlink->lock);
+
+ return xa_alloc(&devlink->snapshot_ids, id, xa_mk_value(1),
+ xa_limit_32b, GFP_KERNEL);
+}
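+
+/* Lifecycle sketch: devlink_region_snapshot_id_get() allocates an id
+ * with a count of 1, each snapshot created with that id bumps the count
+ * via __devlink_snapshot_id_increment(), and each deletion (or the
+ * allocator's final put) decrements it until the id is erased and free
+ * for reuse.
+ */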
+
+/**
+ * __devlink_region_snapshot_create - create a new snapshot
+ * This will add a new snapshot of a region. The snapshot
+ * will be stored on the region struct and can be accessed
+ * from devlink. This is useful for future analyses of snapshots.
+ * Multiple snapshots can be created on a region.
+ * The @snapshot_id should be obtained using the getter function.
+ *
+ * Must be called only while holding the devlink instance lock.
+ *
+ * @region: devlink region of the snapshot
+ * @data: snapshot data
+ * @snapshot_id: snapshot id to be created
+ */
+static int
+__devlink_region_snapshot_create(struct devlink_region *region,
+ u8 *data, u32 snapshot_id)
+{
+ struct devlink *devlink = region->devlink;
+ struct devlink_snapshot *snapshot;
+ int err;
+
+ lockdep_assert_held(&devlink->lock);
+
+ /* check if region can hold one more snapshot */
+ if (region->cur_snapshots == region->max_snapshots)
+ return -ENOSPC;
+
+ if (devlink_region_snapshot_get_by_id(region, snapshot_id))
+ return -EEXIST;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
+ if (!snapshot)
+ return -ENOMEM;
+
+ err = __devlink_snapshot_id_increment(devlink, snapshot_id);
+ if (err)
+ goto err_snapshot_id_increment;
+
+ snapshot->id = snapshot_id;
+ snapshot->region = region;
+ snapshot->data = data;
+
+ list_add_tail(&snapshot->list, &region->snapshot_list);
+
+ region->cur_snapshots++;
+
+ devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_NEW);
+ return 0;
+
+err_snapshot_id_increment:
+ kfree(snapshot);
+ return err;
+}
+
static void devlink_region_snapshot_del(struct devlink_region *region,
struct devlink_snapshot *snapshot)
{
+ struct devlink *devlink = region->devlink;
+
+ lockdep_assert_held(&devlink->lock);
+
devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL);
region->cur_snapshots--;
list_del(&snapshot->list);
- (*snapshot->data_destructor)(snapshot->data);
+ region->ops->destructor(snapshot->data);
+ __devlink_snapshot_id_decrement(devlink, snapshot->id);
kfree(snapshot);
}
@@ -3877,6 +4065,71 @@ static int devlink_nl_cmd_region_del(struct sk_buff *skb,
return 0;
}
+static int
+devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_region *region;
+ const char *region_name;
+ u32 snapshot_id;
+ u8 *data;
+ int err;
+
+ if (!info->attrs[DEVLINK_ATTR_REGION_NAME]) {
+ NL_SET_ERR_MSG_MOD(info->extack, "No region name provided");
+ return -EINVAL;
+ }
+
+ if (!info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]) {
+ NL_SET_ERR_MSG_MOD(info->extack, "No snapshot id provided");
+ return -EINVAL;
+ }
+
+ region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
+ region = devlink_region_get_by_name(devlink, region_name);
+ if (!region) {
+ NL_SET_ERR_MSG_MOD(info->extack, "The requested region does not exist");
+ return -EINVAL;
+ }
+
+ if (!region->ops->snapshot) {
+ NL_SET_ERR_MSG_MOD(info->extack, "The requested region does not support taking an immediate snapshot");
+ return -EOPNOTSUPP;
+ }
+
+ if (region->cur_snapshots == region->max_snapshots) {
+ NL_SET_ERR_MSG_MOD(info->extack, "The region has reached the maximum number of stored snapshots");
+ return -ENOSPC;
+ }
+
+ snapshot_id = nla_get_u32(info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]);
+
+ if (devlink_region_snapshot_get_by_id(region, snapshot_id)) {
+ NL_SET_ERR_MSG_MOD(info->extack, "The requested snapshot id is already in use");
+ return -EEXIST;
+ }
+
+ err = __devlink_snapshot_id_insert(devlink, snapshot_id);
+ if (err)
+ return err;
+
+ err = region->ops->snapshot(devlink, info->extack, &data);
+ if (err)
+ goto err_snapshot_capture;
+
+ err = __devlink_region_snapshot_create(region, data, snapshot_id);
+ if (err)
+ goto err_snapshot_create;
+
+ return 0;
+
+err_snapshot_create:
+ region->ops->destructor(data);
+err_snapshot_capture:
+ __devlink_snapshot_id_decrement(devlink, snapshot_id);
+ return err;
+}
+
static int devlink_nl_cmd_region_read_chunk_fill(struct sk_buff *msg,
struct devlink *devlink,
u8 *chunk, u32 chunk_size,
@@ -4239,11 +4492,17 @@ struct devlink_fmsg_item {
int attrtype;
u8 nla_type;
u16 len;
- int value[0];
+ int value[];
};
struct devlink_fmsg {
struct list_head item_list;
+ bool putting_binary; /* This flag forces enclosing of binary data
+ * in array brackets. It forces use of
+ * the designated API:
+ * devlink_fmsg_binary_pair_nest_start()
+ * devlink_fmsg_binary_pair_nest_end()
+ */
};
static struct devlink_fmsg *devlink_fmsg_alloc(void)
@@ -4287,17 +4546,26 @@ static int devlink_fmsg_nest_common(struct devlink_fmsg *fmsg,
int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg)
{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_OBJ_NEST_START);
}
EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_start);
static int devlink_fmsg_nest_end(struct devlink_fmsg *fmsg)
{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
return devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_NEST_END);
}
int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg)
{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
return devlink_fmsg_nest_end(fmsg);
}
EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_end);
@@ -4308,6 +4576,9 @@ static int devlink_fmsg_put_name(struct devlink_fmsg *fmsg, const char *name)
{
struct devlink_fmsg_item *item;
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
if (strlen(name) + 1 > DEVLINK_FMSG_MAX_SIZE)
return -EMSGSIZE;
@@ -4328,6 +4599,9 @@ int devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name)
{
int err;
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
err = devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_PAIR_NEST_START);
if (err)
return err;
@@ -4342,6 +4616,9 @@ EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_start);
int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg)
{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
return devlink_fmsg_nest_end(fmsg);
}
EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_end);
@@ -4351,6 +4628,9 @@ int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
{
int err;
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
err = devlink_fmsg_pair_nest_start(fmsg, name);
if (err)
return err;
@@ -4367,6 +4647,9 @@ int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg)
{
int err;
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
err = devlink_fmsg_nest_end(fmsg);
if (err)
return err;
@@ -4379,6 +4662,30 @@ int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg)
}
EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_end);
+int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name)
+{
+ int err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ fmsg->putting_binary = true;
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_start);
+
+int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg)
+{
+ if (!fmsg->putting_binary)
+ return -EINVAL;
+
+ fmsg->putting_binary = false;
+ return devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_end);
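+
+/* Expected call pattern (a sketch; devlink_fmsg_binary_pair_put() below
+ * is the canonical user):
+ *
+ *	devlink_fmsg_binary_pair_nest_start(fmsg, name);
+ *	while (chunks remain)
+ *		devlink_fmsg_binary_put(fmsg, chunk, chunk_len);
+ *	devlink_fmsg_binary_pair_nest_end(fmsg);
+ */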
+
static int devlink_fmsg_put_value(struct devlink_fmsg *fmsg,
const void *value, u16 value_len,
u8 value_nla_type)
@@ -4403,40 +4710,59 @@ static int devlink_fmsg_put_value(struct devlink_fmsg *fmsg,
int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value)
{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_FLAG);
}
EXPORT_SYMBOL_GPL(devlink_fmsg_bool_put);
int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value)
{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U8);
}
EXPORT_SYMBOL_GPL(devlink_fmsg_u8_put);
int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value)
{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U32);
}
EXPORT_SYMBOL_GPL(devlink_fmsg_u32_put);
int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value)
{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
return devlink_fmsg_put_value(fmsg, &value, sizeof(value), NLA_U64);
}
EXPORT_SYMBOL_GPL(devlink_fmsg_u64_put);
int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value)
{
+ if (fmsg->putting_binary)
+ return -EINVAL;
+
return devlink_fmsg_put_value(fmsg, value, strlen(value) + 1,
NLA_NUL_STRING);
}
EXPORT_SYMBOL_GPL(devlink_fmsg_string_put);
-static int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
- u16 value_len)
+int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
+ u16 value_len)
{
+ if (!fmsg->putting_binary)
+ return -EINVAL;
+
return devlink_fmsg_put_value(fmsg, value, value_len, NLA_BINARY);
}
+EXPORT_SYMBOL_GPL(devlink_fmsg_binary_put);
int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
bool value)
@@ -4547,10 +4873,11 @@ int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
const void *value, u32 value_len)
{
u32 data_size;
+ int end_err;
u32 offset;
int err;
- err = devlink_fmsg_arr_pair_nest_start(fmsg, name);
+ err = devlink_fmsg_binary_pair_nest_start(fmsg, name);
if (err)
return err;
@@ -4560,14 +4887,18 @@ int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
data_size = DEVLINK_FMSG_MAX_SIZE;
err = devlink_fmsg_binary_put(fmsg, value + offset, data_size);
if (err)
- return err;
+ break;
+ /* Exit the loop with a break (instead of a
+ * return) so that putting_binary is always turned
+ * off in devlink_fmsg_binary_pair_nest_end, even on error.
+ */
}
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
- if (err)
- return err;
+ end_err = devlink_fmsg_binary_pair_nest_end(fmsg);
+ if (end_err)
+ err = end_err;
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_put);
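/* Editor's note: a minimal usage sketch (not part of this patch) for the
 * binary fmsg helpers above, assuming a hypothetical reporter dump
 * callback; my_dev_priv, dump_buf and dump_len are illustrative names.
 * devlink_fmsg_binary_pair_put() chunks the buffer internally and, with
 * the rework above, clears putting_binary even when a chunk fails.
 */
static int my_fw_dump(struct devlink_health_reporter *reporter,
		      struct devlink_fmsg *fmsg, void *priv_ctx,
		      struct netlink_ext_ack *extack)
{
	struct my_dev_priv *priv = devlink_health_reporter_priv(reporter);

	return devlink_fmsg_binary_pair_put(fmsg, "fw_dump",
					    priv->dump_buf, priv->dump_len);
}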
@@ -4758,6 +5089,7 @@ struct devlink_health_reporter {
struct mutex dump_lock; /* lock parallel read/write from dump buffers */
u64 graceful_period;
bool auto_recover;
+ bool auto_dump;
u8 health_state;
u64 dump_ts;
u64 dump_real_ts;
@@ -4793,14 +5125,12 @@ devlink_health_reporter_find_by_name(struct devlink *devlink,
* @devlink: devlink
* @ops: ops
* @graceful_period: to avoid recovery loops, in msecs
- * @auto_recover: auto recover when error occurs
* @priv: priv
*/
struct devlink_health_reporter *
devlink_health_reporter_create(struct devlink *devlink,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, bool auto_recover,
- void *priv)
+ u64 graceful_period, void *priv)
{
struct devlink_health_reporter *reporter;
@@ -4810,8 +5140,7 @@ devlink_health_reporter_create(struct devlink *devlink,
goto unlock;
}
- if (WARN_ON(auto_recover && !ops->recover) ||
- WARN_ON(graceful_period && !ops->recover)) {
+ if (WARN_ON(graceful_period && !ops->recover)) {
reporter = ERR_PTR(-EINVAL);
goto unlock;
}
@@ -4826,7 +5155,8 @@ devlink_health_reporter_create(struct devlink *devlink,
reporter->ops = ops;
reporter->devlink = devlink;
reporter->graceful_period = graceful_period;
- reporter->auto_recover = auto_recover;
+ reporter->auto_recover = !!ops->recover;
+ reporter->auto_dump = !!ops->dump;
mutex_init(&reporter->dump_lock);
refcount_set(&reporter->refcount, 1);
list_add_tail(&reporter->list, &devlink->reporter_list);
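/* Editor's note: illustrative sketch of the reworked constructor. The
 * auto_recover argument is gone; auto-recovery and auto-dump now default
 * to whether ->recover and ->dump ops are provided. my_fw_ops,
 * my_fw_recover and the 300 msec grace period are hypothetical.
 */
static const struct devlink_health_reporter_ops my_fw_ops = {
	.name = "fw",
	.recover = my_fw_recover,	/* presence enables auto_recover */
	.dump = my_fw_dump,		/* presence enables auto_dump */
};

reporter = devlink_health_reporter_create(devlink, &my_fw_ops, 300, priv);
if (IS_ERR(reporter))
	return PTR_ERR(reporter);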
@@ -4907,6 +5237,10 @@ devlink_nl_health_reporter_fill(struct sk_buff *msg,
nla_put_u64_64bit(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS,
reporter->dump_real_ts, DEVLINK_ATTR_PAD))
goto reporter_nest_cancel;
+ if (reporter->ops->dump &&
+ nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP,
+ reporter->auto_dump))
+ goto reporter_nest_cancel;
nla_nest_end(msg, reporter_attr);
genlmsg_end(msg, hdr);
@@ -5053,10 +5387,12 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
- mutex_lock(&reporter->dump_lock);
- /* store current dump of current error, for later analysis */
- devlink_health_do_dump(reporter, priv_ctx, NULL);
- mutex_unlock(&reporter->dump_lock);
+ if (reporter->auto_dump) {
+ mutex_lock(&reporter->dump_lock);
+ /* store current dump of current error, for later analysis */
+ devlink_health_do_dump(reporter, priv_ctx, NULL);
+ mutex_unlock(&reporter->dump_lock);
+ }
if (reporter->auto_recover)
return devlink_health_reporter_recover(reporter,
@@ -5230,6 +5566,11 @@ devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
err = -EOPNOTSUPP;
goto out;
}
+ if (!reporter->ops->dump &&
+ info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP]) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD])
reporter->graceful_period =
@@ -5239,6 +5580,10 @@ devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
reporter->auto_recover =
nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]);
+ if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP])
+ reporter->auto_dump =
+ nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP]);
+
devlink_health_reporter_put(reporter);
return 0;
out:
@@ -5375,18 +5720,35 @@ struct devlink_stats {
};
/**
+ * struct devlink_trap_policer_item - Packet trap policer attributes.
+ * @policer: Immutable packet trap policer attributes.
+ * @rate: Rate in packets / sec.
+ * @burst: Burst size in packets.
+ * @list: trap_policer_list member.
+ *
+ * Describes packet trap policer attributes. Created by devlink during trap
+ * policer registration.
+ */
+struct devlink_trap_policer_item {
+ const struct devlink_trap_policer *policer;
+ u64 rate;
+ u64 burst;
+ struct list_head list;
+};
+
+/**
* struct devlink_trap_group_item - Packet trap group attributes.
* @group: Immutable packet trap group attributes.
- * @refcount: Number of trap items using the group.
+ * @policer_item: Associated policer item. Can be NULL.
* @list: trap_group_list member.
* @stats: Trap group statistics.
*
* Describes packet trap group attributes. Created by devlink during trap
- * registration.
+ * group registration.
*/
struct devlink_trap_group_item {
const struct devlink_trap_group *group;
- refcount_t refcount;
+ struct devlink_trap_policer_item *policer_item;
struct list_head list;
struct devlink_stats __percpu *stats;
};
@@ -5412,6 +5774,19 @@ struct devlink_trap_item {
void *priv;
};
+static struct devlink_trap_policer_item *
+devlink_trap_policer_item_lookup(struct devlink *devlink, u32 id)
+{
+ struct devlink_trap_policer_item *policer_item;
+
+ list_for_each_entry(policer_item, &devlink->trap_policer_list, list) {
+ if (policer_item->policer->id == id)
+ return policer_item;
+ }
+
+ return NULL;
+}
+
static struct devlink_trap_item *
devlink_trap_item_lookup(struct devlink *devlink, const char *name)
{
@@ -5469,6 +5844,9 @@ static int devlink_trap_metadata_put(struct sk_buff *msg,
if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT) &&
nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT))
goto nla_put_failure;
+ if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE) &&
+ nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE))
+ goto nla_put_failure;
nla_nest_end(msg, attr);
@@ -5736,6 +6114,19 @@ devlink_trap_group_item_lookup(struct devlink *devlink, const char *name)
}
static struct devlink_trap_group_item *
+devlink_trap_group_item_lookup_by_id(struct devlink *devlink, u16 id)
+{
+ struct devlink_trap_group_item *group_item;
+
+ list_for_each_entry(group_item, &devlink->trap_group_list, list) {
+ if (group_item->group->id == id)
+ return group_item;
+ }
+
+ return NULL;
+}
+
+static struct devlink_trap_group_item *
devlink_trap_group_item_get_from_info(struct devlink *devlink,
struct genl_info *info)
{
@@ -5772,6 +6163,11 @@ devlink_nl_trap_group_fill(struct sk_buff *msg, struct devlink *devlink,
nla_put_flag(msg, DEVLINK_ATTR_TRAP_GENERIC))
goto nla_put_failure;
+ if (group_item->policer_item &&
+ nla_put_u32(msg, DEVLINK_ATTR_TRAP_POLICER_ID,
+ group_item->policer_item->policer->id))
+ goto nla_put_failure;
+
err = devlink_trap_stats_put(msg, group_item->stats);
if (err)
goto nla_put_failure;
@@ -5873,7 +6269,7 @@ __devlink_trap_group_action_set(struct devlink *devlink,
int err;
list_for_each_entry(trap_item, &devlink->trap_list, list) {
- if (strcmp(trap_item->trap->group.name, group_name))
+ if (strcmp(trap_item->group_item->group->name, group_name))
continue;
err = __devlink_trap_action_set(devlink, trap_item,
trap_action, extack);
@@ -5887,7 +6283,7 @@ __devlink_trap_group_action_set(struct devlink *devlink,
static int
devlink_trap_group_action_set(struct devlink *devlink,
struct devlink_trap_group_item *group_item,
- struct genl_info *info)
+ struct genl_info *info, bool *p_modified)
{
enum devlink_trap_action trap_action;
int err;
@@ -5906,6 +6302,47 @@ devlink_trap_group_action_set(struct devlink *devlink,
if (err)
return err;
+ *p_modified = true;
+
+ return 0;
+}
+
+static int devlink_trap_group_set(struct devlink *devlink,
+ struct devlink_trap_group_item *group_item,
+ struct genl_info *info)
+{
+ struct devlink_trap_policer_item *policer_item;
+ struct netlink_ext_ack *extack = info->extack;
+ const struct devlink_trap_policer *policer;
+ struct nlattr **attrs = info->attrs;
+ int err;
+
+ if (!attrs[DEVLINK_ATTR_TRAP_POLICER_ID])
+ return 0;
+
+ if (!devlink->ops->trap_group_set)
+ return -EOPNOTSUPP;
+
+ policer_item = group_item->policer_item;
+ if (attrs[DEVLINK_ATTR_TRAP_POLICER_ID]) {
+ u32 policer_id;
+
+ policer_id = nla_get_u32(attrs[DEVLINK_ATTR_TRAP_POLICER_ID]);
+ policer_item = devlink_trap_policer_item_lookup(devlink,
+ policer_id);
+ if (policer_id && !policer_item) {
+ NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
+ return -ENOENT;
+ }
+ }
+ policer = policer_item ? policer_item->policer : NULL;
+
+ err = devlink->ops->trap_group_set(devlink, group_item->group, policer);
+ if (err)
+ return err;
+
+ group_item->policer_item = policer_item;
+
return 0;
}
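/* Editor's note: sketch of the driver side of the ->trap_group_set() call
 * above. A NULL policer means the group should be unbound from any
 * policer; my_hw_bind_policer() is a stand-in for device programming.
 */
static int my_trap_group_set(struct devlink *devlink,
			     const struct devlink_trap_group *group,
			     const struct devlink_trap_policer *policer)
{
	u32 hw_policer_id = policer ? policer->id : 0;

	return my_hw_bind_policer(devlink_priv(devlink), group->id,
				  hw_policer_id);
}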
@@ -5915,6 +6352,7 @@ static int devlink_nl_cmd_trap_group_set_doit(struct sk_buff *skb,
struct netlink_ext_ack *extack = info->extack;
struct devlink *devlink = info->user_ptr[0];
struct devlink_trap_group_item *group_item;
+ bool modified = false;
int err;
if (list_empty(&devlink->trap_group_list))
@@ -5926,14 +6364,262 @@ static int devlink_nl_cmd_trap_group_set_doit(struct sk_buff *skb,
return -ENOENT;
}
- err = devlink_trap_group_action_set(devlink, group_item, info);
+ err = devlink_trap_group_action_set(devlink, group_item, info,
+ &modified);
+ if (err)
+ return err;
+
+ err = devlink_trap_group_set(devlink, group_item, info);
+ if (err)
+ goto err_trap_group_set;
+
+ return 0;
+
+err_trap_group_set:
+ if (modified)
+ NL_SET_ERR_MSG_MOD(extack, "Trap group set failed, but some changes were committed already");
+ return err;
+}
+
+static struct devlink_trap_policer_item *
+devlink_trap_policer_item_get_from_info(struct devlink *devlink,
+ struct genl_info *info)
+{
+ u32 id;
+
+ if (!info->attrs[DEVLINK_ATTR_TRAP_POLICER_ID])
+ return NULL;
+ id = nla_get_u32(info->attrs[DEVLINK_ATTR_TRAP_POLICER_ID]);
+
+ return devlink_trap_policer_item_lookup(devlink, id);
+}
+
+static int
+devlink_trap_policer_stats_put(struct sk_buff *msg, struct devlink *devlink,
+ const struct devlink_trap_policer *policer)
+{
+ struct nlattr *attr;
+ u64 drops;
+ int err;
+
+ if (!devlink->ops->trap_policer_counter_get)
+ return 0;
+
+ err = devlink->ops->trap_policer_counter_get(devlink, policer, &drops);
+ if (err)
+ return err;
+
+ attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
+ if (!attr)
+ return -EMSGSIZE;
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops,
+ DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, attr);
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, attr);
+ return -EMSGSIZE;
+}
+
+static int
+devlink_nl_trap_policer_fill(struct sk_buff *msg, struct devlink *devlink,
+ const struct devlink_trap_policer_item *policer_item,
+ enum devlink_command cmd, u32 portid, u32 seq,
+ int flags)
+{
+ void *hdr;
+ int err;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, DEVLINK_ATTR_TRAP_POLICER_ID,
+ policer_item->policer->id))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_TRAP_POLICER_RATE,
+ policer_item->rate, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_TRAP_POLICER_BURST,
+ policer_item->burst, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ err = devlink_trap_policer_stats_put(msg, devlink,
+ policer_item->policer);
+ if (err)
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static int devlink_nl_cmd_trap_policer_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_trap_policer_item *policer_item;
+ struct netlink_ext_ack *extack = info->extack;
+ struct devlink *devlink = info->user_ptr[0];
+ struct sk_buff *msg;
+ int err;
+
+ if (list_empty(&devlink->trap_policer_list))
+ return -EOPNOTSUPP;
+
+ policer_item = devlink_trap_policer_item_get_from_info(devlink, info);
+ if (!policer_item) {
+ NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
+ return -ENOENT;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_trap_policer_fill(msg, devlink, policer_item,
+ DEVLINK_CMD_TRAP_POLICER_NEW,
+ info->snd_portid, info->snd_seq, 0);
+ if (err)
+ goto err_trap_policer_fill;
+
+ return genlmsg_reply(msg, info);
+
+err_trap_policer_fill:
+ nlmsg_free(msg);
+ return err;
+}
+
+static int devlink_nl_cmd_trap_policer_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ enum devlink_command cmd = DEVLINK_CMD_TRAP_POLICER_NEW;
+ struct devlink_trap_policer_item *policer_item;
+ u32 portid = NETLINK_CB(cb->skb).portid;
+ struct devlink *devlink;
+ int start = cb->args[0];
+ int idx = 0;
+ int err;
+
+ mutex_lock(&devlink_mutex);
+ list_for_each_entry(devlink, &devlink_list, list) {
+ if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+ continue;
+ mutex_lock(&devlink->lock);
+ list_for_each_entry(policer_item, &devlink->trap_policer_list,
+ list) {
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_trap_policer_fill(msg, devlink,
+ policer_item, cmd,
+ portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI);
+ if (err) {
+ mutex_unlock(&devlink->lock);
+ goto out;
+ }
+ idx++;
+ }
+ mutex_unlock(&devlink->lock);
+ }
+out:
+ mutex_unlock(&devlink_mutex);
+
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+static int
+devlink_trap_policer_set(struct devlink *devlink,
+ struct devlink_trap_policer_item *policer_item,
+ struct genl_info *info)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ struct nlattr **attrs = info->attrs;
+ u64 rate, burst;
+ int err;
+
+ rate = policer_item->rate;
+ burst = policer_item->burst;
+
+ if (attrs[DEVLINK_ATTR_TRAP_POLICER_RATE])
+ rate = nla_get_u64(attrs[DEVLINK_ATTR_TRAP_POLICER_RATE]);
+
+ if (attrs[DEVLINK_ATTR_TRAP_POLICER_BURST])
+ burst = nla_get_u64(attrs[DEVLINK_ATTR_TRAP_POLICER_BURST]);
+
+ if (rate < policer_item->policer->min_rate) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer rate lower than limit");
+ return -EINVAL;
+ }
+
+ if (rate > policer_item->policer->max_rate) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer rate higher than limit");
+ return -EINVAL;
+ }
+
+ if (burst < policer_item->policer->min_burst) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer burst size lower than limit");
+ return -EINVAL;
+ }
+
+ if (burst > policer_item->policer->max_burst) {
+ NL_SET_ERR_MSG_MOD(extack, "Policer burst size higher than limit");
+ return -EINVAL;
+ }
+
+ err = devlink->ops->trap_policer_set(devlink, policer_item->policer,
+ rate, burst, info->extack);
if (err)
return err;
+ policer_item->rate = rate;
+ policer_item->burst = burst;
+
return 0;
}
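/* Editor's note: hedged sketch of a driver ->trap_policer_set() op. By
 * the time it is called, devlink_trap_policer_set() above has already
 * range-checked rate and burst against the policer's min/max limits, so
 * only device-specific constraints remain. my_hw_policer_setup() is
 * illustrative.
 */
static int my_trap_policer_set(struct devlink *devlink,
			       const struct devlink_trap_policer *policer,
			       u64 rate, u64 burst,
			       struct netlink_ext_ack *extack)
{
	if (!is_power_of_2(burst)) {
		NL_SET_ERR_MSG_MOD(extack, "Burst size must be a power of 2");
		return -EINVAL;
	}

	return my_hw_policer_setup(devlink_priv(devlink), policer->id,
				   rate, burst);
}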
+static int devlink_nl_cmd_trap_policer_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_trap_policer_item *policer_item;
+ struct netlink_ext_ack *extack = info->extack;
+ struct devlink *devlink = info->user_ptr[0];
+
+ if (list_empty(&devlink->trap_policer_list))
+ return -EOPNOTSUPP;
+
+ if (!devlink->ops->trap_policer_set)
+ return -EOPNOTSUPP;
+
+ policer_item = devlink_trap_policer_item_get_from_info(devlink, info);
+ if (!policer_item) {
+ NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap policer");
+ return -ENOENT;
+ }
+
+ return devlink_trap_policer_set(devlink, policer_item, info);
+}
+
static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_UNSPEC] = { .strict_start_type =
+ DEVLINK_ATTR_TRAP_POLICER_ID },
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
@@ -5971,6 +6657,10 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_NETNS_PID] = { .type = NLA_U32 },
[DEVLINK_ATTR_NETNS_FD] = { .type = NLA_U32 },
[DEVLINK_ATTR_NETNS_ID] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP] = { .type = NLA_U8 },
+ [DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32 },
+ [DEVLINK_ATTR_TRAP_POLICER_RATE] = { .type = NLA_U64 },
+ [DEVLINK_ATTR_TRAP_POLICER_BURST] = { .type = NLA_U64 },
};
static const struct genl_ops devlink_nl_ops[] = {
@@ -6094,7 +6784,8 @@ static const struct genl_ops devlink_nl_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_eswitch_get_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+ DEVLINK_NL_FLAG_NO_LOCK,
},
{
.cmd = DEVLINK_CMD_ESWITCH_SET,
@@ -6193,6 +6884,13 @@ static const struct genl_ops devlink_nl_ops[] = {
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
+ .cmd = DEVLINK_CMD_REGION_NEW,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_region_new,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
+ {
.cmd = DEVLINK_CMD_REGION_DEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_region_del,
@@ -6298,6 +6996,19 @@ static const struct genl_ops devlink_nl_ops[] = {
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
+ {
+ .cmd = DEVLINK_CMD_TRAP_POLICER_GET,
+ .doit = devlink_nl_cmd_trap_policer_get_doit,
+ .dumpit = devlink_nl_cmd_trap_policer_get_dumpit,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_TRAP_POLICER_SET,
+ .doit = devlink_nl_cmd_trap_policer_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+ },
};
static struct genl_family devlink_nl_family __ro_after_init = {
@@ -6335,6 +7046,7 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
if (!devlink)
return NULL;
devlink->ops = ops;
+ xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
__devlink_net_set(devlink, &init_net);
INIT_LIST_HEAD(&devlink->port_list);
INIT_LIST_HEAD(&devlink->sb_list);
@@ -6345,6 +7057,7 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
INIT_LIST_HEAD(&devlink->reporter_list);
INIT_LIST_HEAD(&devlink->trap_list);
INIT_LIST_HEAD(&devlink->trap_group_list);
+ INIT_LIST_HEAD(&devlink->trap_policer_list);
mutex_init(&devlink->lock);
mutex_init(&devlink->reporters_lock);
return devlink;
@@ -6429,6 +7142,7 @@ void devlink_free(struct devlink *devlink)
{
mutex_destroy(&devlink->reporters_lock);
mutex_destroy(&devlink->lock);
+ WARN_ON(!list_empty(&devlink->trap_policer_list));
WARN_ON(!list_empty(&devlink->trap_group_list));
WARN_ON(!list_empty(&devlink->trap_list));
WARN_ON(!list_empty(&devlink->reporter_list));
@@ -6439,6 +7153,8 @@ void devlink_free(struct devlink *devlink)
WARN_ON(!list_empty(&devlink->sb_list));
WARN_ON(!list_empty(&devlink->port_list));
+ xa_destroy(&devlink->snapshot_ids);
+
kfree(devlink);
}
EXPORT_SYMBOL_GPL(devlink_free);
@@ -6734,6 +7450,7 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
switch (attrs->flavour) {
case DEVLINK_PORT_FLAVOUR_PHYSICAL:
+ case DEVLINK_PORT_FLAVOUR_VIRTUAL:
if (!attrs->split)
n = snprintf(name, len, "p%u", attrs->phys.port_number);
else
@@ -7553,21 +8270,24 @@ EXPORT_SYMBOL_GPL(devlink_param_value_str_fill);
* devlink_region_create - create a new address region
*
* @devlink: devlink
- * @region_name: region name
+ * @ops: region operations and name
* @region_max_snapshots: Maximum supported number of snapshots for region
* @region_size: size of region
*/
-struct devlink_region *devlink_region_create(struct devlink *devlink,
- const char *region_name,
- u32 region_max_snapshots,
- u64 region_size)
+struct devlink_region *
+devlink_region_create(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size)
{
struct devlink_region *region;
int err = 0;
+ if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
+ return ERR_PTR(-EINVAL);
+
mutex_lock(&devlink->lock);
- if (devlink_region_get_by_name(devlink, region_name)) {
+ if (devlink_region_get_by_name(devlink, ops->name)) {
err = -EEXIST;
goto unlock;
}
@@ -7580,7 +8300,7 @@ struct devlink_region *devlink_region_create(struct devlink *devlink,
region->devlink = devlink;
region->max_snapshots = region_max_snapshots;
- region->name = region_name;
+ region->ops = ops;
region->size = region_size;
INIT_LIST_HEAD(&region->snapshot_list);
list_add_tail(&region->list, &devlink->region_list);
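/* Editor's note: illustrative sketch of the new registration flow. The
 * region name now lives in an ops structure together with a mandatory
 * destructor for snapshot data; "fw-health" and the limit constants are
 * made up for this example.
 */
static const struct devlink_region_ops my_region_ops = {
	.name = "fw-health",
	.destructor = kfree,	/* frees snapshot data when it is deleted */
};

region = devlink_region_create(devlink, &my_region_ops,
			       MY_MAX_SNAPSHOTS, MY_REGION_SIZE);
if (IS_ERR(region))
	return PTR_ERR(region);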
@@ -7626,75 +8346,66 @@ EXPORT_SYMBOL_GPL(devlink_region_destroy);
* Driver should use the same id for multiple snapshots taken
* on multiple regions at the same time/by the same trigger.
*
+ * The caller of this function must use devlink_region_snapshot_id_put
+ * when finished creating snapshots with this id.
+ *
+ * Returns zero on success, or a negative error code on failure.
+ *
* @devlink: devlink
+ * @id: storage to return id
*/
-u32 devlink_region_snapshot_id_get(struct devlink *devlink)
+int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
{
- u32 id;
+ int err;
mutex_lock(&devlink->lock);
- id = ++devlink->snapshot_id;
+ err = __devlink_region_snapshot_id_get(devlink, id);
mutex_unlock(&devlink->lock);
- return id;
+ return err;
}
EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);
/**
+ * devlink_region_snapshot_id_put - put snapshot ID reference
+ *
+ * This should be called by a driver once it has finished creating
+ * snapshots with an id. Doing so ensures that the id can be released
+ * once all snapshots using it have been destroyed.
+ *
+ * @devlink: devlink
+ * @id: id to release reference on
+ */
+void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id)
+{
+ mutex_lock(&devlink->lock);
+ __devlink_snapshot_id_decrement(devlink, id);
+ mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_put);
+
+/**
* devlink_region_snapshot_create - create a new snapshot
* This will add a new snapshot of a region. The snapshot
* will be stored on the region struct and can be accessed
- * from devlink. This is useful for future analyses of snapshots.
+ * from devlink. This is useful for future analyses of snapshots.
* Multiple snapshots can be created on a region.
* The @snapshot_id should be obtained using the getter function.
*
* @region: devlink region of the snapshot
* @data: snapshot data
* @snapshot_id: snapshot id to be created
- * @data_destructor: pointer to destructor function to free data
*/
int devlink_region_snapshot_create(struct devlink_region *region,
- u8 *data, u32 snapshot_id,
- devlink_snapshot_data_dest_t *data_destructor)
+ u8 *data, u32 snapshot_id)
{
struct devlink *devlink = region->devlink;
- struct devlink_snapshot *snapshot;
int err;
mutex_lock(&devlink->lock);
-
- /* check if region can hold one more snapshot */
- if (region->cur_snapshots == region->max_snapshots) {
- err = -ENOMEM;
- goto unlock;
- }
-
- if (devlink_region_snapshot_get_by_id(region, snapshot_id)) {
- err = -EEXIST;
- goto unlock;
- }
-
- snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
- if (!snapshot) {
- err = -ENOMEM;
- goto unlock;
- }
-
- snapshot->id = snapshot_id;
- snapshot->region = region;
- snapshot->data = data;
- snapshot->data_destructor = data_destructor;
-
- list_add_tail(&snapshot->list, &region->snapshot_list);
-
- region->cur_snapshots++;
-
- devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_NEW);
+ err = __devlink_region_snapshot_create(region, data, snapshot_id);
mutex_unlock(&devlink->lock);
- return 0;
-unlock:
- mutex_unlock(&devlink->lock);
return err;
}
EXPORT_SYMBOL_GPL(devlink_region_snapshot_create);
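/* Editor's note: hedged sketch of the new snapshot id lifecycle. The id
 * now carries a reference that the driver must drop with
 * devlink_region_snapshot_id_put() when done; a successfully created
 * snapshot appears to keep its own reference. my_capture() is
 * illustrative and error unwinding is kept minimal.
 */
static int my_take_snapshot(struct devlink *devlink,
			    struct devlink_region *region)
{
	u8 *data;
	u32 id;
	int err;

	err = devlink_region_snapshot_id_get(devlink, &id);
	if (err)
		return err;

	data = my_capture(devlink);
	err = devlink_region_snapshot_create(region, data, id);
	if (err)
		kfree(data);

	devlink_region_snapshot_id_put(devlink, id);
	return err;
}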
@@ -7734,6 +8445,8 @@ static const struct devlink_trap devlink_trap_generic[] = {
DEVLINK_TRAP(NON_ROUTABLE, DROP),
DEVLINK_TRAP(DECAP_ERROR, EXCEPTION),
DEVLINK_TRAP(OVERLAY_SMAC_MC, DROP),
+ DEVLINK_TRAP(INGRESS_FLOW_ACTION_DROP, DROP),
+ DEVLINK_TRAP(EGRESS_FLOW_ACTION_DROP, DROP),
};
#define DEVLINK_TRAP_GROUP(_id) \
@@ -7747,6 +8460,7 @@ static const struct devlink_trap_group devlink_trap_group_generic[] = {
DEVLINK_TRAP_GROUP(L3_DROPS),
DEVLINK_TRAP_GROUP(BUFFER_DROPS),
DEVLINK_TRAP_GROUP(TUNNEL_DROPS),
+ DEVLINK_TRAP_GROUP(ACL_DROPS),
};
static int devlink_trap_generic_verify(const struct devlink_trap *trap)
@@ -7780,7 +8494,7 @@ static int devlink_trap_driver_verify(const struct devlink_trap *trap)
static int devlink_trap_verify(const struct devlink_trap *trap)
{
- if (!trap || !trap->name || !trap->group.name)
+ if (!trap || !trap->name)
return -EINVAL;
if (trap->generic)
@@ -7851,108 +8565,22 @@ devlink_trap_group_notify(struct devlink *devlink,
msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
}
-static struct devlink_trap_group_item *
-devlink_trap_group_item_create(struct devlink *devlink,
- const struct devlink_trap_group *group)
-{
- struct devlink_trap_group_item *group_item;
- int err;
-
- err = devlink_trap_group_verify(group);
- if (err)
- return ERR_PTR(err);
-
- group_item = kzalloc(sizeof(*group_item), GFP_KERNEL);
- if (!group_item)
- return ERR_PTR(-ENOMEM);
-
- group_item->stats = netdev_alloc_pcpu_stats(struct devlink_stats);
- if (!group_item->stats) {
- err = -ENOMEM;
- goto err_stats_alloc;
- }
-
- group_item->group = group;
- refcount_set(&group_item->refcount, 1);
-
- if (devlink->ops->trap_group_init) {
- err = devlink->ops->trap_group_init(devlink, group);
- if (err)
- goto err_group_init;
- }
-
- list_add_tail(&group_item->list, &devlink->trap_group_list);
- devlink_trap_group_notify(devlink, group_item,
- DEVLINK_CMD_TRAP_GROUP_NEW);
-
- return group_item;
-
-err_group_init:
- free_percpu(group_item->stats);
-err_stats_alloc:
- kfree(group_item);
- return ERR_PTR(err);
-}
-
-static void
-devlink_trap_group_item_destroy(struct devlink *devlink,
- struct devlink_trap_group_item *group_item)
-{
- devlink_trap_group_notify(devlink, group_item,
- DEVLINK_CMD_TRAP_GROUP_DEL);
- list_del(&group_item->list);
- free_percpu(group_item->stats);
- kfree(group_item);
-}
-
-static struct devlink_trap_group_item *
-devlink_trap_group_item_get(struct devlink *devlink,
- const struct devlink_trap_group *group)
-{
- struct devlink_trap_group_item *group_item;
-
- group_item = devlink_trap_group_item_lookup(devlink, group->name);
- if (group_item) {
- refcount_inc(&group_item->refcount);
- return group_item;
- }
-
- return devlink_trap_group_item_create(devlink, group);
-}
-
-static void
-devlink_trap_group_item_put(struct devlink *devlink,
- struct devlink_trap_group_item *group_item)
-{
- if (!refcount_dec_and_test(&group_item->refcount))
- return;
-
- devlink_trap_group_item_destroy(devlink, group_item);
-}
-
static int
devlink_trap_item_group_link(struct devlink *devlink,
struct devlink_trap_item *trap_item)
{
+ u16 group_id = trap_item->trap->init_group_id;
struct devlink_trap_group_item *group_item;
- group_item = devlink_trap_group_item_get(devlink,
- &trap_item->trap->group);
- if (IS_ERR(group_item))
- return PTR_ERR(group_item);
+ group_item = devlink_trap_group_item_lookup_by_id(devlink, group_id);
+ if (WARN_ON_ONCE(!group_item))
+ return -EINVAL;
trap_item->group_item = group_item;
return 0;
}
-static void
-devlink_trap_item_group_unlink(struct devlink *devlink,
- struct devlink_trap_item *trap_item)
-{
- devlink_trap_group_item_put(devlink, trap_item->group_item);
-}
-
static void devlink_trap_notify(struct devlink *devlink,
const struct devlink_trap_item *trap_item,
enum devlink_command cmd)
@@ -8015,7 +8643,6 @@ devlink_trap_register(struct devlink *devlink,
return 0;
err_trap_init:
- devlink_trap_item_group_unlink(devlink, trap_item);
err_group_link:
free_percpu(trap_item->stats);
err_stats_alloc:
@@ -8036,7 +8663,6 @@ static void devlink_trap_unregister(struct devlink *devlink,
list_del(&trap_item->list);
if (devlink->ops->trap_fini)
devlink->ops->trap_fini(devlink, trap, trap_item);
- devlink_trap_item_group_unlink(devlink, trap_item);
free_percpu(trap_item->stats);
kfree(trap_item);
}
@@ -8138,12 +8764,14 @@ devlink_trap_stats_update(struct devlink_stats __percpu *trap_stats,
static void
devlink_trap_report_metadata_fill(struct net_dm_hw_metadata *hw_metadata,
const struct devlink_trap_item *trap_item,
- struct devlink_port *in_devlink_port)
+ struct devlink_port *in_devlink_port,
+ const struct flow_action_cookie *fa_cookie)
{
struct devlink_trap_group_item *group_item = trap_item->group_item;
hw_metadata->trap_group_name = group_item->group->name;
hw_metadata->trap_name = trap_item->trap->name;
+ hw_metadata->fa_cookie = fa_cookie;
spin_lock(&in_devlink_port->type_lock);
if (in_devlink_port->type == DEVLINK_PORT_TYPE_ETH)
@@ -8157,9 +8785,12 @@ devlink_trap_report_metadata_fill(struct net_dm_hw_metadata *hw_metadata,
* @skb: Trapped packet.
* @trap_ctx: Trap context.
* @in_devlink_port: Input devlink port.
+ * @fa_cookie: Flow action cookie. Could be NULL.
*/
void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb,
- void *trap_ctx, struct devlink_port *in_devlink_port)
+ void *trap_ctx, struct devlink_port *in_devlink_port,
+ const struct flow_action_cookie *fa_cookie)
+
{
struct devlink_trap_item *trap_item = trap_ctx;
struct net_dm_hw_metadata hw_metadata = {};
@@ -8168,7 +8799,7 @@ void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb,
devlink_trap_stats_update(trap_item->group_item->stats, skb->len);
devlink_trap_report_metadata_fill(&hw_metadata, trap_item,
- in_devlink_port);
+ in_devlink_port, fa_cookie);
net_dm_hw_report(skb, &hw_metadata);
}
EXPORT_SYMBOL_GPL(devlink_trap_report);
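/* Editor's note: sketch of a driver report path with the new cookie
 * argument; passing NULL preserves the old behaviour, while ACL traps
 * can attach the flow action cookie they matched on. my_cookie_lookup()
 * and the port layout are hypothetical.
 */
static void my_trap_rx(struct my_port *port, struct sk_buff *skb,
		       void *trap_ctx, u32 hw_cookie_index)
{
	const struct flow_action_cookie *fa_cookie;

	fa_cookie = my_cookie_lookup(port, hw_cookie_index); /* may be NULL */
	devlink_trap_report(port->devlink, skb, trap_ctx,
			    port->devlink_port, fa_cookie);
}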
@@ -8187,6 +8818,288 @@ void *devlink_trap_ctx_priv(void *trap_ctx)
}
EXPORT_SYMBOL_GPL(devlink_trap_ctx_priv);
+static int
+devlink_trap_group_item_policer_link(struct devlink *devlink,
+ struct devlink_trap_group_item *group_item)
+{
+ u32 policer_id = group_item->group->init_policer_id;
+ struct devlink_trap_policer_item *policer_item;
+
+ if (policer_id == 0)
+ return 0;
+
+ policer_item = devlink_trap_policer_item_lookup(devlink, policer_id);
+ if (WARN_ON_ONCE(!policer_item))
+ return -EINVAL;
+
+ group_item->policer_item = policer_item;
+
+ return 0;
+}
+
+static int
+devlink_trap_group_register(struct devlink *devlink,
+ const struct devlink_trap_group *group)
+{
+ struct devlink_trap_group_item *group_item;
+ int err;
+
+ if (devlink_trap_group_item_lookup(devlink, group->name))
+ return -EEXIST;
+
+ group_item = kzalloc(sizeof(*group_item), GFP_KERNEL);
+ if (!group_item)
+ return -ENOMEM;
+
+ group_item->stats = netdev_alloc_pcpu_stats(struct devlink_stats);
+ if (!group_item->stats) {
+ err = -ENOMEM;
+ goto err_stats_alloc;
+ }
+
+ group_item->group = group;
+
+ err = devlink_trap_group_item_policer_link(devlink, group_item);
+ if (err)
+ goto err_policer_link;
+
+ if (devlink->ops->trap_group_init) {
+ err = devlink->ops->trap_group_init(devlink, group);
+ if (err)
+ goto err_group_init;
+ }
+
+ list_add_tail(&group_item->list, &devlink->trap_group_list);
+ devlink_trap_group_notify(devlink, group_item,
+ DEVLINK_CMD_TRAP_GROUP_NEW);
+
+ return 0;
+
+err_group_init:
+err_policer_link:
+ free_percpu(group_item->stats);
+err_stats_alloc:
+ kfree(group_item);
+ return err;
+}
+
+static void
+devlink_trap_group_unregister(struct devlink *devlink,
+ const struct devlink_trap_group *group)
+{
+ struct devlink_trap_group_item *group_item;
+
+ group_item = devlink_trap_group_item_lookup(devlink, group->name);
+ if (WARN_ON_ONCE(!group_item))
+ return;
+
+ devlink_trap_group_notify(devlink, group_item,
+ DEVLINK_CMD_TRAP_GROUP_DEL);
+ list_del(&group_item->list);
+ free_percpu(group_item->stats);
+ kfree(group_item);
+}
+
+/**
+ * devlink_trap_groups_register - Register packet trap groups with devlink.
+ * @devlink: devlink.
+ * @groups: Packet trap groups.
+ * @groups_count: Count of provided packet trap groups.
+ *
+ * Return: Non-zero value on failure.
+ */
+int devlink_trap_groups_register(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count)
+{
+ int i, err;
+
+ mutex_lock(&devlink->lock);
+ for (i = 0; i < groups_count; i++) {
+ const struct devlink_trap_group *group = &groups[i];
+
+ err = devlink_trap_group_verify(group);
+ if (err)
+ goto err_trap_group_verify;
+
+ err = devlink_trap_group_register(devlink, group);
+ if (err)
+ goto err_trap_group_register;
+ }
+ mutex_unlock(&devlink->lock);
+
+ return 0;
+
+err_trap_group_register:
+err_trap_group_verify:
+ for (i--; i >= 0; i--)
+ devlink_trap_group_unregister(devlink, &groups[i]);
+ mutex_unlock(&devlink->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_trap_groups_register);
+
+/**
+ * devlink_trap_groups_unregister - Unregister packet trap groups from devlink.
+ * @devlink: devlink.
+ * @groups: Packet trap groups.
+ * @groups_count: Count of provided packet trap groups.
+ */
+void devlink_trap_groups_unregister(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count)
+{
+ int i;
+
+ mutex_lock(&devlink->lock);
+ for (i = groups_count - 1; i >= 0; i--)
+ devlink_trap_group_unregister(devlink, &groups[i]);
+ mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devlink_trap_groups_unregister);
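/* Editor's note: illustrative registration of a trap group array with
 * the new API. Field names follow this patchset (init_policer_id links
 * the group to a policer registered beforehand); MY_POLICER_ID is a
 * hypothetical driver constant.
 */
static const struct devlink_trap_group my_trap_groups[] = {
	{
		.name = "acl_drops",
		.id = DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS,
		.generic = true,
		.init_policer_id = MY_POLICER_ID,
	},
};

err = devlink_trap_groups_register(devlink, my_trap_groups,
				   ARRAY_SIZE(my_trap_groups));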
+
+static void
+devlink_trap_policer_notify(struct devlink *devlink,
+ const struct devlink_trap_policer_item *policer_item,
+ enum devlink_command cmd)
+{
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_POLICER_NEW &&
+ cmd != DEVLINK_CMD_TRAP_POLICER_DEL);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ err = devlink_nl_trap_policer_fill(msg, devlink, policer_item, cmd, 0,
+ 0, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+ msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+static int
+devlink_trap_policer_register(struct devlink *devlink,
+ const struct devlink_trap_policer *policer)
+{
+ struct devlink_trap_policer_item *policer_item;
+ int err;
+
+ if (devlink_trap_policer_item_lookup(devlink, policer->id))
+ return -EEXIST;
+
+ policer_item = kzalloc(sizeof(*policer_item), GFP_KERNEL);
+ if (!policer_item)
+ return -ENOMEM;
+
+ policer_item->policer = policer;
+ policer_item->rate = policer->init_rate;
+ policer_item->burst = policer->init_burst;
+
+ if (devlink->ops->trap_policer_init) {
+ err = devlink->ops->trap_policer_init(devlink, policer);
+ if (err)
+ goto err_policer_init;
+ }
+
+ list_add_tail(&policer_item->list, &devlink->trap_policer_list);
+ devlink_trap_policer_notify(devlink, policer_item,
+ DEVLINK_CMD_TRAP_POLICER_NEW);
+
+ return 0;
+
+err_policer_init:
+ kfree(policer_item);
+ return err;
+}
+
+static void
+devlink_trap_policer_unregister(struct devlink *devlink,
+ const struct devlink_trap_policer *policer)
+{
+ struct devlink_trap_policer_item *policer_item;
+
+ policer_item = devlink_trap_policer_item_lookup(devlink, policer->id);
+ if (WARN_ON_ONCE(!policer_item))
+ return;
+
+ devlink_trap_policer_notify(devlink, policer_item,
+ DEVLINK_CMD_TRAP_POLICER_DEL);
+ list_del(&policer_item->list);
+ if (devlink->ops->trap_policer_fini)
+ devlink->ops->trap_policer_fini(devlink, policer);
+ kfree(policer_item);
+}
+
+/**
+ * devlink_trap_policers_register - Register packet trap policers with devlink.
+ * @devlink: devlink.
+ * @policers: Packet trap policers.
+ * @policers_count: Count of provided packet trap policers.
+ *
+ * Return: Non-zero value on failure.
+ */
+int
+devlink_trap_policers_register(struct devlink *devlink,
+ const struct devlink_trap_policer *policers,
+ size_t policers_count)
+{
+ int i, err;
+
+ mutex_lock(&devlink->lock);
+ for (i = 0; i < policers_count; i++) {
+ const struct devlink_trap_policer *policer = &policers[i];
+
+ if (WARN_ON(policer->id == 0 ||
+ policer->max_rate < policer->min_rate ||
+ policer->max_burst < policer->min_burst)) {
+ err = -EINVAL;
+ goto err_trap_policer_verify;
+ }
+
+ err = devlink_trap_policer_register(devlink, policer);
+ if (err)
+ goto err_trap_policer_register;
+ }
+ mutex_unlock(&devlink->lock);
+
+ return 0;
+
+err_trap_policer_register:
+err_trap_policer_verify:
+ for (i--; i >= 0; i--)
+ devlink_trap_policer_unregister(devlink, &policers[i]);
+ mutex_unlock(&devlink->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_trap_policers_register);
+
+/**
+ * devlink_trap_policers_unregister - Unregister packet trap policers from devlink.
+ * @devlink: devlink.
+ * @policers: Packet trap policers.
+ * @policers_count: Count of provided packet trap policers.
+ */
+void
+devlink_trap_policers_unregister(struct devlink *devlink,
+ const struct devlink_trap_policer *policers,
+ size_t policers_count)
+{
+ int i;
+
+ mutex_lock(&devlink->lock);
+ for (i = policers_count - 1; i >= 0; i--)
+ devlink_trap_policer_unregister(devlink, &policers[i]);
+ mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devlink_trap_policers_unregister);
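/* Editor's note: illustrative policer registration with the new API;
 * limits and initial values are made up. A policer id of 0 is reserved
 * (it means "no policer"), which is why the verification above rejects
 * it.
 */
static const struct devlink_trap_policer my_trap_policers[] = {
	{
		.id = MY_POLICER_ID,	/* must be non-zero */
		.init_rate = 1024,	/* packets / sec */
		.init_burst = 128,	/* packets */
		.max_rate = 1000 * 1000,
		.min_rate = 1,
		.max_burst = 4096,
		.min_burst = 1,
	},
};

err = devlink_trap_policers_register(devlink, my_trap_policers,
				     ARRAY_SIZE(my_trap_policers));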
+
static void __devlink_compat_running_version(struct devlink *devlink,
char *buf, size_t len)
{
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 31700e0c3928..8e33cec9fc4e 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -29,6 +29,7 @@
#include <net/drop_monitor.h>
#include <net/genetlink.h>
#include <net/netevent.h>
+#include <net/flow_offload.h>
#include <trace/events/skb.h>
#include <trace/events/napi.h>
@@ -67,7 +68,7 @@ struct net_dm_hw_entry {
struct net_dm_hw_entries {
u32 num_entries;
- struct net_dm_hw_entry entries[0];
+ struct net_dm_hw_entry entries[];
};
struct per_cpu_dm_data {
@@ -701,6 +702,13 @@ static void net_dm_packet_work(struct work_struct *work)
}
static size_t
+net_dm_flow_action_cookie_size(const struct net_dm_hw_metadata *hw_metadata)
+{
+ return hw_metadata->fa_cookie ?
+ nla_total_size(hw_metadata->fa_cookie->cookie_len) : 0;
+}
+
+static size_t
net_dm_hw_packet_report_size(size_t payload_len,
const struct net_dm_hw_metadata *hw_metadata)
{
@@ -717,6 +725,8 @@ net_dm_hw_packet_report_size(size_t payload_len,
nla_total_size(strlen(hw_metadata->trap_name) + 1) +
/* NET_DM_ATTR_IN_PORT */
net_dm_in_port_size() +
+ /* NET_DM_ATTR_FLOW_ACTION_COOKIE */
+ net_dm_flow_action_cookie_size(hw_metadata) +
/* NET_DM_ATTR_TIMESTAMP */
nla_total_size(sizeof(u64)) +
/* NET_DM_ATTR_ORIG_LEN */
@@ -762,6 +772,12 @@ static int net_dm_hw_packet_report_fill(struct sk_buff *msg,
goto nla_put_failure;
}
+ if (hw_metadata->fa_cookie &&
+ nla_put(msg, NET_DM_ATTR_FLOW_ACTION_COOKIE,
+ hw_metadata->fa_cookie->cookie_len,
+ hw_metadata->fa_cookie->cookie))
+ goto nla_put_failure;
+
if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
goto nla_put_failure;
@@ -794,11 +810,12 @@ nla_put_failure:
static struct net_dm_hw_metadata *
net_dm_hw_metadata_clone(const struct net_dm_hw_metadata *hw_metadata)
{
+ const struct flow_action_cookie *fa_cookie;
struct net_dm_hw_metadata *n_hw_metadata;
const char *trap_group_name;
const char *trap_name;
- n_hw_metadata = kmalloc(sizeof(*hw_metadata), GFP_ATOMIC);
+ n_hw_metadata = kzalloc(sizeof(*hw_metadata), GFP_ATOMIC);
if (!n_hw_metadata)
return NULL;
@@ -812,12 +829,25 @@ net_dm_hw_metadata_clone(const struct net_dm_hw_metadata *hw_metadata)
goto free_trap_group;
n_hw_metadata->trap_name = trap_name;
+ if (hw_metadata->fa_cookie) {
+ size_t cookie_size = sizeof(*fa_cookie) +
+ hw_metadata->fa_cookie->cookie_len;
+
+ fa_cookie = kmemdup(hw_metadata->fa_cookie, cookie_size,
+ GFP_ATOMIC);
+ if (!fa_cookie)
+ goto free_trap_name;
+ n_hw_metadata->fa_cookie = fa_cookie;
+ }
+
n_hw_metadata->input_dev = hw_metadata->input_dev;
if (n_hw_metadata->input_dev)
dev_hold(n_hw_metadata->input_dev);
return n_hw_metadata;
+free_trap_name:
+ kfree(trap_name);
free_trap_group:
kfree(trap_group_name);
free_hw_metadata:
@@ -830,6 +860,7 @@ net_dm_hw_metadata_free(const struct net_dm_hw_metadata *hw_metadata)
{
if (hw_metadata->input_dev)
dev_put(hw_metadata->input_dev);
+ kfree(hw_metadata->fa_cookie);
kfree(hw_metadata->trap_name);
kfree(hw_metadata->trap_group_name);
kfree(hw_metadata);
diff --git a/net/core/filter.c b/net/core/filter.c
index c180871e606d..7628b947dbc3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2642,6 +2642,19 @@ static const struct bpf_func_proto bpf_msg_pop_data_proto = {
.arg4_type = ARG_ANYTHING,
};
+#ifdef CONFIG_CGROUP_NET_CLASSID
+BPF_CALL_0(bpf_get_cgroup_classid_curr)
+{
+ return __task_get_classid(current);
+}
+
+static const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = {
+ .func = bpf_get_cgroup_classid_curr,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+};
+#endif
+
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
return task_get_classid(skb);
@@ -3626,7 +3639,6 @@ err:
_trace_xdp_redirect_err(dev, xdp_prog, index, err);
return err;
}
-EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
{
@@ -4062,7 +4074,8 @@ BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
return -EINVAL;
- if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
+ if (unlikely(!xdp ||
+ xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
return -EFAULT;
return bpf_event_output(map, flags, meta, meta_size, xdp->data,
@@ -4080,6 +4093,19 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
+static int bpf_xdp_output_btf_ids[5];
+const struct bpf_func_proto bpf_xdp_output_proto = {
+ .func = bpf_xdp_event_output,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_PTR_TO_MEM,
+ .arg5_type = ARG_CONST_SIZE_OR_ZERO,
+ .btf_id = bpf_xdp_output_btf_ids,
+};
+
BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
{
return skb->sk ? sock_gen_cookie(skb->sk) : 0;
@@ -4104,6 +4130,18 @@ static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
+BPF_CALL_1(bpf_get_socket_cookie_sock, struct sock *, ctx)
+{
+ return sock_gen_cookie(ctx);
+}
+
+static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = {
+ .func = bpf_get_socket_cookie_sock,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
{
return sock_gen_cookie(ctx->sk);
@@ -4116,6 +4154,39 @@ static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
+static u64 __bpf_get_netns_cookie(struct sock *sk)
+{
+#ifdef CONFIG_NET_NS
+ return net_gen_cookie(sk ? sk->sk_net.net : &init_net);
+#else
+ return 0;
+#endif
+}
+
+BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx)
+{
+ return __bpf_get_netns_cookie(ctx);
+}
+
+static const struct bpf_func_proto bpf_get_netns_cookie_sock_proto = {
+ .func = bpf_get_netns_cookie_sock,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX_OR_NULL,
+};
+
+BPF_CALL_1(bpf_get_netns_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
+{
+ return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL);
+}
+
+static const struct bpf_func_proto bpf_get_netns_cookie_sock_addr_proto = {
+ .func = bpf_get_netns_cookie_sock_addr,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX_OR_NULL,
+};
+
BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
{
struct sock *sk = sk_to_full_sk(skb->sk);
@@ -4134,8 +4205,8 @@ static const struct bpf_func_proto bpf_get_socket_uid_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
-BPF_CALL_5(bpf_sockopt_event_output, struct bpf_sock_ops_kern *, bpf_sock,
- struct bpf_map *, map, u64, flags, void *, data, u64, size)
+BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map, u64, flags,
+ void *, data, u64, size)
{
if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
return -EINVAL;
@@ -4143,8 +4214,8 @@ BPF_CALL_5(bpf_sockopt_event_output, struct bpf_sock_ops_kern *, bpf_sock,
return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}
-static const struct bpf_func_proto bpf_sockopt_event_output_proto = {
- .func = bpf_sockopt_event_output,
+static const struct bpf_func_proto bpf_event_output_data_proto = {
+ .func = bpf_event_output_data,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
@@ -5330,8 +5401,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
BPF_CALL_1(bpf_sk_release, struct sock *, sk)
{
- /* Only full sockets have sk->sk_flags. */
- if (!sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE))
+ if (sk_is_refcounted(sk))
sock_gen_put(sk);
return 0;
}
@@ -5847,6 +5917,36 @@ static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
.arg5_type = ARG_CONST_SIZE,
};
+BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags)
+{
+ if (flags != 0)
+ return -EINVAL;
+ if (!skb_at_tc_ingress(skb))
+ return -EOPNOTSUPP;
+ if (unlikely(dev_net(skb->dev) != sock_net(sk)))
+ return -ENETUNREACH;
+ if (unlikely(sk->sk_reuseport))
+ return -ESOCKTNOSUPPORT;
+ if (sk_is_refcounted(sk) &&
+ unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
+ return -ENOENT;
+
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sock_pfree;
+
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_sk_assign_proto = {
+ .func = bpf_sk_assign,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_SOCK_COMMON,
+ .arg3_type = ARG_ANYTHING,
+};
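/* Editor's note: sketch of the BPF-program side of the new sk_assign
 * helper, as a TC ingress program steering TCP packets to a socket
 * looked up in the current netns. Tuple extraction is elided; section
 * naming and return codes follow common tc/BPF conventions.
 */
SEC("classifier")
int steer(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;
	long err;

	/* ... fill tuple.ipv4 from the packet headers ... */
	sk = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
				BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return TC_ACT_SHOT;
	err = bpf_sk_assign(skb, sk, 0);
	bpf_sk_release(sk);
	return err ? TC_ACT_SHOT : TC_ACT_OK;
}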
+
#endif /* CONFIG_INET */
bool bpf_helper_changes_pkt_data(void *func)
@@ -5941,6 +6041,26 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_current_uid_gid_proto;
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
+ case BPF_FUNC_get_socket_cookie:
+ return &bpf_get_socket_cookie_sock_proto;
+ case BPF_FUNC_get_netns_cookie:
+ return &bpf_get_netns_cookie_sock_proto;
+ case BPF_FUNC_perf_event_output:
+ return &bpf_event_output_data_proto;
+ case BPF_FUNC_get_current_pid_tgid:
+ return &bpf_get_current_pid_tgid_proto;
+ case BPF_FUNC_get_current_comm:
+ return &bpf_get_current_comm_proto;
+#ifdef CONFIG_CGROUPS
+ case BPF_FUNC_get_current_cgroup_id:
+ return &bpf_get_current_cgroup_id_proto;
+ case BPF_FUNC_get_current_ancestor_cgroup_id:
+ return &bpf_get_current_ancestor_cgroup_id_proto;
+#endif
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ case BPF_FUNC_get_cgroup_classid:
+ return &bpf_get_cgroup_classid_curr_proto;
+#endif
default:
return bpf_base_func_proto(func_id);
}
@@ -5965,8 +6085,26 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
}
case BPF_FUNC_get_socket_cookie:
return &bpf_get_socket_cookie_sock_addr_proto;
+ case BPF_FUNC_get_netns_cookie:
+ return &bpf_get_netns_cookie_sock_addr_proto;
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
+ case BPF_FUNC_perf_event_output:
+ return &bpf_event_output_data_proto;
+ case BPF_FUNC_get_current_pid_tgid:
+ return &bpf_get_current_pid_tgid_proto;
+ case BPF_FUNC_get_current_comm:
+ return &bpf_get_current_comm_proto;
+#ifdef CONFIG_CGROUPS
+ case BPF_FUNC_get_current_cgroup_id:
+ return &bpf_get_current_cgroup_id_proto;
+ case BPF_FUNC_get_current_ancestor_cgroup_id:
+ return &bpf_get_current_ancestor_cgroup_id_proto;
+#endif
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ case BPF_FUNC_get_cgroup_classid:
+ return &bpf_get_cgroup_classid_curr_proto;
+#endif
#ifdef CONFIG_INET
case BPF_FUNC_sk_lookup_tcp:
return &bpf_sock_addr_sk_lookup_tcp_proto;
@@ -6140,6 +6278,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skb_ecn_set_ce_proto;
case BPF_FUNC_tcp_gen_syncookie:
return &bpf_tcp_gen_syncookie_proto;
+ case BPF_FUNC_sk_assign:
+ return &bpf_sk_assign_proto;
#endif
default:
return bpf_base_func_proto(func_id);
@@ -6209,7 +6349,7 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
case BPF_FUNC_perf_event_output:
- return &bpf_sockopt_event_output_proto;
+ return &bpf_event_output_data_proto;
case BPF_FUNC_sk_storage_get:
return &bpf_sk_storage_get_proto;
case BPF_FUNC_sk_storage_delete:
@@ -7140,6 +7280,27 @@ static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type,
return insn - insn_buf;
}
+static struct bpf_insn *bpf_convert_shinfo_access(const struct bpf_insn *si,
+ struct bpf_insn *insn)
+{
+ /* si->dst_reg = skb_shinfo(SKB); */
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
+ BPF_REG_AX, si->src_reg,
+ offsetof(struct sk_buff, end));
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
+ si->dst_reg, si->src_reg,
+ offsetof(struct sk_buff, head));
+ *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
+#else
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
+ si->dst_reg, si->src_reg,
+ offsetof(struct sk_buff, end));
+#endif
+
+ return insn;
+}
+
static u32 bpf_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
@@ -7462,26 +7623,21 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct __sk_buff, gso_segs):
- /* si->dst_reg = skb_shinfo(SKB); */
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
- BPF_REG_AX, si->src_reg,
- offsetof(struct sk_buff, end));
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
- si->dst_reg, si->src_reg,
- offsetof(struct sk_buff, head));
- *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
-#else
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
- si->dst_reg, si->src_reg,
- offsetof(struct sk_buff, end));
-#endif
+ insn = bpf_convert_shinfo_access(si, insn);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs),
si->dst_reg, si->dst_reg,
bpf_target_off(struct skb_shared_info,
gso_segs, 2,
target_size));
break;
+ case offsetof(struct __sk_buff, gso_size):
+ insn = bpf_convert_shinfo_access(si, insn);
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_size),
+ si->dst_reg, si->dst_reg,
+ bpf_target_off(struct skb_shared_info,
+ gso_size, 2,
+ target_size));
+ break;
case offsetof(struct __sk_buff, wire_len):
BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, pkt_len) != 4);
@@ -8620,6 +8776,7 @@ struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
struct bpf_map *, map, void *, key, u32, flags)
{
+ bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
struct sock_reuseport *reuse;
struct sock *selected_sk;
@@ -8628,26 +8785,20 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
return -ENOENT;
reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
- if (!reuse)
- /* selected_sk is unhashed (e.g. by close()) after the
- * above map_lookup_elem(). Treat selected_sk has already
- * been removed from the map.
+ if (!reuse) {
+ /* A reuseport_array only contains sockets with a non-NULL
+ * sk_reuseport_cb. The only (!reuse) case here is that the sk
+ * has already been unhashed (e.g. by close()), so treat it as
+ * -ENOENT.
+ *
+ * Other maps (e.g. sock_map) do not provide this guarantee and
+ * the sk may never be in the reuseport group to begin with.
*/
- return -ENOENT;
+ return is_sockarray ? -ENOENT : -EINVAL;
+ }
if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
- struct sock *sk;
-
- if (unlikely(!reuse_kern->reuseport_id))
- /* There is a small race between adding the
- * sk to the map and setting the
- * reuse_kern->reuseport_id.
- * Treat it as the sk has not been added to
- * the bpf map yet.
- */
- return -ENOENT;
+ struct sock *sk = reuse_kern->sk;
- sk = reuse_kern->sk;
if (sk->sk_protocol != selected_sk->sk_protocol)
return -EPROTOTYPE;
else if (sk->sk_family != selected_sk->sk_family)
@@ -8835,10 +8986,9 @@ const struct bpf_prog_ops sk_reuseport_prog_ops = {
};
#endif /* CONFIG_INET */
-DEFINE_BPF_DISPATCHER(bpf_dispatcher_xdp)
+DEFINE_BPF_DISPATCHER(xdp)
void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
{
- bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(bpf_dispatcher_xdp),
- prev_prog, prog);
+ bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
}
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index a1670dff0629..3eff84824c8b 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -920,9 +920,7 @@ bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
(int)FLOW_DISSECTOR_F_STOP_AT_ENCAP);
flow_keys->flags = flags;
- preempt_disable();
- result = BPF_PROG_RUN(prog, ctx);
- preempt_enable();
+ result = bpf_prog_run_pin_on_cpu(prog, ctx);
flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index 45b6a59ac124..e951b743bed3 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -167,6 +167,34 @@ void flow_rule_match_enc_opts(const struct flow_rule *rule,
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);
+struct flow_action_cookie *flow_action_cookie_create(void *data,
+ unsigned int len,
+ gfp_t gfp)
+{
+ struct flow_action_cookie *cookie;
+
+ cookie = kmalloc(sizeof(*cookie) + len, gfp);
+ if (!cookie)
+ return NULL;
+ cookie->cookie_len = len;
+ memcpy(cookie->cookie, data, len);
+ return cookie;
+}
+EXPORT_SYMBOL(flow_action_cookie_create);
+
+void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
+{
+ kfree(cookie);
+}
+EXPORT_SYMBOL(flow_action_cookie_destroy);
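/* Editor's note: hedged usage sketch. When building a flow_action entry
 * for offload, the tc layer can duplicate the user-supplied act cookie
 * so drivers may later hand it back via devlink_trap_report(); "entry"
 * and the tc_cookie fields are illustrative of such a call site, which
 * is not shown in this hunk.
 */
entry->cookie = flow_action_cookie_create(tc_cookie->data,
					  tc_cookie->len, GFP_ATOMIC);
if (!entry->cookie)
	return -ENOMEM;
/* ... and on teardown ... */
flow_action_cookie_destroy(entry->cookie);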
+
+void flow_rule_match_ct(const struct flow_rule *rule,
+ struct flow_match_ct *out)
+{
+ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
+}
+EXPORT_SYMBOL(flow_rule_match_ct);
+
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv))
@@ -483,7 +511,8 @@ EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);
void flow_indr_block_call(struct net_device *dev,
struct flow_block_offload *bo,
- enum flow_block_command command)
+ enum flow_block_command command,
+ enum tc_setup_type type)
{
struct flow_indr_block_cb *indr_block_cb;
struct flow_indr_block_dev *indr_dev;
@@ -493,8 +522,7 @@ void flow_indr_block_call(struct net_device *dev,
return;
list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
- indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
- bo);
+ indr_block_cb->cb(dev, indr_block_cb->cb_priv, type, bo);
}
EXPORT_SYMBOL_GPL(flow_indr_block_call);
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 99a6de52b21d..7d3438215f32 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -367,7 +367,7 @@ static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = {
[LWT_BPF_XMIT_HEADROOM] = { .type = NLA_U32 },
};
-static int bpf_build_state(struct nlattr *nla,
+static int bpf_build_state(struct net *net, struct nlattr *nla,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack)
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index 2f9c0de533c7..8ec7d13d2860 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -41,6 +41,8 @@ static const char *lwtunnel_encap_str(enum lwtunnel_encap_types encap_type)
return "BPF";
case LWTUNNEL_ENCAP_SEG6_LOCAL:
return "SEG6LOCAL";
+ case LWTUNNEL_ENCAP_RPL:
+ return "RPL";
case LWTUNNEL_ENCAP_IP6:
case LWTUNNEL_ENCAP_IP:
case LWTUNNEL_ENCAP_NONE:
@@ -98,7 +100,7 @@ int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
}
EXPORT_SYMBOL_GPL(lwtunnel_encap_del_ops);
-int lwtunnel_build_state(u16 encap_type,
+int lwtunnel_build_state(struct net *net, u16 encap_type,
struct nlattr *encap, unsigned int family,
const void *cfg, struct lwtunnel_state **lws,
struct netlink_ext_ack *extack)
@@ -122,7 +124,7 @@ int lwtunnel_build_state(u16 encap_type,
rcu_read_unlock();
if (found) {
- ret = ops->build_state(encap, family, cfg, lws, extack);
+ ret = ops->build_state(net, encap, family, cfg, lws, extack);
if (ret)
module_put(ops->owner);
} else {
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 789a73aa7bd8..5bf8d22a47ec 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -3553,9 +3553,6 @@ static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
-#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
- NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
-
#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4c826b8bf9b1..cf0215734ceb 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -944,6 +944,24 @@ err:
kobject_put(kobj);
return error;
}
+
+static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid,
+ kgid_t kgid)
+{
+ struct netdev_rx_queue *queue = dev->_rx + index;
+ struct kobject *kobj = &queue->kobj;
+ int error;
+
+ error = sysfs_change_owner(kobj, kuid, kgid);
+ if (error)
+ return error;
+
+ if (dev->sysfs_rx_queue_group)
+ error = sysfs_group_change_owner(
+ kobj, dev->sysfs_rx_queue_group, kuid, kgid);
+
+ return error;
+}
#endif /* CONFIG_SYSFS */
int
@@ -981,6 +999,29 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
#endif
}
+static int net_rx_queue_change_owner(struct net_device *dev, int num,
+ kuid_t kuid, kgid_t kgid)
+{
+#ifdef CONFIG_SYSFS
+ int error = 0;
+ int i;
+
+#ifndef CONFIG_RPS
+ if (!dev->sysfs_rx_queue_group)
+ return 0;
+#endif
+ for (i = 0; i < num; i++) {
+ error = rx_queue_change_owner(dev, i, kuid, kgid);
+ if (error)
+ break;
+ }
+
+ return error;
+#else
+ return 0;
+#endif
+}
+
#ifdef CONFIG_SYSFS
/*
* netdev_queue sysfs structures and functions.
@@ -1486,6 +1527,23 @@ err:
kobject_put(kobj);
return error;
}
+
+static int tx_queue_change_owner(struct net_device *ndev, int index,
+ kuid_t kuid, kgid_t kgid)
+{
+ struct netdev_queue *queue = ndev->_tx + index;
+ struct kobject *kobj = &queue->kobj;
+ int error;
+
+ error = sysfs_change_owner(kobj, kuid, kgid);
+ if (error)
+ return error;
+
+#ifdef CONFIG_BQL
+ error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
+#endif
+ return error;
+}
#endif /* CONFIG_SYSFS */
int
@@ -1520,6 +1578,25 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
#endif /* CONFIG_SYSFS */
}
+static int net_tx_queue_change_owner(struct net_device *dev, int num,
+ kuid_t kuid, kgid_t kgid)
+{
+#ifdef CONFIG_SYSFS
+ int error = 0;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ error = tx_queue_change_owner(dev, i, kuid, kgid);
+ if (error)
+ break;
+ }
+
+ return error;
+#else
+ return 0;
+#endif /* CONFIG_SYSFS */
+}
+
static int register_queue_kobjects(struct net_device *dev)
{
int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
@@ -1554,6 +1631,31 @@ error:
return error;
}
+static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid)
+{
+ int error = 0, real_rx = 0, real_tx = 0;
+
+#ifdef CONFIG_SYSFS
+ if (ndev->queues_kset) {
+ error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid);
+ if (error)
+ return error;
+ }
+ real_rx = ndev->real_num_rx_queues;
+#endif
+ real_tx = ndev->real_num_tx_queues;
+
+ error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid);
+ if (error)
+ return error;
+
+ error = net_tx_queue_change_owner(ndev, real_tx, kuid, kgid);
+ if (error)
+ return error;
+
+ return 0;
+}
+
static void remove_queue_kobjects(struct net_device *dev)
{
int real_rx = 0, real_tx = 0;
@@ -1767,6 +1869,37 @@ int netdev_register_kobject(struct net_device *ndev)
return error;
}
+/* Change owner for sysfs entries when moving network devices across network
+ * namespaces owned by different user namespaces.
+ */
+int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
+ const struct net *net_new)
+{
+ struct device *dev = &ndev->dev;
+ kuid_t old_uid, new_uid;
+ kgid_t old_gid, new_gid;
+ int error;
+
+ net_ns_get_ownership(net_old, &old_uid, &old_gid);
+ net_ns_get_ownership(net_new, &new_uid, &new_gid);
+
+ /* The network namespace was changed but the owning user namespace is
+ * identical, so there's no need to change the owner of sysfs entries.
+ */
+ if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid))
+ return 0;
+
+ error = device_change_owner(dev, new_uid, new_gid);
+ if (error)
+ return error;
+
+ error = queue_change_owner(ndev, new_uid, new_gid);
+ if (error)
+ return error;
+
+ return 0;
+}
+
int netdev_class_create_file_ns(const struct class_attribute *class_attr,
const void *ns)
{
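
netdev_change_owner() is meant to be called from the netns-move path once the device has joined its new namespace. A sketch of the intended call-site shape (the real caller is dev_change_net_namespace(); the error handling here is illustrative):

	/* Fix up sysfs ownership for the new owning user namespace */
	err = netdev_change_owner(dev, net_old, net_new);
	if (err)
		netdev_warn(dev, "sysfs ownership fixup failed: %d\n", err);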
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index 006876c7b78d..8a5b04c2699a 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -8,5 +8,7 @@ void netdev_unregister_kobject(struct net_device *);
int net_rx_queue_update_kobjects(struct net_device *, int old_num, int new_num);
int netdev_queue_update_kobjects(struct net_device *net,
int old_num, int new_num);
+int netdev_change_owner(struct net_device *, const struct net *net_old,
+ const struct net *net_new);
#endif
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 757cc1d084e7..190ca66a383b 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -69,6 +69,20 @@ EXPORT_SYMBOL_GPL(pernet_ops_rwsem);
static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
+static atomic64_t cookie_gen;
+
+u64 net_gen_cookie(struct net *net)
+{
+ while (1) {
+ u64 res = atomic64_read(&net->net_cookie);
+
+ if (res)
+ return res;
+ res = atomic64_inc_return(&cookie_gen);
+ atomic64_cmpxchg(&net->net_cookie, 0, res);
+ }
+}
+
static struct net_generic *net_alloc_generic(void)
{
struct net_generic *ng;
@@ -1087,6 +1101,7 @@ static int __init net_ns_init(void)
panic("Could not allocate generic netns");
rcu_assign_pointer(init_net.gen, ng);
+ net_gen_cookie(&init_net);
down_write(&pernet_ops_rwsem);
if (setup_net(&init_net, &init_user_ns))
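
net_gen_cookie() hands out a stable non-zero ID lazily and without locks: the first caller to publish via cmpxchg wins, and every later (or losing) caller simply re-reads the published value on the next loop iteration. A self-contained userspace sketch of the same pattern, using C11 atomics:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t cookie_gen;

struct obj {
	_Atomic uint64_t cookie;
};

static uint64_t obj_gen_cookie(struct obj *o)
{
	for (;;) {
		uint64_t expected = 0;
		uint64_t res = atomic_load(&o->cookie);

		if (res)
			return res;
		/* Generate a candidate; a racing loser burns an ID,
		 * which is harmless since IDs only need to be unique.
		 */
		res = atomic_fetch_add(&cookie_gen, 1) + 1;
		atomic_compare_exchange_strong(&o->cookie, &expected, res);
	}
}

int main(void)
{
	struct obj o = { .cookie = 0 };

	/* Prints the same non-zero cookie twice. */
	printf("%llu %llu\n",
	       (unsigned long long)obj_gen_cookie(&o),
	       (unsigned long long)obj_gen_cookie(&o));
	return 0;
}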
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 10d2b255df5e..ef98372facf6 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -43,9 +43,11 @@ static int page_pool_init(struct page_pool *pool,
* DMA_BIDIRECTIONAL is for allowing a page to be used for DMA sending,
* which is the XDP_TX use-case.
*/
- if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
- (pool->p.dma_dir != DMA_BIDIRECTIONAL))
- return -EINVAL;
+ if (pool->p.flags & PP_FLAG_DMA_MAP) {
+ if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
+ (pool->p.dma_dir != DMA_BIDIRECTIONAL))
+ return -EINVAL;
+ }
if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
/* In order to request DMA-sync-for-device the page
@@ -96,7 +98,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
}
EXPORT_SYMBOL(page_pool_create);
-static void __page_pool_return_page(struct page_pool *pool, struct page *page);
+static void page_pool_return_page(struct page_pool *pool, struct page *page);
noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
@@ -136,7 +138,7 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
* (2) break out and fall through to alloc_pages_node.
* This limits stress on the page buddy allocator.
*/
- __page_pool_return_page(pool, page);
+ page_pool_return_page(pool, page);
page = NULL;
break;
}
@@ -274,18 +276,25 @@ static s32 page_pool_inflight(struct page_pool *pool)
return inflight;
}
-/* Cleanup page_pool state from page */
-static void __page_pool_clean_page(struct page_pool *pool,
- struct page *page)
+/* Disconnects a page from a page_pool. API users can have a need
+ * to disconnect a page, to allow it to be used as a regular page
+ * (one that will eventually be returned to the normal page allocator
+ * via put_page).
+ */
+void page_pool_release_page(struct page_pool *pool, struct page *page)
{
dma_addr_t dma;
int count;
if (!(pool->p.flags & PP_FLAG_DMA_MAP))
+ /* Always account for inflight pages, even if we didn't
+ * map them
+ */
goto skip_dma_unmap;
dma = page->dma_addr;
- /* DMA unmap */
+
+ /* When the page is unmapped, it cannot be returned to our pool */
dma_unmap_page_attrs(pool->p.dev, dma,
PAGE_SIZE << pool->p.order, pool->p.dma_dir,
DMA_ATTR_SKIP_CPU_SYNC);
@@ -297,21 +306,12 @@ skip_dma_unmap:
count = atomic_inc_return(&pool->pages_state_release_cnt);
trace_page_pool_state_release(pool, page, count);
}
-
-/* unmap the page and clean our state */
-void page_pool_unmap_page(struct page_pool *pool, struct page *page)
-{
- /* When page is unmapped, this implies page will not be
- * returned to page_pool.
- */
- __page_pool_clean_page(pool, page);
-}
-EXPORT_SYMBOL(page_pool_unmap_page);
+EXPORT_SYMBOL(page_pool_release_page);
/* Return a page to the page allocator, cleaning up our state */
-static void __page_pool_return_page(struct page_pool *pool, struct page *page)
+static void page_pool_return_page(struct page_pool *pool, struct page *page)
{
- __page_pool_clean_page(pool, page);
+ page_pool_release_page(pool, page);
put_page(page);
/* An optimization would be to call __free_pages(page, pool->p.order)
@@ -320,8 +320,7 @@ static void __page_pool_return_page(struct page_pool *pool, struct page *page)
*/
}
-static bool __page_pool_recycle_into_ring(struct page_pool *pool,
- struct page *page)
+static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
int ret;
/* BH protection not needed if current is serving softirq */
@@ -338,7 +337,7 @@ static bool __page_pool_recycle_into_ring(struct page_pool *pool,
*
* Caller must provide appropriate safe context.
*/
-static bool __page_pool_recycle_direct(struct page *page,
+static bool page_pool_recycle_in_cache(struct page *page,
struct page_pool *pool)
{
if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
@@ -357,8 +356,14 @@ static bool pool_page_reusable(struct page_pool *pool, struct page *page)
return !page_is_pfmemalloc(page);
}
-void __page_pool_put_page(struct page_pool *pool, struct page *page,
- unsigned int dma_sync_size, bool allow_direct)
+/* If the page refcnt == 1, this will try to recycle the page.
+ * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
+ * the configured size min(dma_sync_size, pool->max_len).
+ * If the page refcnt != 1, the page will be returned to the memory
+ * subsystem.
+ */
+void page_pool_put_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size, bool allow_direct)
{
/* This allocator is optimized for the XDP mode that uses
* one-frame-per-page, but have fallbacks that act like the
@@ -375,12 +380,12 @@ void __page_pool_put_page(struct page_pool *pool, struct page *page,
dma_sync_size);
if (allow_direct && in_serving_softirq())
- if (__page_pool_recycle_direct(page, pool))
+ if (page_pool_recycle_in_cache(page, pool))
return;
- if (!__page_pool_recycle_into_ring(pool, page)) {
+ if (!page_pool_recycle_in_ring(pool, page)) {
/* Cache full, fallback to free pages */
- __page_pool_return_page(pool, page);
+ page_pool_return_page(pool, page);
}
return;
}
@@ -397,12 +402,13 @@ void __page_pool_put_page(struct page_pool *pool, struct page *page,
* doing refcnt based recycle tricks, meaning another process
* will be invoking put_page.
*/
- __page_pool_clean_page(pool, page);
+ /* Do not replace this with page_pool_return_page() */
+ page_pool_release_page(pool, page);
put_page(page);
}
-EXPORT_SYMBOL(__page_pool_put_page);
+EXPORT_SYMBOL(page_pool_put_page);
-static void __page_pool_empty_ring(struct page_pool *pool)
+static void page_pool_empty_ring(struct page_pool *pool)
{
struct page *page;
@@ -413,7 +419,7 @@ static void __page_pool_empty_ring(struct page_pool *pool)
pr_crit("%s() page_pool refcnt %d violation\n",
__func__, page_ref_count(page));
- __page_pool_return_page(pool, page);
+ page_pool_return_page(pool, page);
}
}
@@ -443,7 +449,7 @@ static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
*/
while (pool->alloc.count) {
page = pool->alloc.cache[--pool->alloc.count];
- __page_pool_return_page(pool, page);
+ page_pool_return_page(pool, page);
}
}
@@ -455,7 +461,7 @@ static void page_pool_scrub(struct page_pool *pool)
/* No more consumers should exist, but producers could still
* be in-flight.
*/
- __page_pool_empty_ring(pool);
+ page_pool_empty_ring(pool);
}
static int page_pool_release(struct page_pool *pool)
@@ -529,7 +535,7 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid)
/* Flush pool alloc cache, as refill will check NUMA node */
while (pool->alloc.count) {
page = pool->alloc.cache[--pool->alloc.count];
- __page_pool_return_page(pool, page);
+ page_pool_return_page(pool, page);
}
}
EXPORT_SYMBOL(page_pool_update_nid);
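
After the renames, the driver-facing contract reads roughly as follows; foo_rx_cleanup() is a hypothetical caller, and whether allow_direct is safe depends on the calling context:

static void foo_rx_cleanup(struct page_pool *pool, struct page *page,
			   bool give_to_stack)
{
	if (give_to_stack) {
		/* Page escapes the pool (e.g. attached to an SKB):
		 * unmap it and account it as released; the final
		 * put_page() happens when the SKB is freed.
		 */
		page_pool_release_page(pool, page);
	} else {
		/* Full-page recycle; allow_direct == true is only
		 * valid when called from softirq context.
		 */
		page_pool_put_full_page(pool, page, true);
	}
}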
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index d0641bba6b81..08e2811b5274 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2003,8 +2003,8 @@ static int pktgen_setup_dev(const struct pktgen_net *pn,
return -ENODEV;
}
- if (odev->type != ARPHRD_ETHER) {
- pr_err("not an ethernet device: \"%s\"\n", ifname);
+ if (odev->type != ARPHRD_ETHER && odev->type != ARPHRD_LOOPBACK) {
+ pr_err("not an ethernet or loopback device: \"%s\"\n", ifname);
err = -EINVAL;
} else if (!netif_running(odev)) {
pr_err("device is down: \"%s\"\n", ifname);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e1152f4ffe33..709ebbf8ab5b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1872,7 +1872,9 @@ static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
};
static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
+ [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD },
[IFLA_XDP_FD] = { .type = NLA_S32 },
+ [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 },
[IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
[IFLA_XDP_FLAGS] = { .type = NLA_U32 },
[IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
@@ -2799,8 +2801,20 @@ static int do_setlink(const struct sk_buff *skb,
}
if (xdp[IFLA_XDP_FD]) {
+ int expected_fd = -1;
+
+ if (xdp_flags & XDP_FLAGS_REPLACE) {
+ if (!xdp[IFLA_XDP_EXPECTED_FD]) {
+ err = -EINVAL;
+ goto errout;
+ }
+ expected_fd =
+ nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
+ }
+
err = dev_change_xdp_fd(dev, extack,
nla_get_s32(xdp[IFLA_XDP_FD]),
+ expected_fd,
xdp_flags);
if (err)
goto errout;
@@ -3909,7 +3923,7 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
/* Support fdb on master device the net/bridge default case */
if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
- (dev->priv_flags & IFF_BRIDGE_PORT)) {
+ netif_is_bridge_port(dev)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
const struct net_device_ops *ops = br_dev->netdev_ops;
@@ -4020,7 +4034,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
/* Support fdb on master device the net/bridge default case */
if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
- (dev->priv_flags & IFF_BRIDGE_PORT)) {
+ netif_is_bridge_port(dev)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
const struct net_device_ops *ops = br_dev->netdev_ops;
@@ -4246,13 +4260,13 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
continue;
if (!br_idx) { /* user did not specify a specific bridge */
- if (dev->priv_flags & IFF_BRIDGE_PORT) {
+ if (netif_is_bridge_port(dev)) {
br_dev = netdev_master_upper_dev_get(dev);
cops = br_dev->netdev_ops;
}
} else {
if (dev != br_dev &&
- !(dev->priv_flags & IFF_BRIDGE_PORT))
+ !netif_is_bridge_port(dev))
continue;
if (br_dev != netdev_master_upper_dev_get(dev) &&
@@ -4264,7 +4278,7 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (idx < s_idx)
goto cont;
- if (dev->priv_flags & IFF_BRIDGE_PORT) {
+ if (netif_is_bridge_port(dev)) {
if (cops && cops->ndo_fdb_dump) {
err = cops->ndo_fdb_dump(skb, cb,
br_dev, dev,
@@ -4414,7 +4428,7 @@ static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (dev) {
if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
- if (!(dev->priv_flags & IFF_BRIDGE_PORT)) {
+ if (!netif_is_bridge_port(dev)) {
NL_SET_ERR_MSG(extack, "Device is not a bridge port");
return -EINVAL;
}
@@ -4553,7 +4567,11 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
brport_nla_put_flag(skb, flags, mask,
IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
brport_nla_put_flag(skb, flags, mask,
- IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
+ IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
+ brport_nla_put_flag(skb, flags, mask,
+ IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
+ brport_nla_put_flag(skb, flags, mask,
+ IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
nla_nest_cancel(skb, protinfo);
goto nla_put_failure;
}
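
Together with XDP_FLAGS_REPLACE, the new IFLA_XDP_EXPECTED_FD attribute lets userspace swap XDP programs atomically: the kernel refuses the operation if the attached program is no longer the expected one. A sketch using libbpf, assuming a version that provides bpf_set_link_xdp_fd_opts() and struct bpf_xdp_set_link_opts:

#include <linux/if_link.h>	/* XDP_FLAGS_REPLACE */
#include <bpf/libbpf.h>

int xdp_swap(int ifindex, int new_prog_fd, int old_prog_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts,
			    .old_fd = old_prog_fd);

	/* Fails with -EEXIST if old_prog_fd's program was replaced
	 * behind our back in the meantime.
	 */
	return bpf_set_link_xdp_fd_opts(ifindex, new_prog_fd,
					XDP_FLAGS_REPLACE, &opts);
}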
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e1101a4f90a6..7e29590482ce 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3668,6 +3668,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
skb_push(nskb, -skb_network_offset(nskb) + offset);
+ skb_release_head_state(nskb);
__copy_skb_header(nskb, skb);
skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
@@ -3926,14 +3927,21 @@ normal:
goto perform_csum_check;
if (!sg) {
- if (!nskb->remcsum_offload)
- nskb->ip_summed = CHECKSUM_NONE;
- SKB_GSO_CB(nskb)->csum =
- skb_copy_and_csum_bits(head_skb, offset,
- skb_put(nskb, len),
- len, 0);
- SKB_GSO_CB(nskb)->csum_start =
- skb_headroom(nskb) + doffset;
+ if (!csum) {
+ if (!nskb->remcsum_offload)
+ nskb->ip_summed = CHECKSUM_NONE;
+ SKB_GSO_CB(nskb)->csum =
+ skb_copy_and_csum_bits(head_skb, offset,
+ skb_put(nskb,
+ len),
+ len, 0);
+ SKB_GSO_CB(nskb)->csum_start =
+ skb_headroom(nskb) + doffset;
+ } else {
+ skb_copy_bits(head_skb, offset,
+ skb_put(nskb, len),
+ len);
+ }
continue;
}
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index ded2d5227678..c479372f2cd2 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -512,7 +512,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
refcount_set(&psock->refcnt, 1);
- rcu_assign_sk_user_data(sk, psock);
+ rcu_assign_sk_user_data_nocopy(sk, psock);
sock_hold(sk);
return psock;
@@ -628,7 +628,6 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
struct bpf_prog *prog;
int ret;
- preempt_disable();
rcu_read_lock();
prog = READ_ONCE(psock->progs.msg_parser);
if (unlikely(!prog)) {
@@ -638,7 +637,7 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
sk_msg_compute_data_pointers(msg);
msg->sk = sk;
- ret = BPF_PROG_RUN(prog, msg);
+ ret = bpf_prog_run_pin_on_cpu(prog, msg);
ret = sk_psock_map_verd(ret, msg->sk_redir);
psock->apply_bytes = msg->apply_bytes;
if (ret == __SK_REDIRECT) {
@@ -653,7 +652,6 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
}
out:
rcu_read_unlock();
- preempt_enable();
return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
@@ -665,9 +663,7 @@ static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
skb->sk = psock->sk;
bpf_compute_data_end_sk_skb(skb);
- preempt_disable();
- ret = BPF_PROG_RUN(prog, skb);
- preempt_enable();
+ ret = bpf_prog_run_pin_on_cpu(prog, skb);
/* strparser clones the skb before handing it to an upper layer,
* meaning skb_orphan has been called. We NULL sk on the way out
* to ensure we don't trigger a BUG_ON() in skb/sk operations
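
bpf_prog_run_pin_on_cpu() keeps the program on one CPU (so its per-CPU scratch buffers stay consistent) without disabling preemption, which is what PREEMPT_RT needs. Roughly, and glossing over the details of the real definition:

static inline u32 prog_run_pin_on_cpu(const struct bpf_prog *prog,
				      const void *ctx)
{
	u32 ret;

	/* Stay on this CPU, but remain preemptible. */
	migrate_disable();
	ret = BPF_PROG_RUN(prog, ctx);
	migrate_enable();
	return ret;
}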
diff --git a/net/core/sock.c b/net/core/sock.c
index 8f71684305c3..da32d9b6d09f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1572,13 +1572,14 @@ static inline void sock_lock_init(struct sock *sk)
*/
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
+ const struct proto *prot = READ_ONCE(osk->sk_prot);
#ifdef CONFIG_SECURITY_NETWORK
void *sptr = nsk->sk_security;
#endif
memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
- osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
+ prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
#ifdef CONFIG_SECURITY_NETWORK
nsk->sk_security = sptr;
@@ -1792,16 +1793,17 @@ static void sk_init_common(struct sock *sk)
*/
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
+ struct proto *prot = READ_ONCE(sk->sk_prot);
struct sock *newsk;
bool is_charged = true;
- newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
+ newsk = sk_prot_alloc(prot, priority, sk->sk_family);
if (newsk != NULL) {
struct sk_filter *filter;
sock_copy(newsk, sk);
- newsk->sk_prot_creator = sk->sk_prot;
+ newsk->sk_prot_creator = prot;
/* SANITY */
if (likely(newsk->sk_net_refcnt))
@@ -1866,6 +1868,12 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
goto out;
}
+ /* Clear sk_user_data if parent had the pointer tagged
+ * as not suitable for copying when cloning.
+ */
+ if (sk_user_data_is_nocopy(newsk))
+ RCU_INIT_POINTER(newsk->sk_user_data, NULL);
+
newsk->sk_err = 0;
newsk->sk_err_soft = 0;
newsk->sk_priority = 0;
@@ -2063,6 +2071,18 @@ void sock_efree(struct sk_buff *skb)
}
EXPORT_SYMBOL(sock_efree);
+/* Buffer destructor for the prefetch/receive path, where the reference
+ * count may not be held, e.g. for listen sockets.
+ */
+#ifdef CONFIG_INET
+void sock_pfree(struct sk_buff *skb)
+{
+ if (sk_is_refcounted(skb->sk))
+ sock_gen_put(skb->sk);
+}
+EXPORT_SYMBOL(sock_pfree);
+#endif /* CONFIG_INET */
+
kuid_t sock_i_uid(struct sock *sk)
{
kuid_t uid;
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index b70c844a88ec..b08dfae10f88 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -10,6 +10,8 @@
#include <linux/skmsg.h>
#include <linux/list.h>
#include <linux/jhash.h>
+#include <linux/sock_diag.h>
+#include <net/udp.h>
struct bpf_stab {
struct bpf_map map;
@@ -31,7 +33,8 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
return ERR_PTR(-EPERM);
if (attr->max_entries == 0 ||
attr->key_size != 4 ||
- attr->value_size != 4 ||
+ (attr->value_size != sizeof(u32) &&
+ attr->value_size != sizeof(u64)) ||
attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);
@@ -139,12 +142,58 @@ static void sock_map_unref(struct sock *sk, void *link_raw)
}
}
+static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
+{
+ struct proto *prot;
+
+ sock_owned_by_me(sk);
+
+ switch (sk->sk_type) {
+ case SOCK_STREAM:
+ prot = tcp_bpf_get_proto(sk, psock);
+ break;
+
+ case SOCK_DGRAM:
+ prot = udp_bpf_get_proto(sk, psock);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (IS_ERR(prot))
+ return PTR_ERR(prot);
+
+ sk_psock_update_proto(sk, psock, prot);
+ return 0;
+}
+
+static struct sk_psock *sock_map_psock_get_checked(struct sock *sk)
+{
+ struct sk_psock *psock;
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (psock) {
+ if (sk->sk_prot->close != sock_map_close) {
+ psock = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
+ if (!refcount_inc_not_zero(&psock->refcnt))
+ psock = ERR_PTR(-EBUSY);
+ }
+out:
+ rcu_read_unlock();
+ return psock;
+}
+
static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
struct sock *sk)
{
struct bpf_prog *msg_parser, *skb_parser, *skb_verdict;
- bool skb_progs, sk_psock_is_new = false;
struct sk_psock *psock;
+ bool skb_progs;
int ret;
skb_verdict = READ_ONCE(progs->skb_verdict);
@@ -170,7 +219,7 @@ static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
}
}
- psock = sk_psock_get_checked(sk);
+ psock = sock_map_psock_get_checked(sk);
if (IS_ERR(psock)) {
ret = PTR_ERR(psock);
goto out_progs;
@@ -189,18 +238,14 @@ static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
ret = -ENOMEM;
goto out_progs;
}
- sk_psock_is_new = true;
}
if (msg_parser)
psock_set_prog(&psock->progs.msg_parser, msg_parser);
- if (sk_psock_is_new) {
- ret = tcp_bpf_init(sk);
- if (ret < 0)
- goto out_drop;
- } else {
- tcp_bpf_reinit(sk);
- }
+
+ ret = sock_map_init_proto(sk, psock);
+ if (ret < 0)
+ goto out_drop;
write_lock_bh(&sk->sk_callback_lock);
if (skb_progs && !psock->parser.enabled) {
@@ -228,6 +273,27 @@ out:
return ret;
}
+static int sock_map_link_no_progs(struct bpf_map *map, struct sock *sk)
+{
+ struct sk_psock *psock;
+ int ret;
+
+ psock = sock_map_psock_get_checked(sk);
+ if (IS_ERR(psock))
+ return PTR_ERR(psock);
+
+ if (!psock) {
+ psock = sk_psock_init(sk, map->numa_node);
+ if (!psock)
+ return -ENOMEM;
+ }
+
+ ret = sock_map_init_proto(sk, psock);
+ if (ret < 0)
+ sk_psock_put(sk, psock);
+ return ret;
+}
+
static void sock_map_free(struct bpf_map *map)
{
struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
@@ -277,7 +343,22 @@ static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
static void *sock_map_lookup(struct bpf_map *map, void *key)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return __sock_map_lookup_elem(map, *(u32 *)key);
+}
+
+static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
+{
+ struct sock *sk;
+
+ if (map->value_size != sizeof(u64))
+ return ERR_PTR(-ENOSPC);
+
+ sk = __sock_map_lookup_elem(map, *(u32 *)key);
+ if (!sk)
+ return ERR_PTR(-ENOENT);
+
+ sock_gen_cookie(sk);
+ return &sk->sk_cookie;
}
static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
@@ -336,11 +417,15 @@ static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next)
return 0;
}
+static bool sock_map_redirect_allowed(const struct sock *sk)
+{
+ return sk->sk_state != TCP_LISTEN;
+}
+
static int sock_map_update_common(struct bpf_map *map, u32 idx,
struct sock *sk, u64 flags)
{
struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
- struct inet_connection_sock *icsk = inet_csk(sk);
struct sk_psock_link *link;
struct sk_psock *psock;
struct sock *osk;
@@ -351,14 +436,21 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
return -EINVAL;
if (unlikely(idx >= map->max_entries))
return -E2BIG;
- if (unlikely(rcu_access_pointer(icsk->icsk_ulp_data)))
+ if (inet_csk_has_ulp(sk))
return -EINVAL;
link = sk_psock_init_link();
if (!link)
return -ENOMEM;
- ret = sock_map_link(map, &stab->progs, sk);
+ /* Only sockets we can redirect into/from in BPF need to hold
+ * refs to parser/verdict progs and have their sk_data_ready
+ * and sk_write_space callbacks overridden.
+ */
+ if (sock_map_redirect_allowed(sk))
+ ret = sock_map_link(map, &stab->progs, sk);
+ else
+ ret = sock_map_link_no_progs(map, sk);
if (ret < 0)
goto out_free;
@@ -393,23 +485,52 @@ out_free:
static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops)
{
return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
- ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB;
+ ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
+ ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB;
}
-static bool sock_map_sk_is_suitable(const struct sock *sk)
+static bool sk_is_tcp(const struct sock *sk)
{
return sk->sk_type == SOCK_STREAM &&
sk->sk_protocol == IPPROTO_TCP;
}
+static bool sk_is_udp(const struct sock *sk)
+{
+ return sk->sk_type == SOCK_DGRAM &&
+ sk->sk_protocol == IPPROTO_UDP;
+}
+
+static bool sock_map_sk_is_suitable(const struct sock *sk)
+{
+ return sk_is_tcp(sk) || sk_is_udp(sk);
+}
+
+static bool sock_map_sk_state_allowed(const struct sock *sk)
+{
+ if (sk_is_tcp(sk))
+ return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN);
+ else if (sk_is_udp(sk))
+ return sk_hashed(sk);
+
+ return false;
+}
+
static int sock_map_update_elem(struct bpf_map *map, void *key,
void *value, u64 flags)
{
- u32 ufd = *(u32 *)value;
u32 idx = *(u32 *)key;
struct socket *sock;
struct sock *sk;
int ret;
+ u64 ufd;
+
+ if (map->value_size == sizeof(u64))
+ ufd = *(u64 *)value;
+ else
+ ufd = *(u32 *)value;
+ if (ufd > S32_MAX)
+ return -EINVAL;
sock = sockfd_lookup(ufd, &ret);
if (!sock)
@@ -425,7 +546,7 @@ static int sock_map_update_elem(struct bpf_map *map, void *key,
}
sock_map_sk_acquire(sk);
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (!sock_map_sk_state_allowed(sk))
ret = -EOPNOTSUPP;
else
ret = sock_map_update_common(map, idx, sk, flags);
@@ -462,13 +583,17 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
struct bpf_map *, map, u32, key, u64, flags)
{
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
+ struct sock *sk;
if (unlikely(flags & ~(BPF_F_INGRESS)))
return SK_DROP;
- tcb->bpf.flags = flags;
- tcb->bpf.sk_redir = __sock_map_lookup_elem(map, key);
- if (!tcb->bpf.sk_redir)
+
+ sk = __sock_map_lookup_elem(map, key);
+ if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
return SK_DROP;
+
+ tcb->bpf.flags = flags;
+ tcb->bpf.sk_redir = sk;
return SK_PASS;
}
@@ -485,12 +610,17 @@ const struct bpf_func_proto bpf_sk_redirect_map_proto = {
BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
struct bpf_map *, map, u32, key, u64, flags)
{
+ struct sock *sk;
+
if (unlikely(flags & ~(BPF_F_INGRESS)))
return SK_DROP;
- msg->flags = flags;
- msg->sk_redir = __sock_map_lookup_elem(map, key);
- if (!msg->sk_redir)
+
+ sk = __sock_map_lookup_elem(map, key);
+ if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
return SK_DROP;
+
+ msg->flags = flags;
+ msg->sk_redir = sk;
return SK_PASS;
}
@@ -508,6 +638,7 @@ const struct bpf_map_ops sock_map_ops = {
.map_alloc = sock_map_alloc,
.map_free = sock_map_free,
.map_get_next_key = sock_map_get_next_key,
+ .map_lookup_elem_sys_only = sock_map_lookup_sys,
.map_update_elem = sock_map_update_elem,
.map_delete_elem = sock_map_delete_elem,
.map_lookup_elem = sock_map_lookup,
@@ -520,7 +651,7 @@ struct bpf_htab_elem {
u32 hash;
struct sock *sk;
struct hlist_node node;
- u8 key[0];
+ u8 key[];
};
struct bpf_htab_bucket {
@@ -664,7 +795,6 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
struct sock *sk, u64 flags)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct inet_connection_sock *icsk = inet_csk(sk);
u32 key_size = map->key_size, hash;
struct bpf_htab_elem *elem, *elem_new;
struct bpf_htab_bucket *bucket;
@@ -675,14 +805,21 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
WARN_ON_ONCE(!rcu_read_lock_held());
if (unlikely(flags > BPF_EXIST))
return -EINVAL;
- if (unlikely(icsk->icsk_ulp_data))
+ if (inet_csk_has_ulp(sk))
return -EINVAL;
link = sk_psock_init_link();
if (!link)
return -ENOMEM;
- ret = sock_map_link(map, &htab->progs, sk);
+ /* Only sockets we can redirect into/from in BPF need to hold
+ * refs to parser/verdict progs and have their sk_data_ready
+ * and sk_write_space callbacks overridden.
+ */
+ if (sock_map_redirect_allowed(sk))
+ ret = sock_map_link(map, &htab->progs, sk);
+ else
+ ret = sock_map_link_no_progs(map, sk);
if (ret < 0)
goto out_free;
@@ -731,10 +868,17 @@ out_free:
static int sock_hash_update_elem(struct bpf_map *map, void *key,
void *value, u64 flags)
{
- u32 ufd = *(u32 *)value;
struct socket *sock;
struct sock *sk;
int ret;
+ u64 ufd;
+
+ if (map->value_size == sizeof(u64))
+ ufd = *(u64 *)value;
+ else
+ ufd = *(u32 *)value;
+ if (ufd > S32_MAX)
+ return -EINVAL;
sock = sockfd_lookup(ufd, &ret);
if (!sock)
@@ -750,7 +894,7 @@ static int sock_hash_update_elem(struct bpf_map *map, void *key,
}
sock_map_sk_acquire(sk);
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (!sock_map_sk_state_allowed(sk))
ret = -EOPNOTSUPP;
else
ret = sock_hash_update_common(map, key, sk, flags);
@@ -810,7 +954,8 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
return ERR_PTR(-EPERM);
if (attr->max_entries == 0 ||
attr->key_size == 0 ||
- attr->value_size != 4 ||
+ (attr->value_size != sizeof(u32) &&
+ attr->value_size != sizeof(u64)) ||
attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);
if (attr->key_size > MAX_BPF_STACK)
@@ -889,6 +1034,26 @@ static void sock_hash_free(struct bpf_map *map)
kfree(htab);
}
+static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
+{
+ struct sock *sk;
+
+ if (map->value_size != sizeof(u64))
+ return ERR_PTR(-ENOSPC);
+
+ sk = __sock_hash_lookup_elem(map, key);
+ if (!sk)
+ return ERR_PTR(-ENOENT);
+
+ sock_gen_cookie(sk);
+ return &sk->sk_cookie;
+}
+
+static void *sock_hash_lookup(struct bpf_map *map, void *key)
+{
+ return __sock_hash_lookup_elem(map, key);
+}
+
static void sock_hash_release_progs(struct bpf_map *map)
{
psock_progs_drop(&container_of(map, struct bpf_htab, map)->progs);
@@ -920,13 +1085,17 @@ BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
struct bpf_map *, map, void *, key, u64, flags)
{
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
+ struct sock *sk;
if (unlikely(flags & ~(BPF_F_INGRESS)))
return SK_DROP;
- tcb->bpf.flags = flags;
- tcb->bpf.sk_redir = __sock_hash_lookup_elem(map, key);
- if (!tcb->bpf.sk_redir)
+
+ sk = __sock_hash_lookup_elem(map, key);
+ if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
return SK_DROP;
+
+ tcb->bpf.flags = flags;
+ tcb->bpf.sk_redir = sk;
return SK_PASS;
}
@@ -943,12 +1112,17 @@ const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
struct bpf_map *, map, void *, key, u64, flags)
{
+ struct sock *sk;
+
if (unlikely(flags & ~(BPF_F_INGRESS)))
return SK_DROP;
- msg->flags = flags;
- msg->sk_redir = __sock_hash_lookup_elem(map, key);
- if (!msg->sk_redir)
+
+ sk = __sock_hash_lookup_elem(map, key);
+ if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
return SK_DROP;
+
+ msg->flags = flags;
+ msg->sk_redir = sk;
return SK_PASS;
}
@@ -968,7 +1142,8 @@ const struct bpf_map_ops sock_hash_ops = {
.map_get_next_key = sock_hash_get_next_key,
.map_update_elem = sock_hash_update_elem,
.map_delete_elem = sock_hash_delete_elem,
- .map_lookup_elem = sock_map_lookup,
+ .map_lookup_elem = sock_hash_lookup,
+ .map_lookup_elem_sys_only = sock_hash_lookup_sys,
.map_release_uref = sock_hash_release_progs,
.map_check_btf = map_check_no_btf,
};
@@ -1012,7 +1187,7 @@ int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
return 0;
}
-void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link)
+static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link)
{
switch (link->map->map_type) {
case BPF_MAP_TYPE_SOCKMAP:
@@ -1025,3 +1200,54 @@ void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link)
break;
}
}
+
+static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock)
+{
+ struct sk_psock_link *link;
+
+ while ((link = sk_psock_link_pop(psock))) {
+ sock_map_unlink(sk, link);
+ sk_psock_free_link(link);
+ }
+}
+
+void sock_map_unhash(struct sock *sk)
+{
+ void (*saved_unhash)(struct sock *sk);
+ struct sk_psock *psock;
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ if (sk->sk_prot->unhash)
+ sk->sk_prot->unhash(sk);
+ return;
+ }
+
+ saved_unhash = psock->saved_unhash;
+ sock_map_remove_links(sk, psock);
+ rcu_read_unlock();
+ saved_unhash(sk);
+}
+
+void sock_map_close(struct sock *sk, long timeout)
+{
+ void (*saved_close)(struct sock *sk, long timeout);
+ struct sk_psock *psock;
+
+ lock_sock(sk);
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ release_sock(sk);
+ return sk->sk_prot->close(sk, timeout);
+ }
+
+ saved_close = psock->saved_close;
+ sock_map_remove_links(sk, psock);
+ rcu_read_unlock();
+ release_sock(sk);
+ saved_close(sk, timeout);
+}
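
With value_size == 8 now accepted, the map_lookup_elem_sys_only hook lets userspace read back the socket cookie for an entry. A minimal libbpf sketch, assuming map_fd refers to such a sockmap with an entry at key 0:

#include <bpf/bpf.h>
#include <stdint.h>
#include <stdio.h>

static int dump_cookie(int map_fd)
{
	uint32_t key = 0;
	uint64_t cookie;

	/* Fails with -ENOSPC if the map was created with 4-byte values. */
	if (bpf_map_lookup_elem(map_fd, &key, &cookie))
		return -1;
	printf("sk cookie: %llu\n", (unsigned long long)cookie);
	return 0;
}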
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index 91e9f2223c39..adcb3aea576d 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -16,27 +16,8 @@
DEFINE_SPINLOCK(reuseport_lock);
-#define REUSEPORT_MIN_ID 1
static DEFINE_IDA(reuseport_ida);
-int reuseport_get_id(struct sock_reuseport *reuse)
-{
- int id;
-
- if (reuse->reuseport_id)
- return reuse->reuseport_id;
-
- id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
- /* Called under reuseport_lock */
- GFP_ATOMIC);
- if (id < 0)
- return id;
-
- reuse->reuseport_id = id;
-
- return reuse->reuseport_id;
-}
-
static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
unsigned int size = sizeof(struct sock_reuseport) +
@@ -55,6 +36,7 @@ static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
int reuseport_alloc(struct sock *sk, bool bind_inany)
{
struct sock_reuseport *reuse;
+ int id, ret = 0;
/* bh lock used since this function call may precede hlist lock in
* soft irq of receive path or setsockopt from process context
@@ -78,10 +60,18 @@ int reuseport_alloc(struct sock *sk, bool bind_inany)
reuse = __reuseport_alloc(INIT_SOCKS);
if (!reuse) {
- spin_unlock_bh(&reuseport_lock);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
+ id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
+ if (id < 0) {
+ kfree(reuse);
+ ret = id;
+ goto out;
+ }
+
+ reuse->reuseport_id = id;
reuse->socks[0] = sk;
reuse->num_socks = 1;
reuse->bind_inany = bind_inany;
@@ -90,7 +80,7 @@ int reuseport_alloc(struct sock *sk, bool bind_inany)
out:
spin_unlock_bh(&reuseport_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(reuseport_alloc);
@@ -134,8 +124,7 @@ static void reuseport_free_rcu(struct rcu_head *head)
reuse = container_of(head, struct sock_reuseport, rcu);
sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
- if (reuse->reuseport_id)
- ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
+ ida_free(&reuseport_ida, reuse->reuseport_id);
kfree(reuse);
}
@@ -199,12 +188,15 @@ void reuseport_detach_sock(struct sock *sk)
reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
lockdep_is_held(&reuseport_lock));
- /* At least one of the sk in this reuseport group is added to
- * a bpf map. Notify the bpf side. The bpf map logic will
- * remove the sk if it is indeed added to a bpf map.
+ /* Notify the bpf side. The sk may be added to a sockarray
+ * map. If so, sockarray logic will remove it from the map.
+ *
+ * Other bpf map types that work with reuseport, like sockmap,
+ * don't need an explicit callback from here. They override sk
+ * unhash/close ops to remove the sk from the map before we
+ * get to this point.
*/
- if (reuse->reuseport_id)
- bpf_sk_reuseport_detach(sk);
+ bpf_sk_reuseport_detach(sk);
rcu_assign_pointer(sk->sk_reuseport_cb, NULL);
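
The ID lifecycle is now unconditional: every group takes an ID from ida_alloc() at creation and releases it exactly once from the RCU free path, so readers never need a "do we have an ID yet?" check. A condensed sketch of the pattern, with demo names only:

#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_IDA(demo_ida);

struct demo_group {
	int id;
};

static struct demo_group *demo_group_alloc(gfp_t gfp)
{
	struct demo_group *g = kzalloc(sizeof(*g), gfp);
	int id;

	if (!g)
		return NULL;
	id = ida_alloc(&demo_ida, gfp);	/* GFP_ATOMIC when under a spinlock */
	if (id < 0) {
		kfree(g);
		return NULL;
	}
	g->id = id;
	return g;
}

static void demo_group_free(struct demo_group *g)
{
	ida_free(&demo_ida, g->id);
	kfree(g);
}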
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 8310714c47fd..4c7ea85486af 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -372,7 +372,7 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
page = virt_to_head_page(data);
napi_direct &= !xdp_return_frame_no_direct();
- page_pool_put_page(xa->page_pool, page, napi_direct);
+ page_pool_put_full_page(xa->page_pool, page, napi_direct);
rcu_read_unlock();
break;
case MEM_TYPE_PAGE_SHARED:
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 70f88f2b4456..105f3734dadb 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -95,7 +95,7 @@ void ccid_cleanup_builtins(void);
struct ccid {
struct ccid_operations *ccid_ops;
- char ccid_priv[0];
+ char ccid_priv[];
};
static inline void *ccid_priv(const struct ccid *ccid)
diff --git a/net/dccp/diag.c b/net/dccp/diag.c
index 73ef73a218ff..8a82c5a2c5a8 100644
--- a/net/dccp/diag.c
+++ b/net/dccp/diag.c
@@ -46,16 +46,15 @@ static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
}
static void dccp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
- inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r, bc);
+ inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r);
}
-static int dccp_diag_dump_one(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+static int dccp_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
- return inet_diag_dump_one_icsk(&dccp_hashinfo, in_skb, nlh, req);
+ return inet_diag_dump_one_icsk(&dccp_hashinfo, cb, req);
}
static const struct inet_diag_handler dccp_diag_handler = {
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 25187528c308..c5c74a34d139 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -216,6 +216,7 @@ EXPORT_SYMBOL_GPL(dccp_check_req);
*/
int dccp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb)
+ __releases(child)
{
int ret = 0;
const int state = child->sk_state;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 08c3dc45f1a4..06b9983325cc 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1173,7 +1173,7 @@ make_route:
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
- rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
+ rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, 0);
if (rt == NULL)
goto e_nobufs;
@@ -1439,7 +1439,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
}
make_route:
- rt = dst_alloc(&dn_dst_ops, out_dev, 1, DST_OBSOLETE_NONE, DST_HOST);
+ rt = dst_alloc(&dn_dst_ops, out_dev, 1, DST_OBSOLETE_NONE, 0);
if (rt == NULL)
goto e_nobufs;
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 17281fec710c..ee2610c4d46a 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -88,13 +88,9 @@ const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol)
{
struct dsa_tag_driver *dsa_tag_driver;
const struct dsa_device_ops *ops;
- char module_name[128];
bool found = false;
- snprintf(module_name, 127, "%s%d", DSA_TAG_DRIVER_ALIAS,
- tag_protocol);
-
- request_module(module_name);
+ request_module("%s%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);
mutex_lock(&dsa_tag_drivers_lock);
list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index e7c30b472034..9a271a58a41d 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -18,8 +18,8 @@
#include "dsa_priv.h"
-static LIST_HEAD(dsa_tree_list);
static DEFINE_MUTEX(dsa2_mutex);
+LIST_HEAD(dsa_tree_list);
static const struct devlink_ops dsa_devlink_ops = {
};
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 760e6ea3178a..904cc7c9b882 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -22,6 +22,7 @@ enum {
DSA_NOTIFIER_MDB_DEL,
DSA_NOTIFIER_VLAN_ADD,
DSA_NOTIFIER_VLAN_DEL,
+ DSA_NOTIFIER_MTU,
};
/* DSA_NOTIFIER_AGEING_TIME */
@@ -61,6 +62,14 @@ struct dsa_notifier_vlan_info {
int port;
};
+/* DSA_NOTIFIER_MTU */
+struct dsa_notifier_mtu_info {
+ bool propagate_upstream;
+ int sw_index;
+ int port;
+ int mtu;
+};
+
struct dsa_slave_priv {
/* Copy of CPU port xmit for faster access in slave transmit hot path */
struct sk_buff * (*xmit)(struct sk_buff *skb,
@@ -127,6 +136,8 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
struct switchdev_trans *trans);
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock,
struct switchdev_trans *trans);
+int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
+ bool propagate_upstream);
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
u16 vid);
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
@@ -183,4 +194,8 @@ dsa_slave_to_master(const struct net_device *dev)
/* switch.c */
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);
+
+/* dsa2.c */
+extern struct list_head dsa_tree_list;
+
#endif
diff --git a/net/dsa/master.c b/net/dsa/master.c
index bd44bde272f4..b5c535af63a3 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -314,20 +314,6 @@ static const struct attribute_group dsa_group = {
.attrs = dsa_slave_attrs,
};
-static void dsa_master_set_mtu(struct net_device *dev, struct dsa_port *cpu_dp)
-{
- unsigned int mtu = ETH_DATA_LEN + cpu_dp->tag_ops->overhead;
- int err;
-
- rtnl_lock();
- if (mtu <= dev->max_mtu) {
- err = dev_set_mtu(dev, mtu);
- if (err)
- netdev_dbg(dev, "Unable to set MTU to include for DSA overheads\n");
- }
- rtnl_unlock();
-}
-
static void dsa_master_reset_mtu(struct net_device *dev)
{
int err;
@@ -344,7 +330,12 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
int ret;
- dsa_master_set_mtu(dev, cpu_dp);
+ rtnl_lock();
+ ret = dev_set_mtu(dev, ETH_DATA_LEN + cpu_dp->tag_ops->overhead);
+ rtnl_unlock();
+ if (ret)
+ netdev_warn(dev, "error %d setting MTU to include DSA overhead\n",
+ ret);
/* If we use a tagging format that doesn't have an ethertype
* field, make sure that all packets from this point on get
diff --git a/net/dsa/port.c b/net/dsa/port.c
index ec13dc666788..231b2d494f1c 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -297,6 +297,19 @@ int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
return ds->ops->port_egress_floods(ds, port, true, mrouter);
}
+int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
+ bool propagate_upstream)
+{
+ struct dsa_notifier_mtu_info info = {
+ .sw_index = dp->ds->index,
+ .propagate_upstream = propagate_upstream,
+ .port = dp->index,
+ .mtu = new_mtu,
+ };
+
+ return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
+}
+
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
u16 vid)
{
@@ -457,6 +470,7 @@ static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
{
struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
struct dsa_switch *ds = dp->ds;
+ int err;
/* Only called for inband modes */
if (!ds->ops->phylink_mac_link_state) {
@@ -464,8 +478,12 @@ static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
return;
}
- if (ds->ops->phylink_mac_link_state(ds, dp->index, state) < 0)
+ err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
+ if (err < 0) {
+ dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
+ dp->index, err);
state->link = 0;
+ }
}
static void dsa_port_phylink_mac_config(struct phylink_config *config,
@@ -513,9 +531,11 @@ static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
}
static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev)
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
struct dsa_switch *ds = dp->ds;
@@ -526,7 +546,8 @@ static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
return;
}
- ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev);
+ ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
+ speed, duplex, tx_pause, rx_pause);
}
const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
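
DSA drivers implementing phylink_mac_link_up now receive the resolved link parameters directly instead of digging them out of phydev. A hypothetical driver op showing the new shape; all foo_* names are illustrative:

static void foo_phylink_mac_link_up(struct dsa_switch *ds, int port,
				    unsigned int mode,
				    phy_interface_t interface,
				    struct phy_device *phydev,
				    int speed, int duplex,
				    bool tx_pause, bool rx_pause)
{
	struct foo_priv *priv = ds->priv;

	/* Program the MAC from the resolved parameters. */
	foo_port_set_speed(priv, port, speed);
	foo_port_set_duplex(priv, port, duplex == DUPLEX_FULL);
	foo_port_set_pause(priv, port, tx_pause, rx_pause);
	foo_port_enable_mac(priv, port);
}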
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index ddc0f9236928..5390ff541658 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -842,59 +842,137 @@ dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
return NULL;
}
-static int dsa_slave_add_cls_matchall(struct net_device *dev,
- struct tc_cls_matchall_offload *cls,
- bool ingress)
+static int
+dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_mall_mirror_tc_entry *mirror;
struct dsa_mall_tc_entry *mall_tc_entry;
- __be16 protocol = cls->common.protocol;
struct dsa_switch *ds = dp->ds;
struct flow_action_entry *act;
struct dsa_port *to_dp;
- int err = -EOPNOTSUPP;
+ int err;
+
+ act = &cls->rule->action.entries[0];
if (!ds->ops->port_mirror_add)
- return err;
+ return -EOPNOTSUPP;
- if (!flow_offload_has_one_action(&cls->rule->action))
- return err;
+ if (!act->dev)
+ return -EINVAL;
+
+ if (!flow_action_basic_hw_stats_check(&cls->rule->action,
+ cls->common.extack))
+ return -EOPNOTSUPP;
act = &cls->rule->action.entries[0];
- if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
- struct dsa_mall_mirror_tc_entry *mirror;
+ if (!dsa_slave_dev_check(act->dev))
+ return -EOPNOTSUPP;
- if (!act->dev)
- return -EINVAL;
+ mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
+ if (!mall_tc_entry)
+ return -ENOMEM;
- if (!dsa_slave_dev_check(act->dev))
- return -EOPNOTSUPP;
+ mall_tc_entry->cookie = cls->cookie;
+ mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
+ mirror = &mall_tc_entry->mirror;
- mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
- if (!mall_tc_entry)
- return -ENOMEM;
+ to_dp = dsa_slave_to_port(act->dev);
- mall_tc_entry->cookie = cls->cookie;
- mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
- mirror = &mall_tc_entry->mirror;
+ mirror->to_local_port = to_dp->index;
+ mirror->ingress = ingress;
- to_dp = dsa_slave_to_port(act->dev);
+ err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
+ if (err) {
+ kfree(mall_tc_entry);
+ return err;
+ }
- mirror->to_local_port = to_dp->index;
- mirror->ingress = ingress;
+ list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
- err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
- if (err) {
- kfree(mall_tc_entry);
- return err;
+ return err;
+}
+
+static int
+dsa_slave_add_cls_matchall_police(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_mall_policer_tc_entry *policer;
+ struct dsa_mall_tc_entry *mall_tc_entry;
+ struct dsa_switch *ds = dp->ds;
+ struct flow_action_entry *act;
+ int err;
+
+ if (!ds->ops->port_policer_add) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Policing offload not implemented\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ingress) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only supported on ingress qdisc\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_action_basic_hw_stats_check(&cls->rule->action,
+ cls->common.extack))
+ return -EOPNOTSUPP;
+
+ list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
+ if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one port policer allowed\n");
+ return -EEXIST;
}
+ }
+
+ act = &cls->rule->action.entries[0];
- list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
+ mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
+ if (!mall_tc_entry)
+ return -ENOMEM;
+
+ mall_tc_entry->cookie = cls->cookie;
+ mall_tc_entry->type = DSA_PORT_MALL_POLICER;
+ policer = &mall_tc_entry->policer;
+ policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
+ policer->burst = act->police.burst;
+
+ err = ds->ops->port_policer_add(ds, dp->index, policer);
+ if (err) {
+ kfree(mall_tc_entry);
+ return err;
}
- return 0;
+ list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
+
+ return err;
+}
+
+static int dsa_slave_add_cls_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
+{
+ int err = -EOPNOTSUPP;
+
+ if (cls->common.protocol == htons(ETH_P_ALL) &&
+ flow_offload_has_one_action(&cls->rule->action) &&
+ cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
+ err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
+ else if (flow_offload_has_one_action(&cls->rule->action) &&
+ cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
+ err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);
+
+ return err;
}
static void dsa_slave_del_cls_matchall(struct net_device *dev,
@@ -904,9 +982,6 @@ static void dsa_slave_del_cls_matchall(struct net_device *dev,
struct dsa_mall_tc_entry *mall_tc_entry;
struct dsa_switch *ds = dp->ds;
- if (!ds->ops->port_mirror_del)
- return;
-
mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
if (!mall_tc_entry)
return;
@@ -915,7 +990,13 @@ static void dsa_slave_del_cls_matchall(struct net_device *dev,
switch (mall_tc_entry->type) {
case DSA_PORT_MALL_MIRROR:
- ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror);
+ if (ds->ops->port_mirror_del)
+ ds->ops->port_mirror_del(ds, dp->index,
+ &mall_tc_entry->mirror);
+ break;
+ case DSA_PORT_MALL_POLICER:
+ if (ds->ops->port_policer_del)
+ ds->ops->port_policer_del(ds, dp->index);
break;
default:
WARN_ON(1);
@@ -942,6 +1023,64 @@ static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
}
}
+static int dsa_slave_add_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (!ds->ops->cls_flower_add)
+ return -EOPNOTSUPP;
+
+ return ds->ops->cls_flower_add(ds, port, cls, ingress);
+}
+
+static int dsa_slave_del_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (!ds->ops->cls_flower_del)
+ return -EOPNOTSUPP;
+
+ return ds->ops->cls_flower_del(ds, port, cls, ingress);
+}
+
+static int dsa_slave_stats_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+ if (!ds->ops->cls_flower_stats)
+ return -EOPNOTSUPP;
+
+ return ds->ops->cls_flower_stats(ds, port, cls, ingress);
+}
+
+static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
+{
+ switch (cls->command) {
+ case FLOW_CLS_REPLACE:
+ return dsa_slave_add_cls_flower(dev, cls, ingress);
+ case FLOW_CLS_DESTROY:
+ return dsa_slave_del_cls_flower(dev, cls, ingress);
+ case FLOW_CLS_STATS:
+ return dsa_slave_stats_cls_flower(dev, cls, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv, bool ingress)
{
@@ -953,6 +1092,8 @@ static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSMATCHALL:
return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
+ case TC_SETUP_CLSFLOWER:
+ return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
default:
return -EOPNOTSUPP;
}
@@ -1154,6 +1295,208 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
return dsa_port_vid_del(dp, vid);
}
+struct dsa_hw_port {
+ struct list_head list;
+ struct net_device *dev;
+ int old_mtu;
+};
+
+static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
+{
+ const struct dsa_hw_port *p;
+ int err;
+
+ list_for_each_entry(p, hw_port_list, list) {
+ if (p->dev->mtu == mtu)
+ continue;
+
+ err = dev_set_mtu(p->dev, mtu);
+ if (err)
+ goto rollback;
+ }
+
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(p, hw_port_list, list) {
+ if (p->dev->mtu == p->old_mtu)
+ continue;
+
+ if (dev_set_mtu(p->dev, p->old_mtu))
+ netdev_err(p->dev, "Failed to restore MTU\n");
+ }
+
+ return err;
+}
+
+static void dsa_hw_port_list_free(struct list_head *hw_port_list)
+{
+ struct dsa_hw_port *p, *n;
+
+ list_for_each_entry_safe(p, n, hw_port_list, list)
+ kfree(p);
+}
+
+/* Make the hardware datapath to/from @dev limited to a common MTU */
+void dsa_bridge_mtu_normalization(struct dsa_port *dp)
+{
+ struct list_head hw_port_list;
+ struct dsa_switch_tree *dst;
+ int min_mtu = ETH_MAX_MTU;
+ struct dsa_port *other_dp;
+ int err;
+
+ if (!dp->ds->mtu_enforcement_ingress)
+ return;
+
+ if (!dp->bridge_dev)
+ return;
+
+ INIT_LIST_HEAD(&hw_port_list);
+
+ /* Populate the list of ports that are part of the same bridge
+ * as the newly added/modified port
+ */
+ list_for_each_entry(dst, &dsa_tree_list, list) {
+ list_for_each_entry(other_dp, &dst->ports, list) {
+ struct dsa_hw_port *hw_port;
+ struct net_device *slave;
+
+ if (other_dp->type != DSA_PORT_TYPE_USER)
+ continue;
+
+ if (other_dp->bridge_dev != dp->bridge_dev)
+ continue;
+
+ if (!other_dp->ds->mtu_enforcement_ingress)
+ continue;
+
+ slave = other_dp->slave;
+
+ if (min_mtu > slave->mtu)
+ min_mtu = slave->mtu;
+
+ hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
+ if (!hw_port)
+ goto out;
+
+ hw_port->dev = slave;
+ hw_port->old_mtu = slave->mtu;
+
+ list_add(&hw_port->list, &hw_port_list);
+ }
+ }
+
+ /* Attempt to configure the entire hardware bridge to the newly added
+ * interface's MTU first, regardless of whether the intention of the
+ * user was to raise or lower it.
+ */
+ err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
+ if (!err)
+ goto out;
+
+ /* Clearly that didn't work out so well, so just set the minimum MTU
+ * on all hardware bridge ports now. If this fails too, the rollback
+ * inside dsa_hw_port_list_set_mtu() restores the old MTUs anyway.
+ */
+ dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);
+
+out:
+ dsa_hw_port_list_free(&hw_port_list);
+}
+
+static int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct net_device *master = dsa_slave_to_master(dev);
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->dp->ds;
+ struct dsa_port *cpu_dp;
+ int port = p->dp->index;
+ int largest_mtu = 0;
+ int new_master_mtu;
+ int old_master_mtu;
+ int mtu_limit;
+ int cpu_mtu;
+ int err, i;
+
+ if (!ds->ops->port_change_mtu)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ int slave_mtu;
+
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ /* During probe, this function will be called for each slave
+ * device, while not all of them have been allocated. That's
+ * ok, it doesn't change what the maximum is, so ignore it.
+ */
+ if (!dsa_to_port(ds, i)->slave)
+ continue;
+
+ /* Pretend that we already applied the setting, which we
+ * actually haven't (still haven't done all integrity checks)
+ */
+ if (i == port)
+ slave_mtu = new_mtu;
+ else
+ slave_mtu = dsa_to_port(ds, i)->slave->mtu;
+
+ if (largest_mtu < slave_mtu)
+ largest_mtu = slave_mtu;
+ }
+
+ cpu_dp = dsa_to_port(ds, port)->cpu_dp;
+
+ mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
+ old_master_mtu = master->mtu;
+ new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead;
+ if (new_master_mtu > mtu_limit)
+ return -ERANGE;
+
+ /* If the master MTU isn't over limit, there's no need to check the CPU
+ * MTU, since that surely isn't either.
+ */
+ cpu_mtu = largest_mtu;
+
+ /* Start applying stuff */
+ if (new_master_mtu != old_master_mtu) {
+ err = dev_set_mtu(master, new_master_mtu);
+ if (err < 0)
+ goto out_master_failed;
+
+ /* We only need to propagate the MTU of the CPU port to
+ * upstream switches.
+ */
+ err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true);
+ if (err)
+ goto out_cpu_failed;
+ }
+
+ err = dsa_port_mtu_change(dp, new_mtu, false);
+ if (err)
+ goto out_port_failed;
+
+ dev->mtu = new_mtu;
+
+ dsa_bridge_mtu_normalization(dp);
+
+ return 0;
+
+out_port_failed:
+ if (new_master_mtu != old_master_mtu)
+ dsa_port_mtu_change(cpu_dp, old_master_mtu -
+ cpu_dp->tag_ops->overhead,
+ true);
+out_cpu_failed:
+ if (new_master_mtu != old_master_mtu)
+ dev_set_mtu(master, old_master_mtu);
+out_master_failed:
+ return err;
+}
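
Worked example of the limit check above: with a 4-byte tag (BRCM_TAG_LEN),
raising one user port to MTU 9000 makes largest_mtu = 9000, so the master must
carry 9004 octets of payload (largest_mtu plus the tagger's overhead); if
master->max_mtu is smaller than that, the request is rejected with -ERANGE
before any port is touched.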
+
static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_drvinfo = dsa_slave_get_drvinfo,
.get_regs_len = dsa_slave_get_regs_len,
@@ -1231,6 +1574,7 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
.ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
.ndo_get_devlink_port = dsa_slave_get_devlink_port,
+ .ndo_change_mtu = dsa_slave_change_mtu,
};
static struct device_type dsa_type = {
@@ -1241,7 +1585,8 @@ void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
{
const struct dsa_port *dp = dsa_to_port(ds, port);
- phylink_mac_change(dp->pl, up);
+ if (dp->pl)
+ phylink_mac_change(dp->pl, up);
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
@@ -1401,7 +1746,10 @@ int dsa_slave_create(struct dsa_port *port)
slave_dev->priv_flags |= IFF_NO_QUEUE;
slave_dev->netdev_ops = &dsa_slave_netdev_ops;
slave_dev->min_mtu = 0;
- slave_dev->max_mtu = ETH_MAX_MTU;
+ if (ds->ops->port_max_mtu)
+ slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
+ else
+ slave_dev->max_mtu = ETH_MAX_MTU;
SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
SET_NETDEV_DEV(slave_dev, port->ds->dev);
@@ -1419,6 +1767,15 @@ int dsa_slave_create(struct dsa_port *port)
p->xmit = cpu_dp->tag_ops->xmit;
port->slave = slave_dev;
+ rtnl_lock();
+ ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
+ rtnl_unlock();
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(ds->dev, "error %d setting MTU on port %d\n",
+ ret, port->index);
+ goto out_free;
+ }
+
netif_carrier_off(slave_dev);
ret = dsa_slave_phy_setup(slave_dev);
@@ -1481,6 +1838,8 @@ static int dsa_slave_changeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking) {
err = dsa_port_bridge_join(dp, info->upper_dev);
+ if (!err)
+ dsa_bridge_mtu_normalization(dp);
err = notifier_from_errno(err);
} else {
dsa_port_bridge_leave(dp, info->upper_dev);
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index df4abe897ed6..f3c32ff552b3 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -52,6 +52,40 @@ static int dsa_switch_ageing_time(struct dsa_switch *ds,
return 0;
}
+static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
+ struct dsa_notifier_mtu_info *info)
+{
+ if (ds->index == info->sw_index)
+ return (port == info->port) || dsa_is_dsa_port(ds, port);
+
+ if (!info->propagate_upstream)
+ return false;
+
+ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ return true;
+
+ return false;
+}
+
+static int dsa_switch_mtu(struct dsa_switch *ds,
+ struct dsa_notifier_mtu_info *info)
+{
+ int port, ret;
+
+ if (!ds->ops->port_change_mtu)
+ return -EOPNOTSUPP;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_switch_mtu_match(ds, port, info)) {
+ ret = ds->ops->port_change_mtu(ds, port, info->mtu);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
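
The sending side of this notifier lives in net/dsa/port.c and is not part of
this hunk; judging by the other DSA cross-chip notifiers, it presumably looks
like this (a sketch, not the verbatim patch):

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool propagate_upstream)
{
	struct dsa_notifier_mtu_info info = {
		.sw_index = dp->ds->index,
		.propagate_upstream = propagate_upstream,
		.port = dp->index,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

dsa_switch_mtu() above then runs on every switch in the tree and uses
dsa_switch_mtu_match() to decide which ports get programmed.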
+
static int dsa_switch_bridge_join(struct dsa_switch *ds,
struct dsa_notifier_bridge_info *info)
{
@@ -328,6 +362,9 @@ static int dsa_switch_event(struct notifier_block *nb,
case DSA_NOTIFIER_VLAN_DEL:
err = dsa_switch_vlan_del(ds, info);
break;
+ case DSA_NOTIFIER_MTU:
+ err = dsa_switch_mtu(ds, info);
+ break;
default:
err = -EOPNOTSUPP;
break;
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 9169b63a89e3..cc8512b5f9e2 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -144,6 +144,27 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
return skb;
}
+
+static int brcm_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
+ int *offset)
+{
+ /* We have been called on the DSA master network device after
+ * eth_type_trans() which pulled the Ethernet header already.
+ * Frames have one of these two layouts:
+ * -----------------------------------
+ * | MAC DA | MAC SA | 4b tag | Type | DSA_TAG_PROTO_BRCM
+ * -----------------------------------
+ * -----------------------------------
+ * | 4b tag | MAC DA | MAC SA | Type | DSA_TAG_PROTO_BRCM_PREPEND
+ * -----------------------------------
+	 * skb->data points 2 bytes before the actual Ethernet type field,
+	 * and there is a 4-byte offset between where skb->data points and
+	 * where the payload starts.
+ */
+ *offset = BRCM_TAG_LEN;
+ *proto = ((__be16 *)skb->data)[1];
+ return 0;
+}
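
For reference, the consumer of the new .flow_dissect hook is the core flow
dissector; roughly (paraphrased from __skb_flow_dissect(), not part of this
patch), it does:

	if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
		     proto == htons(ETH_P_XDSA))) {
		const struct dsa_device_ops *ops;
		int offset = 0;

		ops = skb->dev->dsa_ptr->tag_ops;
		if (ops->flow_dissect &&
		    !ops->flow_dissect(skb, &proto, &offset)) {
			hlen -= offset;
			nhoff += offset;
		}
	}

so returning the tag length and the real protocol here lets RPS/RFS hash
tagged frames received on the DSA master correctly.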
#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM)
@@ -179,6 +200,7 @@ static const struct dsa_device_ops brcm_netdev_ops = {
.xmit = brcm_tag_xmit,
.rcv = brcm_tag_rcv,
.overhead = BRCM_TAG_LEN,
+ .flow_dissect = brcm_tag_flow_dissect,
};
DSA_TAG_DRIVER(brcm_netdev_ops);
@@ -207,6 +229,7 @@ static const struct dsa_device_ops brcm_prepend_netdev_ops = {
.xmit = brcm_tag_xmit_prepend,
.rcv = brcm_tag_rcv_prepend,
.overhead = BRCM_TAG_LEN,
+ .flow_dissect = brcm_tag_flow_dissect,
};
DSA_TAG_DRIVER(brcm_prepend_netdev_ops);
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index 8e3e7283d430..59de1315100f 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -153,7 +153,8 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
memset(injection, 0, OCELOT_TAG_LEN);
- src = dsa_upstream_port(ds, port);
+ /* Set the source port as the CPU port module and not the NPI port */
+ src = ocelot->num_phys_ports;
dest = BIT(port);
bypass = true;
qos_class = skb->priority;
diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile
index 424545a4aaec..6c360c9c9370 100644
--- a/net/ethtool/Makefile
+++ b/net/ethtool/Makefile
@@ -5,4 +5,5 @@ obj-y += ioctl.o common.o
obj-$(CONFIG_ETHTOOL_NETLINK) += ethtool_nl.o
ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o \
- linkstate.o debug.o wol.o
+ linkstate.o debug.o wol.o features.o privflags.o rings.o \
+ channels.o coalesce.o pause.o eee.o tsinfo.o
diff --git a/net/ethtool/bitset.c b/net/ethtool/bitset.c
index ef9197541cb3..dae7402eaca3 100644
--- a/net/ethtool/bitset.c
+++ b/net/ethtool/bitset.c
@@ -588,6 +588,100 @@ int ethnl_update_bitset32(u32 *bitmap, unsigned int nbits,
return 0;
}
+/**
+ * ethnl_parse_bitset() - Compute effective value and mask from bitset nest
+ * @val: unsigned long based bitmap to put value into
+ * @mask: unsigned long based bitmap to put mask into
+ * @nbits: size of @val and @mask bitmaps
+ * @attr: nest attribute to parse and apply
+ * @names: array of bit names; may be null for compact format
+ * @extack: extack for error reporting
+ *
+ * Provide @nbits sized bitmaps for value and mask so that
+ * x = (val & mask) | (x & ~mask) modifies any @nbits sized bitmap x the
+ * same way ethnl_update_bitset() with the same bitset attribute would.
+ *
+ * Return: negative error code on failure, 0 on success
+ */
+int ethnl_parse_bitset(unsigned long *val, unsigned long *mask,
+ unsigned int nbits, const struct nlattr *attr,
+ ethnl_string_array_t names,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[ETHTOOL_A_BITSET_MAX + 1];
+ const struct nlattr *bit_attr;
+ bool no_mask;
+ int rem;
+ int ret;
+
+ if (!attr)
+ return 0;
+ ret = nla_parse_nested(tb, ETHTOOL_A_BITSET_MAX, attr, bitset_policy,
+ extack);
+ if (ret < 0)
+ return ret;
+ no_mask = tb[ETHTOOL_A_BITSET_NOMASK];
+
+ if (!tb[ETHTOOL_A_BITSET_BITS]) {
+ unsigned int change_bits;
+
+ ret = ethnl_compact_sanity_checks(nbits, attr, tb, extack);
+ if (ret < 0)
+ return ret;
+
+ change_bits = nla_get_u32(tb[ETHTOOL_A_BITSET_SIZE]);
+ bitmap_from_arr32(val, nla_data(tb[ETHTOOL_A_BITSET_VALUE]),
+ change_bits);
+ if (change_bits < nbits)
+ bitmap_clear(val, change_bits, nbits - change_bits);
+ if (no_mask) {
+ bitmap_fill(mask, nbits);
+ } else {
+ bitmap_from_arr32(mask,
+ nla_data(tb[ETHTOOL_A_BITSET_MASK]),
+ change_bits);
+ if (change_bits < nbits)
+ bitmap_clear(mask, change_bits,
+ nbits - change_bits);
+ }
+
+ return 0;
+ }
+
+ if (tb[ETHTOOL_A_BITSET_VALUE]) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_BITSET_VALUE],
+ "value only allowed in compact bitset");
+ return -EINVAL;
+ }
+ if (tb[ETHTOOL_A_BITSET_MASK]) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_BITSET_MASK],
+ "mask only allowed in compact bitset");
+ return -EINVAL;
+ }
+
+ bitmap_zero(val, nbits);
+ if (no_mask)
+ bitmap_fill(mask, nbits);
+ else
+ bitmap_zero(mask, nbits);
+
+ nla_for_each_nested(bit_attr, tb[ETHTOOL_A_BITSET_BITS], rem) {
+ unsigned int idx;
+ bool bit_val;
+
+ ret = ethnl_parse_bit(&idx, &bit_val, nbits, bit_attr, no_mask,
+ names, extack);
+ if (ret < 0)
+ return ret;
+ if (bit_val)
+ __set_bit(idx, val);
+ if (!no_mask)
+ __set_bit(idx, mask);
+ }
+
+ return 0;
+}
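
A hypothetical caller applies the parsed value/mask pair to a target bitmap x
exactly per the kernel-doc formula x = (val & mask) | (x & ~mask), e.g. with
the standard bitmap helpers (x, val, mask and nbits stand for the caller's own
bitmaps and size here):

	bitmap_and(val, val, mask, nbits);	/* keep only masked-in bits */
	bitmap_andnot(x, x, mask, nbits);	/* clear the bits being changed */
	bitmap_or(x, x, val, nbits);		/* merge in the new values */

This is the same pattern ethnl_set_features() later in this patch uses on
req_wanted/req_mask.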
+
#if BITS_PER_LONG == 64 && defined(__BIG_ENDIAN)
/* 64-bit big endian architectures are the only case when u32 based bitmaps
diff --git a/net/ethtool/bitset.h b/net/ethtool/bitset.h
index b849f9d19676..c2c2e0051d00 100644
--- a/net/ethtool/bitset.h
+++ b/net/ethtool/bitset.h
@@ -26,5 +26,9 @@ int ethnl_update_bitset(unsigned long *bitmap, unsigned int nbits,
int ethnl_update_bitset32(u32 *bitmap, unsigned int nbits,
const struct nlattr *attr, ethnl_string_array_t names,
struct netlink_ext_ack *extack, bool *mod);
+int ethnl_parse_bitset(unsigned long *val, unsigned long *mask,
+ unsigned int nbits, const struct nlattr *attr,
+ ethnl_string_array_t names,
+ struct netlink_ext_ack *extack);
#endif /* _NET_ETHTOOL_BITSET_H */
diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
new file mode 100644
index 000000000000..389924b65d05
--- /dev/null
+++ b/net/ethtool/channels.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <net/xdp_sock.h>
+
+#include "netlink.h"
+#include "common.h"
+
+struct channels_req_info {
+ struct ethnl_req_info base;
+};
+
+struct channels_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_channels channels;
+};
+
+#define CHANNELS_REPDATA(__reply_base) \
+ container_of(__reply_base, struct channels_reply_data, base)
+
+static const struct nla_policy
+channels_get_policy[ETHTOOL_A_CHANNELS_MAX + 1] = {
+ [ETHTOOL_A_CHANNELS_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_CHANNELS_RX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_TX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_OTHER_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_COMBINED_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_RX_COUNT] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_TX_COUNT] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_OTHER_COUNT] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_COMBINED_COUNT] = { .type = NLA_REJECT },
+};
+
+static int channels_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct channels_reply_data *data = CHANNELS_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ int ret;
+
+ if (!dev->ethtool_ops->get_channels)
+ return -EOPNOTSUPP;
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+ dev->ethtool_ops->get_channels(dev, &data->channels);
+ ethnl_ops_complete(dev);
+
+ return 0;
+}
+
+static int channels_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ return nla_total_size(sizeof(u32)) + /* _CHANNELS_RX_MAX */
+ nla_total_size(sizeof(u32)) + /* _CHANNELS_TX_MAX */
+ nla_total_size(sizeof(u32)) + /* _CHANNELS_OTHER_MAX */
+ nla_total_size(sizeof(u32)) + /* _CHANNELS_COMBINED_MAX */
+ nla_total_size(sizeof(u32)) + /* _CHANNELS_RX_COUNT */
+ nla_total_size(sizeof(u32)) + /* _CHANNELS_TX_COUNT */
+ nla_total_size(sizeof(u32)) + /* _CHANNELS_OTHER_COUNT */
+ nla_total_size(sizeof(u32)); /* _CHANNELS_COMBINED_COUNT */
+}
+
+static int channels_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct channels_reply_data *data = CHANNELS_REPDATA(reply_base);
+ const struct ethtool_channels *channels = &data->channels;
+
+ if ((channels->max_rx &&
+ (nla_put_u32(skb, ETHTOOL_A_CHANNELS_RX_MAX,
+ channels->max_rx) ||
+ nla_put_u32(skb, ETHTOOL_A_CHANNELS_RX_COUNT,
+ channels->rx_count))) ||
+ (channels->max_tx &&
+ (nla_put_u32(skb, ETHTOOL_A_CHANNELS_TX_MAX,
+ channels->max_tx) ||
+ nla_put_u32(skb, ETHTOOL_A_CHANNELS_TX_COUNT,
+ channels->tx_count))) ||
+ (channels->max_other &&
+ (nla_put_u32(skb, ETHTOOL_A_CHANNELS_OTHER_MAX,
+ channels->max_other) ||
+ nla_put_u32(skb, ETHTOOL_A_CHANNELS_OTHER_COUNT,
+ channels->other_count))) ||
+ (channels->max_combined &&
+ (nla_put_u32(skb, ETHTOOL_A_CHANNELS_COMBINED_MAX,
+ channels->max_combined) ||
+ nla_put_u32(skb, ETHTOOL_A_CHANNELS_COMBINED_COUNT,
+ channels->combined_count))))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+const struct ethnl_request_ops ethnl_channels_request_ops = {
+ .request_cmd = ETHTOOL_MSG_CHANNELS_GET,
+ .reply_cmd = ETHTOOL_MSG_CHANNELS_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_CHANNELS_HEADER,
+ .max_attr = ETHTOOL_A_CHANNELS_MAX,
+ .req_info_size = sizeof(struct channels_req_info),
+ .reply_data_size = sizeof(struct channels_reply_data),
+ .request_policy = channels_get_policy,
+
+ .prepare_data = channels_prepare_data,
+ .reply_size = channels_reply_size,
+ .fill_reply = channels_fill_reply,
+};
+
+/* CHANNELS_SET */
+
+static const struct nla_policy
+channels_set_policy[ETHTOOL_A_CHANNELS_MAX + 1] = {
+ [ETHTOOL_A_CHANNELS_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_CHANNELS_RX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_TX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_OTHER_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_COMBINED_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_CHANNELS_RX_COUNT] = { .type = NLA_U32 },
+ [ETHTOOL_A_CHANNELS_TX_COUNT] = { .type = NLA_U32 },
+ [ETHTOOL_A_CHANNELS_OTHER_COUNT] = { .type = NLA_U32 },
+ [ETHTOOL_A_CHANNELS_COMBINED_COUNT] = { .type = NLA_U32 },
+};
+
+int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb[ETHTOOL_A_CHANNELS_MAX + 1];
+ unsigned int from_channel, old_total, i;
+ struct ethtool_channels channels = {};
+ struct ethnl_req_info req_info = {};
+ const struct nlattr *err_attr;
+ const struct ethtool_ops *ops;
+ struct net_device *dev;
+ u32 max_rx_in_use = 0;
+ bool mod = false;
+ int ret;
+
+ ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
+ ETHTOOL_A_CHANNELS_MAX, channels_set_policy,
+ info->extack);
+ if (ret < 0)
+ return ret;
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_CHANNELS_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+ dev = req_info.dev;
+ ops = dev->ethtool_ops;
+ ret = -EOPNOTSUPP;
+ if (!ops->get_channels || !ops->set_channels)
+ goto out_dev;
+
+ rtnl_lock();
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out_rtnl;
+ ops->get_channels(dev, &channels);
+ old_total = channels.combined_count +
+ max(channels.rx_count, channels.tx_count);
+
+ ethnl_update_u32(&channels.rx_count, tb[ETHTOOL_A_CHANNELS_RX_COUNT],
+ &mod);
+ ethnl_update_u32(&channels.tx_count, tb[ETHTOOL_A_CHANNELS_TX_COUNT],
+ &mod);
+ ethnl_update_u32(&channels.other_count,
+ tb[ETHTOOL_A_CHANNELS_OTHER_COUNT], &mod);
+ ethnl_update_u32(&channels.combined_count,
+ tb[ETHTOOL_A_CHANNELS_COMBINED_COUNT], &mod);
+ ret = 0;
+ if (!mod)
+ goto out_ops;
+
+ /* ensure new channel counts are within limits */
+ if (channels.rx_count > channels.max_rx)
+ err_attr = tb[ETHTOOL_A_CHANNELS_RX_COUNT];
+ else if (channels.tx_count > channels.max_tx)
+ err_attr = tb[ETHTOOL_A_CHANNELS_TX_COUNT];
+ else if (channels.other_count > channels.max_other)
+ err_attr = tb[ETHTOOL_A_CHANNELS_OTHER_COUNT];
+ else if (channels.combined_count > channels.max_combined)
+ err_attr = tb[ETHTOOL_A_CHANNELS_COMBINED_COUNT];
+ else
+ err_attr = NULL;
+ if (err_attr) {
+ ret = -EINVAL;
+ NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
+ "requested channel count exceeds maximum");
+ goto out_ops;
+ }
+
+ /* ensure the new Rx count fits within the configured Rx flow
+ * indirection table settings
+ */
+ if (netif_is_rxfh_configured(dev) &&
+ !ethtool_get_max_rxfh_channel(dev, &max_rx_in_use) &&
+ (channels.combined_count + channels.rx_count) <= max_rx_in_use) {
+		ret = -EINVAL;
+		GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing indirection table settings");
+		goto out_ops;
+ }
+
+	/* If channels are being disabled, make sure none of the queues that
+	 * go away still has a zero-copy AF_XDP socket attached
+	 */
+ from_channel = channels.combined_count +
+ min(channels.rx_count, channels.tx_count);
+ for (i = from_channel; i < old_total; i++)
+ if (xdp_get_umem_from_qid(dev, i)) {
+			ret = -EINVAL;
+			GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing zerocopy AF_XDP sockets");
+			goto out_ops;
+ }
+
+ ret = dev->ethtool_ops->set_channels(dev, &channels);
+ if (ret < 0)
+ goto out_ops;
+ ethtool_notify(dev, ETHTOOL_MSG_CHANNELS_NTF, NULL);
+
+out_ops:
+ ethnl_ops_complete(dev);
+out_rtnl:
+ rtnl_unlock();
+out_dev:
+ dev_put(dev);
+ return ret;
+}
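
This handler is the netlink counterpart of the ETHTOOL_SCHANNELS ioctl, i.e.
what a netlink-aware ethtool invokes for `ethtool -L eth0 combined 4`; on a
successful change a CHANNELS_NTF notification is also broadcast so listeners
see the new counts. Note that a direct return from under rtnl_lock() would
leak the lock and the device reference, hence the goto out_ops fixups above.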
diff --git a/net/ethtool/coalesce.c b/net/ethtool/coalesce.c
new file mode 100644
index 000000000000..6afd99042d67
--- /dev/null
+++ b/net/ethtool/coalesce.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "netlink.h"
+#include "common.h"
+
+struct coalesce_req_info {
+ struct ethnl_req_info base;
+};
+
+struct coalesce_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_coalesce coalesce;
+ u32 supported_params;
+};
+
+#define COALESCE_REPDATA(__reply_base) \
+ container_of(__reply_base, struct coalesce_reply_data, base)
+
+#define __SUPPORTED_OFFSET ETHTOOL_A_COALESCE_RX_USECS
+static u32 attr_to_mask(unsigned int attr_type)
+{
+ return BIT(attr_type - __SUPPORTED_OFFSET);
+}
+
+/* build time check that indices in ethtool_ops::supported_coalesce_params
+ * match corresponding attribute types with an offset
+ */
+#define __CHECK_SUPPORTED_OFFSET(x) \
+ static_assert((ETHTOOL_ ## x) == \
+ BIT((ETHTOOL_A_ ## x) - __SUPPORTED_OFFSET))
+__CHECK_SUPPORTED_OFFSET(COALESCE_RX_USECS);
+__CHECK_SUPPORTED_OFFSET(COALESCE_RX_MAX_FRAMES);
+__CHECK_SUPPORTED_OFFSET(COALESCE_RX_USECS_IRQ);
+__CHECK_SUPPORTED_OFFSET(COALESCE_RX_MAX_FRAMES_IRQ);
+__CHECK_SUPPORTED_OFFSET(COALESCE_TX_USECS);
+__CHECK_SUPPORTED_OFFSET(COALESCE_TX_MAX_FRAMES);
+__CHECK_SUPPORTED_OFFSET(COALESCE_TX_USECS_IRQ);
+__CHECK_SUPPORTED_OFFSET(COALESCE_TX_MAX_FRAMES_IRQ);
+__CHECK_SUPPORTED_OFFSET(COALESCE_STATS_BLOCK_USECS);
+__CHECK_SUPPORTED_OFFSET(COALESCE_USE_ADAPTIVE_RX);
+__CHECK_SUPPORTED_OFFSET(COALESCE_USE_ADAPTIVE_TX);
+__CHECK_SUPPORTED_OFFSET(COALESCE_PKT_RATE_LOW);
+__CHECK_SUPPORTED_OFFSET(COALESCE_RX_USECS_LOW);
+__CHECK_SUPPORTED_OFFSET(COALESCE_RX_MAX_FRAMES_LOW);
+__CHECK_SUPPORTED_OFFSET(COALESCE_TX_USECS_LOW);
+__CHECK_SUPPORTED_OFFSET(COALESCE_TX_MAX_FRAMES_LOW);
+__CHECK_SUPPORTED_OFFSET(COALESCE_PKT_RATE_HIGH);
+__CHECK_SUPPORTED_OFFSET(COALESCE_RX_USECS_HIGH);
+__CHECK_SUPPORTED_OFFSET(COALESCE_RX_MAX_FRAMES_HIGH);
+__CHECK_SUPPORTED_OFFSET(COALESCE_TX_USECS_HIGH);
+__CHECK_SUPPORTED_OFFSET(COALESCE_TX_MAX_FRAMES_HIGH);
+__CHECK_SUPPORTED_OFFSET(COALESCE_RATE_SAMPLE_INTERVAL);
+
+static const struct nla_policy
+coalesce_get_policy[ETHTOOL_A_COALESCE_MAX + 1] = {
+ [ETHTOOL_A_COALESCE_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_COALESCE_RX_USECS] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_RX_MAX_FRAMES] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_RX_USECS_IRQ] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_TX_USECS] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_TX_MAX_FRAMES] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_TX_USECS_IRQ] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_STATS_BLOCK_USECS] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_PKT_RATE_LOW] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_RX_USECS_LOW] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_TX_USECS_LOW] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_PKT_RATE_HIGH] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_RX_USECS_HIGH] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_TX_USECS_HIGH] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL] = { .type = NLA_REJECT },
+};
+
+static int coalesce_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct coalesce_reply_data *data = COALESCE_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ int ret;
+
+ if (!dev->ethtool_ops->get_coalesce)
+ return -EOPNOTSUPP;
+ data->supported_params = dev->ethtool_ops->supported_coalesce_params;
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+ ret = dev->ethtool_ops->get_coalesce(dev, &data->coalesce);
+ ethnl_ops_complete(dev);
+
+ return ret;
+}
+
+static int coalesce_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ return nla_total_size(sizeof(u32)) + /* _RX_USECS */
+ nla_total_size(sizeof(u32)) + /* _RX_MAX_FRAMES */
+ nla_total_size(sizeof(u32)) + /* _RX_USECS_IRQ */
+ nla_total_size(sizeof(u32)) + /* _RX_MAX_FRAMES_IRQ */
+ nla_total_size(sizeof(u32)) + /* _TX_USECS */
+ nla_total_size(sizeof(u32)) + /* _TX_MAX_FRAMES */
+ nla_total_size(sizeof(u32)) + /* _TX_USECS_IRQ */
+ nla_total_size(sizeof(u32)) + /* _TX_MAX_FRAMES_IRQ */
+ nla_total_size(sizeof(u32)) + /* _STATS_BLOCK_USECS */
+ nla_total_size(sizeof(u8)) + /* _USE_ADAPTIVE_RX */
+ nla_total_size(sizeof(u8)) + /* _USE_ADAPTIVE_TX */
+ nla_total_size(sizeof(u32)) + /* _PKT_RATE_LOW */
+ nla_total_size(sizeof(u32)) + /* _RX_USECS_LOW */
+ nla_total_size(sizeof(u32)) + /* _RX_MAX_FRAMES_LOW */
+ nla_total_size(sizeof(u32)) + /* _TX_USECS_LOW */
+ nla_total_size(sizeof(u32)) + /* _TX_MAX_FRAMES_LOW */
+ nla_total_size(sizeof(u32)) + /* _PKT_RATE_HIGH */
+ nla_total_size(sizeof(u32)) + /* _RX_USECS_HIGH */
+ nla_total_size(sizeof(u32)) + /* _RX_MAX_FRAMES_HIGH */
+ nla_total_size(sizeof(u32)) + /* _TX_USECS_HIGH */
+ nla_total_size(sizeof(u32)) + /* _TX_MAX_FRAMES_HIGH */
+ nla_total_size(sizeof(u32)); /* _RATE_SAMPLE_INTERVAL */
+}
+
+static bool coalesce_put_u32(struct sk_buff *skb, u16 attr_type, u32 val,
+ u32 supported_params)
+{
+ if (!val && !(supported_params & attr_to_mask(attr_type)))
+ return false;
+ return nla_put_u32(skb, attr_type, val);
+}
+
+static bool coalesce_put_bool(struct sk_buff *skb, u16 attr_type, u32 val,
+ u32 supported_params)
+{
+ if (!val && !(supported_params & attr_to_mask(attr_type)))
+ return false;
+ return nla_put_u8(skb, attr_type, !!val);
+}
+
+static int coalesce_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct coalesce_reply_data *data = COALESCE_REPDATA(reply_base);
+ const struct ethtool_coalesce *coal = &data->coalesce;
+ u32 supported = data->supported_params;
+
+ if (coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_USECS,
+ coal->rx_coalesce_usecs, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_MAX_FRAMES,
+ coal->rx_max_coalesced_frames, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_USECS_IRQ,
+ coal->rx_coalesce_usecs_irq, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ,
+ coal->rx_max_coalesced_frames_irq, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_USECS,
+ coal->tx_coalesce_usecs, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_MAX_FRAMES,
+ coal->tx_max_coalesced_frames, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_USECS_IRQ,
+ coal->tx_coalesce_usecs_irq, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ,
+ coal->tx_max_coalesced_frames_irq, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_STATS_BLOCK_USECS,
+ coal->stats_block_coalesce_usecs, supported) ||
+ coalesce_put_bool(skb, ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX,
+ coal->use_adaptive_rx_coalesce, supported) ||
+ coalesce_put_bool(skb, ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX,
+ coal->use_adaptive_tx_coalesce, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_PKT_RATE_LOW,
+ coal->pkt_rate_low, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_USECS_LOW,
+ coal->rx_coalesce_usecs_low, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW,
+ coal->rx_max_coalesced_frames_low, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_USECS_LOW,
+ coal->tx_coalesce_usecs_low, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW,
+ coal->tx_max_coalesced_frames_low, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_PKT_RATE_HIGH,
+ coal->pkt_rate_high, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_USECS_HIGH,
+ coal->rx_coalesce_usecs_high, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH,
+ coal->rx_max_coalesced_frames_high, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_USECS_HIGH,
+ coal->tx_coalesce_usecs_high, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH,
+ coal->tx_max_coalesced_frames_high, supported) ||
+ coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL,
+ coal->rate_sample_interval, supported))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+const struct ethnl_request_ops ethnl_coalesce_request_ops = {
+ .request_cmd = ETHTOOL_MSG_COALESCE_GET,
+ .reply_cmd = ETHTOOL_MSG_COALESCE_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_COALESCE_HEADER,
+ .max_attr = ETHTOOL_A_COALESCE_MAX,
+ .req_info_size = sizeof(struct coalesce_req_info),
+ .reply_data_size = sizeof(struct coalesce_reply_data),
+ .request_policy = coalesce_get_policy,
+
+ .prepare_data = coalesce_prepare_data,
+ .reply_size = coalesce_reply_size,
+ .fill_reply = coalesce_fill_reply,
+};
+
+/* COALESCE_SET */
+
+static const struct nla_policy
+coalesce_set_policy[ETHTOOL_A_COALESCE_MAX + 1] = {
+ [ETHTOOL_A_COALESCE_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_COALESCE_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_COALESCE_RX_USECS] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_RX_MAX_FRAMES] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_RX_USECS_IRQ] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_TX_USECS] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_TX_MAX_FRAMES] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_TX_USECS_IRQ] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_STATS_BLOCK_USECS] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX] = { .type = NLA_U8 },
+ [ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX] = { .type = NLA_U8 },
+ [ETHTOOL_A_COALESCE_PKT_RATE_LOW] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_RX_USECS_LOW] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_TX_USECS_LOW] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_PKT_RATE_HIGH] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_RX_USECS_HIGH] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_TX_USECS_HIGH] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH] = { .type = NLA_U32 },
+ [ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL] = { .type = NLA_U32 },
+};
+
+int ethnl_set_coalesce(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb[ETHTOOL_A_COALESCE_MAX + 1];
+ struct ethtool_coalesce coalesce = {};
+ struct ethnl_req_info req_info = {};
+ const struct ethtool_ops *ops;
+ struct net_device *dev;
+ u32 supported_params;
+ bool mod = false;
+ int ret;
+ u16 a;
+
+ ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
+ ETHTOOL_A_COALESCE_MAX, coalesce_set_policy,
+ info->extack);
+ if (ret < 0)
+ return ret;
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_COALESCE_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+ dev = req_info.dev;
+ ops = dev->ethtool_ops;
+ ret = -EOPNOTSUPP;
+ if (!ops->get_coalesce || !ops->set_coalesce)
+ goto out_dev;
+
+ /* make sure that only supported parameters are present */
+ supported_params = ops->supported_coalesce_params;
+ for (a = ETHTOOL_A_COALESCE_RX_USECS; a < __ETHTOOL_A_COALESCE_CNT; a++)
+ if (tb[a] && !(supported_params & attr_to_mask(a))) {
+ ret = -EINVAL;
+ NL_SET_ERR_MSG_ATTR(info->extack, tb[a],
+ "cannot modify an unsupported parameter");
+ goto out_dev;
+ }
+
+ rtnl_lock();
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out_rtnl;
+ ret = ops->get_coalesce(dev, &coalesce);
+ if (ret < 0)
+ goto out_ops;
+
+ ethnl_update_u32(&coalesce.rx_coalesce_usecs,
+ tb[ETHTOOL_A_COALESCE_RX_USECS], &mod);
+ ethnl_update_u32(&coalesce.rx_max_coalesced_frames,
+ tb[ETHTOOL_A_COALESCE_RX_MAX_FRAMES], &mod);
+ ethnl_update_u32(&coalesce.rx_coalesce_usecs_irq,
+ tb[ETHTOOL_A_COALESCE_RX_USECS_IRQ], &mod);
+ ethnl_update_u32(&coalesce.rx_max_coalesced_frames_irq,
+ tb[ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ], &mod);
+ ethnl_update_u32(&coalesce.tx_coalesce_usecs,
+ tb[ETHTOOL_A_COALESCE_TX_USECS], &mod);
+ ethnl_update_u32(&coalesce.tx_max_coalesced_frames,
+ tb[ETHTOOL_A_COALESCE_TX_MAX_FRAMES], &mod);
+ ethnl_update_u32(&coalesce.tx_coalesce_usecs_irq,
+ tb[ETHTOOL_A_COALESCE_TX_USECS_IRQ], &mod);
+ ethnl_update_u32(&coalesce.tx_max_coalesced_frames_irq,
+ tb[ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ], &mod);
+ ethnl_update_u32(&coalesce.stats_block_coalesce_usecs,
+ tb[ETHTOOL_A_COALESCE_STATS_BLOCK_USECS], &mod);
+ ethnl_update_bool32(&coalesce.use_adaptive_rx_coalesce,
+ tb[ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX], &mod);
+ ethnl_update_bool32(&coalesce.use_adaptive_tx_coalesce,
+ tb[ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX], &mod);
+ ethnl_update_u32(&coalesce.pkt_rate_low,
+ tb[ETHTOOL_A_COALESCE_PKT_RATE_LOW], &mod);
+ ethnl_update_u32(&coalesce.rx_coalesce_usecs_low,
+ tb[ETHTOOL_A_COALESCE_RX_USECS_LOW], &mod);
+ ethnl_update_u32(&coalesce.rx_max_coalesced_frames_low,
+ tb[ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW], &mod);
+ ethnl_update_u32(&coalesce.tx_coalesce_usecs_low,
+ tb[ETHTOOL_A_COALESCE_TX_USECS_LOW], &mod);
+ ethnl_update_u32(&coalesce.tx_max_coalesced_frames_low,
+ tb[ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW], &mod);
+ ethnl_update_u32(&coalesce.pkt_rate_high,
+ tb[ETHTOOL_A_COALESCE_PKT_RATE_HIGH], &mod);
+ ethnl_update_u32(&coalesce.rx_coalesce_usecs_high,
+ tb[ETHTOOL_A_COALESCE_RX_USECS_HIGH], &mod);
+ ethnl_update_u32(&coalesce.rx_max_coalesced_frames_high,
+ tb[ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH], &mod);
+ ethnl_update_u32(&coalesce.tx_coalesce_usecs_high,
+ tb[ETHTOOL_A_COALESCE_TX_USECS_HIGH], &mod);
+ ethnl_update_u32(&coalesce.tx_max_coalesced_frames_high,
+ tb[ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH], &mod);
+ ethnl_update_u32(&coalesce.rate_sample_interval,
+ tb[ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL], &mod);
+ ret = 0;
+ if (!mod)
+ goto out_ops;
+
+ ret = dev->ethtool_ops->set_coalesce(dev, &coalesce);
+ if (ret < 0)
+ goto out_ops;
+ ethtool_notify(dev, ETHTOOL_MSG_COALESCE_NTF, NULL);
+
+out_ops:
+ ethnl_ops_complete(dev);
+out_rtnl:
+ rtnl_unlock();
+out_dev:
+ dev_put(dev);
+ return ret;
+}
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 636ec6d5110e..423e640e3876 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+
#include "common.h"
const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
@@ -60,6 +63,7 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
[NETIF_F_HW_TLS_TX_BIT] = "tls-hw-tx-offload",
[NETIF_F_HW_TLS_RX_BIT] = "tls-hw-rx-offload",
[NETIF_F_GRO_FRAGLIST_BIT] = "rx-gro-list",
+ [NETIF_F_HW_MACSEC_BIT] = "macsec-hw-offload",
};
const char
@@ -168,6 +172,7 @@ const char link_mode_names[][ETH_GSTRING_LEN] = {
__DEFINE_LINK_MODE_NAME(400000, LR8_ER8_FR8, Full),
__DEFINE_LINK_MODE_NAME(400000, DR8, Full),
__DEFINE_LINK_MODE_NAME(400000, CR8, Full),
+ __DEFINE_SPECIAL_MODE_NAME(FEC_LLRS, "LLRS"),
};
static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -202,6 +207,53 @@ const char wol_mode_names[][ETH_GSTRING_LEN] = {
};
static_assert(ARRAY_SIZE(wol_mode_names) == WOL_MODE_COUNT);
+const char sof_timestamping_names[][ETH_GSTRING_LEN] = {
+ [const_ilog2(SOF_TIMESTAMPING_TX_HARDWARE)] = "hardware-transmit",
+ [const_ilog2(SOF_TIMESTAMPING_TX_SOFTWARE)] = "software-transmit",
+ [const_ilog2(SOF_TIMESTAMPING_RX_HARDWARE)] = "hardware-receive",
+ [const_ilog2(SOF_TIMESTAMPING_RX_SOFTWARE)] = "software-receive",
+ [const_ilog2(SOF_TIMESTAMPING_SOFTWARE)] = "software-system-clock",
+ [const_ilog2(SOF_TIMESTAMPING_SYS_HARDWARE)] = "hardware-legacy-clock",
+ [const_ilog2(SOF_TIMESTAMPING_RAW_HARDWARE)] = "hardware-raw-clock",
+ [const_ilog2(SOF_TIMESTAMPING_OPT_ID)] = "option-id",
+ [const_ilog2(SOF_TIMESTAMPING_TX_SCHED)] = "sched-transmit",
+ [const_ilog2(SOF_TIMESTAMPING_TX_ACK)] = "ack-transmit",
+ [const_ilog2(SOF_TIMESTAMPING_OPT_CMSG)] = "option-cmsg",
+ [const_ilog2(SOF_TIMESTAMPING_OPT_TSONLY)] = "option-tsonly",
+ [const_ilog2(SOF_TIMESTAMPING_OPT_STATS)] = "option-stats",
+ [const_ilog2(SOF_TIMESTAMPING_OPT_PKTINFO)] = "option-pktinfo",
+ [const_ilog2(SOF_TIMESTAMPING_OPT_TX_SWHW)] = "option-tx-swhw",
+};
+static_assert(ARRAY_SIZE(sof_timestamping_names) == __SOF_TIMESTAMPING_CNT);
+
+const char ts_tx_type_names[][ETH_GSTRING_LEN] = {
+ [HWTSTAMP_TX_OFF] = "off",
+ [HWTSTAMP_TX_ON] = "on",
+ [HWTSTAMP_TX_ONESTEP_SYNC] = "onestep-sync",
+ [HWTSTAMP_TX_ONESTEP_P2P] = "onestep-p2p",
+};
+static_assert(ARRAY_SIZE(ts_tx_type_names) == __HWTSTAMP_TX_CNT);
+
+const char ts_rx_filter_names[][ETH_GSTRING_LEN] = {
+ [HWTSTAMP_FILTER_NONE] = "none",
+ [HWTSTAMP_FILTER_ALL] = "all",
+ [HWTSTAMP_FILTER_SOME] = "some",
+ [HWTSTAMP_FILTER_PTP_V1_L4_EVENT] = "ptpv1-l4-event",
+ [HWTSTAMP_FILTER_PTP_V1_L4_SYNC] = "ptpv1-l4-sync",
+ [HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ] = "ptpv1-l4-delay-req",
+ [HWTSTAMP_FILTER_PTP_V2_L4_EVENT] = "ptpv2-l4-event",
+ [HWTSTAMP_FILTER_PTP_V2_L4_SYNC] = "ptpv2-l4-sync",
+ [HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ] = "ptpv2-l4-delay-req",
+ [HWTSTAMP_FILTER_PTP_V2_L2_EVENT] = "ptpv2-l2-event",
+ [HWTSTAMP_FILTER_PTP_V2_L2_SYNC] = "ptpv2-l2-sync",
+ [HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ] = "ptpv2-l2-delay-req",
+ [HWTSTAMP_FILTER_PTP_V2_EVENT] = "ptpv2-event",
+ [HWTSTAMP_FILTER_PTP_V2_SYNC] = "ptpv2-sync",
+ [HWTSTAMP_FILTER_PTP_V2_DELAY_REQ] = "ptpv2-delay-req",
+ [HWTSTAMP_FILTER_NTP_ALL] = "ntp-all",
+};
+static_assert(ARRAY_SIZE(ts_rx_filter_names) == __HWTSTAMP_FILTER_CNT);
+
/* return false if legacy contained non-0 deprecated fields
* maxtxpkt/maxrxpkt. rest of ksettings always updated
*/
@@ -257,3 +309,65 @@ int __ethtool_get_link(struct net_device *dev)
return netif_running(dev) && dev->ethtool_ops->get_link(dev);
}
+
+int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max)
+{
+ u32 dev_size, current_max = 0;
+ u32 *indir;
+ int ret;
+
+ if (!dev->ethtool_ops->get_rxfh_indir_size ||
+ !dev->ethtool_ops->get_rxfh)
+ return -EOPNOTSUPP;
+ dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
+ if (dev_size == 0)
+ return -EOPNOTSUPP;
+
+ indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
+ if (!indir)
+ return -ENOMEM;
+
+ ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL, NULL);
+ if (ret)
+ goto out;
+
+ while (dev_size--)
+ current_max = max(current_max, indir[dev_size]);
+
+ *max = current_max;
+
+out:
+ kfree(indir);
+ return ret;
+}
+
+int ethtool_check_ops(const struct ethtool_ops *ops)
+{
+ if (WARN_ON(ops->set_coalesce && !ops->supported_coalesce_params))
+ return -EINVAL;
+ /* NOTE: sufficiently insane drivers may swap ethtool_ops at runtime,
+ * the fact that ops are checked at registration time does not
+ * mean the ops attached to a netdev later on are sane.
+ */
+ return 0;
+}
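
A driver advertising coalescing support pairs this new check with a mask of
the fields its set_coalesce() really honors; a sketch, with the foo_* names
being placeholders:

static const struct ethtool_ops foo_ethtool_ops = {
	/* the fields of struct ethtool_coalesce that foo_set_coalesce()
	 * actually implements; ethtool_check_ops() warns if set_coalesce
	 * is present without this mask
	 */
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.get_coalesce	= foo_get_coalesce,
	.set_coalesce	= foo_set_coalesce,
};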
+
+int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct phy_device *phydev = dev->phydev;
+
+ memset(info, 0, sizeof(*info));
+ info->cmd = ETHTOOL_GET_TS_INFO;
+
+ if (phy_has_tsinfo(phydev))
+ return phy_ts_info(phydev, info);
+ if (ops->get_ts_info)
+ return ops->get_ts_info(dev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+
+ return 0;
+}
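
Note the precedence encoded here: if the attached PHY does time stamping
(phy_has_tsinfo()), its report wins over the MAC driver's get_ts_info(), and
with neither available the fallback advertises only software receive time
stamping and no PHC (phc_index = -1).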
diff --git a/net/ethtool/common.h b/net/ethtool/common.h
index 40ba74e0b9bb..a62f68ccc43a 100644
--- a/net/ethtool/common.h
+++ b/net/ethtool/common.h
@@ -6,10 +6,14 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
+#define ETHTOOL_DEV_FEATURE_WORDS DIV_ROUND_UP(NETDEV_FEATURE_COUNT, 32)
+
/* compose link mode index from speed, type and duplex */
#define ETHTOOL_LINK_MODE(speed, type, duplex) \
ETHTOOL_LINK_MODE_ ## speed ## base ## type ## _ ## duplex ## _BIT
+#define __SOF_TIMESTAMPING_CNT (const_ilog2(SOF_TIMESTAMPING_LAST) + 1)
+
extern const char
netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN];
extern const char
@@ -21,11 +25,16 @@ phy_tunable_strings[__ETHTOOL_PHY_TUNABLE_COUNT][ETH_GSTRING_LEN];
extern const char link_mode_names[][ETH_GSTRING_LEN];
extern const char netif_msg_class_names[][ETH_GSTRING_LEN];
extern const char wol_mode_names[][ETH_GSTRING_LEN];
+extern const char sof_timestamping_names[][ETH_GSTRING_LEN];
+extern const char ts_tx_type_names[][ETH_GSTRING_LEN];
+extern const char ts_rx_filter_names[][ETH_GSTRING_LEN];
int __ethtool_get_link(struct net_device *dev);
bool convert_legacy_settings_to_link_ksettings(
struct ethtool_link_ksettings *link_ksettings,
const struct ethtool_cmd *legacy_settings);
+int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max);
+int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
#endif /* _ETHTOOL_COMMON_H */
diff --git a/net/ethtool/debug.c b/net/ethtool/debug.c
index 92599ad7b3c2..1bd026a29f3f 100644
--- a/net/ethtool/debug.c
+++ b/net/ethtool/debug.c
@@ -102,8 +102,10 @@ int ethnl_set_debug(struct sk_buff *skb, struct genl_info *info)
info->extack);
if (ret < 0)
return ret;
- ret = ethnl_parse_header(&req_info, tb[ETHTOOL_A_DEBUG_HEADER],
- genl_info_net(info), info->extack, true);
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_DEBUG_HEADER],
+ genl_info_net(info), info->extack,
+ true);
if (ret < 0)
return ret;
dev = req_info.dev;
diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c
new file mode 100644
index 000000000000..94aa19cff22f
--- /dev/null
+++ b/net/ethtool/eee.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "netlink.h"
+#include "common.h"
+#include "bitset.h"
+
+#define EEE_MODES_COUNT \
+ (sizeof_field(struct ethtool_eee, supported) * BITS_PER_BYTE)
+
+struct eee_req_info {
+ struct ethnl_req_info base;
+};
+
+struct eee_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_eee eee;
+};
+
+#define EEE_REPDATA(__reply_base) \
+ container_of(__reply_base, struct eee_reply_data, base)
+
+static const struct nla_policy
+eee_get_policy[ETHTOOL_A_EEE_MAX + 1] = {
+ [ETHTOOL_A_EEE_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_EEE_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_EEE_MODES_OURS] = { .type = NLA_REJECT },
+ [ETHTOOL_A_EEE_MODES_PEER] = { .type = NLA_REJECT },
+ [ETHTOOL_A_EEE_ACTIVE] = { .type = NLA_REJECT },
+ [ETHTOOL_A_EEE_ENABLED] = { .type = NLA_REJECT },
+ [ETHTOOL_A_EEE_TX_LPI_ENABLED] = { .type = NLA_REJECT },
+ [ETHTOOL_A_EEE_TX_LPI_TIMER] = { .type = NLA_REJECT },
+};
+
+static int eee_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct eee_reply_data *data = EEE_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ int ret;
+
+ if (!dev->ethtool_ops->get_eee)
+ return -EOPNOTSUPP;
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+ ret = dev->ethtool_ops->get_eee(dev, &data->eee);
+ ethnl_ops_complete(dev);
+
+ return ret;
+}
+
+static int eee_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+ const struct eee_reply_data *data = EEE_REPDATA(reply_base);
+ const struct ethtool_eee *eee = &data->eee;
+ int len = 0;
+ int ret;
+
+ BUILD_BUG_ON(sizeof(eee->advertised) * BITS_PER_BYTE !=
+ EEE_MODES_COUNT);
+ BUILD_BUG_ON(sizeof(eee->lp_advertised) * BITS_PER_BYTE !=
+ EEE_MODES_COUNT);
+
+ /* MODES_OURS */
+ ret = ethnl_bitset32_size(&eee->advertised, &eee->supported,
+ EEE_MODES_COUNT, link_mode_names, compact);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ /* MODES_PEERS */
+ ret = ethnl_bitset32_size(&eee->lp_advertised, NULL,
+ EEE_MODES_COUNT, link_mode_names, compact);
+ if (ret < 0)
+ return ret;
+ len += ret;
+
+ len += nla_total_size(sizeof(u8)) + /* _EEE_ACTIVE */
+ nla_total_size(sizeof(u8)) + /* _EEE_ENABLED */
+ nla_total_size(sizeof(u8)) + /* _EEE_TX_LPI_ENABLED */
+ nla_total_size(sizeof(u32)); /* _EEE_TX_LPI_TIMER */
+
+ return len;
+}
+
+static int eee_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+ const struct eee_reply_data *data = EEE_REPDATA(reply_base);
+ const struct ethtool_eee *eee = &data->eee;
+ int ret;
+
+ ret = ethnl_put_bitset32(skb, ETHTOOL_A_EEE_MODES_OURS,
+ &eee->advertised, &eee->supported,
+ EEE_MODES_COUNT, link_mode_names, compact);
+ if (ret < 0)
+ return ret;
+ ret = ethnl_put_bitset32(skb, ETHTOOL_A_EEE_MODES_PEER,
+ &eee->lp_advertised, NULL, EEE_MODES_COUNT,
+ link_mode_names, compact);
+ if (ret < 0)
+ return ret;
+
+ if (nla_put_u8(skb, ETHTOOL_A_EEE_ACTIVE, !!eee->eee_active) ||
+ nla_put_u8(skb, ETHTOOL_A_EEE_ENABLED, !!eee->eee_enabled) ||
+ nla_put_u8(skb, ETHTOOL_A_EEE_TX_LPI_ENABLED,
+ !!eee->tx_lpi_enabled) ||
+ nla_put_u32(skb, ETHTOOL_A_EEE_TX_LPI_TIMER, eee->tx_lpi_timer))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+const struct ethnl_request_ops ethnl_eee_request_ops = {
+ .request_cmd = ETHTOOL_MSG_EEE_GET,
+ .reply_cmd = ETHTOOL_MSG_EEE_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_EEE_HEADER,
+ .max_attr = ETHTOOL_A_EEE_MAX,
+ .req_info_size = sizeof(struct eee_req_info),
+ .reply_data_size = sizeof(struct eee_reply_data),
+ .request_policy = eee_get_policy,
+
+ .prepare_data = eee_prepare_data,
+ .reply_size = eee_reply_size,
+ .fill_reply = eee_fill_reply,
+};
+
+/* EEE_SET */
+
+static const struct nla_policy
+eee_set_policy[ETHTOOL_A_EEE_MAX + 1] = {
+ [ETHTOOL_A_EEE_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_EEE_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_EEE_MODES_OURS] = { .type = NLA_NESTED },
+ [ETHTOOL_A_EEE_MODES_PEER] = { .type = NLA_REJECT },
+ [ETHTOOL_A_EEE_ACTIVE] = { .type = NLA_REJECT },
+ [ETHTOOL_A_EEE_ENABLED] = { .type = NLA_U8 },
+ [ETHTOOL_A_EEE_TX_LPI_ENABLED] = { .type = NLA_U8 },
+ [ETHTOOL_A_EEE_TX_LPI_TIMER] = { .type = NLA_U32 },
+};
+
+int ethnl_set_eee(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb[ETHTOOL_A_EEE_MAX + 1];
+ struct ethtool_eee eee = {};
+ struct ethnl_req_info req_info = {};
+ const struct ethtool_ops *ops;
+ struct net_device *dev;
+ bool mod = false;
+ int ret;
+
+ ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb, ETHTOOL_A_EEE_MAX,
+ eee_set_policy, info->extack);
+ if (ret < 0)
+ return ret;
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_EEE_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+ dev = req_info.dev;
+ ops = dev->ethtool_ops;
+ ret = -EOPNOTSUPP;
+ if (!ops->get_eee || !ops->set_eee)
+ goto out_dev;
+
+ rtnl_lock();
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out_rtnl;
+ ret = ops->get_eee(dev, &eee);
+ if (ret < 0)
+ goto out_ops;
+
+ ret = ethnl_update_bitset32(&eee.advertised, EEE_MODES_COUNT,
+ tb[ETHTOOL_A_EEE_MODES_OURS],
+ link_mode_names, info->extack, &mod);
+ if (ret < 0)
+ goto out_ops;
+ ethnl_update_bool32(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
+ ethnl_update_bool32(&eee.tx_lpi_enabled,
+ tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], &mod);
+	ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
+			 &mod);
+ ret = 0;
+ if (!mod)
+ goto out_ops;
+
+ ret = dev->ethtool_ops->set_eee(dev, &eee);
+ if (ret < 0)
+ goto out_ops;
+ ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF, NULL);
+
+out_ops:
+ ethnl_ops_complete(dev);
+out_rtnl:
+ rtnl_unlock();
+out_dev:
+ dev_put(dev);
+ return ret;
+}
diff --git a/net/ethtool/features.c b/net/ethtool/features.c
new file mode 100644
index 000000000000..4e632dc987d8
--- /dev/null
+++ b/net/ethtool/features.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "netlink.h"
+#include "common.h"
+#include "bitset.h"
+
+struct features_req_info {
+ struct ethnl_req_info base;
+};
+
+struct features_reply_data {
+ struct ethnl_reply_data base;
+ u32 hw[ETHTOOL_DEV_FEATURE_WORDS];
+ u32 wanted[ETHTOOL_DEV_FEATURE_WORDS];
+ u32 active[ETHTOOL_DEV_FEATURE_WORDS];
+ u32 nochange[ETHTOOL_DEV_FEATURE_WORDS];
+ u32 all[ETHTOOL_DEV_FEATURE_WORDS];
+};
+
+#define FEATURES_REPDATA(__reply_base) \
+ container_of(__reply_base, struct features_reply_data, base)
+
+static const struct nla_policy
+features_get_policy[ETHTOOL_A_FEATURES_MAX + 1] = {
+ [ETHTOOL_A_FEATURES_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_FEATURES_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_FEATURES_HW] = { .type = NLA_REJECT },
+ [ETHTOOL_A_FEATURES_WANTED] = { .type = NLA_REJECT },
+ [ETHTOOL_A_FEATURES_ACTIVE] = { .type = NLA_REJECT },
+ [ETHTOOL_A_FEATURES_NOCHANGE] = { .type = NLA_REJECT },
+};
+
+static void ethnl_features_to_bitmap32(u32 *dest, netdev_features_t src)
+{
+ unsigned int i;
+
+ for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; i++)
+ dest[i] = src >> (32 * i);
+}
+
+static int features_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct features_reply_data *data = FEATURES_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ netdev_features_t all_features;
+
+ ethnl_features_to_bitmap32(data->hw, dev->hw_features);
+ ethnl_features_to_bitmap32(data->wanted, dev->wanted_features);
+ ethnl_features_to_bitmap32(data->active, dev->features);
+ ethnl_features_to_bitmap32(data->nochange, NETIF_F_NEVER_CHANGE);
+ all_features = GENMASK_ULL(NETDEV_FEATURE_COUNT - 1, 0);
+ ethnl_features_to_bitmap32(data->all, all_features);
+
+ return 0;
+}
+
+static int features_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct features_reply_data *data = FEATURES_REPDATA(reply_base);
+ bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+ unsigned int len = 0;
+ int ret;
+
+ ret = ethnl_bitset32_size(data->hw, data->all, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ ret = ethnl_bitset32_size(data->wanted, NULL, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ ret = ethnl_bitset32_size(data->active, NULL, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ ret = ethnl_bitset32_size(data->nochange, NULL, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ return ret;
+ len += ret;
+
+ return len;
+}
+
+static int features_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct features_reply_data *data = FEATURES_REPDATA(reply_base);
+ bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+ int ret;
+
+ ret = ethnl_put_bitset32(skb, ETHTOOL_A_FEATURES_HW, data->hw,
+ data->all, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ return ret;
+ ret = ethnl_put_bitset32(skb, ETHTOOL_A_FEATURES_WANTED, data->wanted,
+ NULL, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ return ret;
+ ret = ethnl_put_bitset32(skb, ETHTOOL_A_FEATURES_ACTIVE, data->active,
+ NULL, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ return ret;
+ return ethnl_put_bitset32(skb, ETHTOOL_A_FEATURES_NOCHANGE,
+ data->nochange, NULL, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+}
+
+const struct ethnl_request_ops ethnl_features_request_ops = {
+ .request_cmd = ETHTOOL_MSG_FEATURES_GET,
+ .reply_cmd = ETHTOOL_MSG_FEATURES_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_FEATURES_HEADER,
+ .max_attr = ETHTOOL_A_FEATURES_MAX,
+ .req_info_size = sizeof(struct features_req_info),
+ .reply_data_size = sizeof(struct features_reply_data),
+ .request_policy = features_get_policy,
+
+ .prepare_data = features_prepare_data,
+ .reply_size = features_reply_size,
+ .fill_reply = features_fill_reply,
+};
+
+/* FEATURES_SET */
+
+static const struct nla_policy
+features_set_policy[ETHTOOL_A_FEATURES_MAX + 1] = {
+ [ETHTOOL_A_FEATURES_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_FEATURES_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_FEATURES_HW] = { .type = NLA_REJECT },
+ [ETHTOOL_A_FEATURES_WANTED] = { .type = NLA_NESTED },
+ [ETHTOOL_A_FEATURES_ACTIVE] = { .type = NLA_REJECT },
+ [ETHTOOL_A_FEATURES_NOCHANGE] = { .type = NLA_REJECT },
+};
+
+static void ethnl_features_to_bitmap(unsigned long *dest, netdev_features_t val)
+{
+ const unsigned int words = BITS_TO_LONGS(NETDEV_FEATURE_COUNT);
+ unsigned int i;
+
+ bitmap_zero(dest, NETDEV_FEATURE_COUNT);
+ for (i = 0; i < words; i++)
+ dest[i] = (unsigned long)(val >> (i * BITS_PER_LONG));
+}
+
+static netdev_features_t ethnl_bitmap_to_features(unsigned long *src)
+{
+ const unsigned int nft_bits = sizeof(netdev_features_t) * BITS_PER_BYTE;
+ const unsigned int words = BITS_TO_LONGS(NETDEV_FEATURE_COUNT);
+ netdev_features_t ret = 0;
+ unsigned int i;
+
+ for (i = 0; i < words; i++)
+ ret |= (netdev_features_t)(src[i]) << (i * BITS_PER_LONG);
+ ret &= ~(netdev_features_t)0 >> (nft_bits - NETDEV_FEATURE_COUNT);
+ return ret;
+}
+
+static int features_send_reply(struct net_device *dev, struct genl_info *info,
+ const unsigned long *wanted,
+ const unsigned long *wanted_mask,
+ const unsigned long *active,
+ const unsigned long *active_mask, bool compact)
+{
+ struct sk_buff *rskb;
+ void *reply_payload;
+ int reply_len = 0;
+ int ret;
+
+ reply_len = ethnl_reply_header_size();
+ ret = ethnl_bitset_size(wanted, wanted_mask, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ goto err;
+ reply_len += ret;
+ ret = ethnl_bitset_size(active, active_mask, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ goto err;
+ reply_len += ret;
+
+ ret = -ENOMEM;
+ rskb = ethnl_reply_init(reply_len, dev, ETHTOOL_MSG_FEATURES_SET_REPLY,
+ ETHTOOL_A_FEATURES_HEADER, info,
+ &reply_payload);
+ if (!rskb)
+ goto err;
+
+ ret = ethnl_put_bitset(rskb, ETHTOOL_A_FEATURES_WANTED, wanted,
+ wanted_mask, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ goto nla_put_failure;
+ ret = ethnl_put_bitset(rskb, ETHTOOL_A_FEATURES_ACTIVE, active,
+ active_mask, NETDEV_FEATURE_COUNT,
+ netdev_features_strings, compact);
+ if (ret < 0)
+ goto nla_put_failure;
+
+ genlmsg_end(rskb, reply_payload);
+ ret = genlmsg_reply(rskb, info);
+ return ret;
+
+nla_put_failure:
+ nlmsg_free(rskb);
+ WARN_ONCE(1, "calculated message payload length (%d) not sufficient\n",
+ reply_len);
+err:
+ GENL_SET_ERR_MSG(info, "failed to send reply message");
+ return ret;
+}
+
+int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
+{
+ DECLARE_BITMAP(wanted_diff_mask, NETDEV_FEATURE_COUNT);
+ DECLARE_BITMAP(active_diff_mask, NETDEV_FEATURE_COUNT);
+ DECLARE_BITMAP(old_active, NETDEV_FEATURE_COUNT);
+ DECLARE_BITMAP(new_active, NETDEV_FEATURE_COUNT);
+ DECLARE_BITMAP(req_wanted, NETDEV_FEATURE_COUNT);
+ DECLARE_BITMAP(req_mask, NETDEV_FEATURE_COUNT);
+ struct nlattr *tb[ETHTOOL_A_FEATURES_MAX + 1];
+ struct ethnl_req_info req_info = {};
+ struct net_device *dev;
+ bool mod;
+ int ret;
+
+ ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
+ ETHTOOL_A_FEATURES_MAX, features_set_policy,
+ info->extack);
+ if (ret < 0)
+ return ret;
+ if (!tb[ETHTOOL_A_FEATURES_WANTED])
+ return -EINVAL;
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_FEATURES_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+ dev = req_info.dev;
+
+ rtnl_lock();
+ ethnl_features_to_bitmap(old_active, dev->features);
+ ret = ethnl_parse_bitset(req_wanted, req_mask, NETDEV_FEATURE_COUNT,
+ tb[ETHTOOL_A_FEATURES_WANTED],
+ netdev_features_strings, info->extack);
+ if (ret < 0)
+ goto out_rtnl;
+ if (ethnl_bitmap_to_features(req_mask) & ~NETIF_F_ETHTOOL_BITS) {
+ GENL_SET_ERR_MSG(info, "attempt to change non-ethtool features");
+ ret = -EINVAL;
+ goto out_rtnl;
+ }
+
+ /* set req_wanted bits not in req_mask from old_active */
+ bitmap_and(req_wanted, req_wanted, req_mask, NETDEV_FEATURE_COUNT);
+ bitmap_andnot(new_active, old_active, req_mask, NETDEV_FEATURE_COUNT);
+ bitmap_or(req_wanted, new_active, req_wanted, NETDEV_FEATURE_COUNT);
+ if (bitmap_equal(req_wanted, old_active, NETDEV_FEATURE_COUNT)) {
+ ret = 0;
+ goto out_rtnl;
+ }
+
+ dev->wanted_features = ethnl_bitmap_to_features(req_wanted);
+ __netdev_update_features(dev);
+ ethnl_features_to_bitmap(new_active, dev->features);
+ mod = !bitmap_equal(old_active, new_active, NETDEV_FEATURE_COUNT);
+
+ ret = 0;
+ if (!(req_info.flags & ETHTOOL_FLAG_OMIT_REPLY)) {
+ bool compact = req_info.flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+
+ bitmap_xor(wanted_diff_mask, req_wanted, new_active,
+ NETDEV_FEATURE_COUNT);
+ bitmap_xor(active_diff_mask, old_active, new_active,
+ NETDEV_FEATURE_COUNT);
+ bitmap_and(wanted_diff_mask, wanted_diff_mask, req_mask,
+ NETDEV_FEATURE_COUNT);
+ bitmap_and(req_wanted, req_wanted, wanted_diff_mask,
+ NETDEV_FEATURE_COUNT);
+ bitmap_and(new_active, new_active, active_diff_mask,
+ NETDEV_FEATURE_COUNT);
+
+ ret = features_send_reply(dev, info, req_wanted,
+ wanted_diff_mask, new_active,
+ active_diff_mask, compact);
+ }
+ if (mod)
+ ethtool_notify(dev, ETHTOOL_MSG_FEATURES_NTF, NULL);
+
+out_rtnl:
+ rtnl_unlock();
+ dev_put(dev);
+ return ret;
+}
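
Unlike the other SET handlers in this series, this one sends a reply by
default: WANTED carries the bits the user asked for but did not get, ACTIVE
the bits that actually changed (possibly as side effects of other changes),
both trimmed to the masks computed above; userspace can suppress the reply
with ETHTOOL_FLAG_OMIT_REPLY.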
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index b987052d91ef..89d0b1827aaf 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -56,8 +56,6 @@ EXPORT_SYMBOL(ethtool_op_get_ts_info);
/* Handlers for each ethtool command */
-#define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32)
-
static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
{
struct ethtool_gfeatures cmd = {
@@ -198,13 +196,14 @@ static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd)
switch (eth_cmd) {
case ETHTOOL_GTXCSUM:
case ETHTOOL_STXCSUM:
- return NETIF_F_CSUM_MASK | NETIF_F_SCTP_CRC;
+ return NETIF_F_CSUM_MASK | NETIF_F_FCOE_CRC |
+ NETIF_F_SCTP_CRC;
case ETHTOOL_GRXCSUM:
case ETHTOOL_SRXCSUM:
return NETIF_F_RXCSUM;
case ETHTOOL_GSG:
case ETHTOOL_SSG:
- return NETIF_F_SG;
+ return NETIF_F_SG | NETIF_F_FRAGLIST;
case ETHTOOL_GTSO:
case ETHTOOL_STSO:
return NETIF_F_ALL_TSO;
@@ -459,6 +458,24 @@ static int load_link_ksettings_from_user(struct ethtool_link_ksettings *to,
return 0;
}
+/* Check if the user is trying to change anything besides speed/duplex */
+bool ethtool_virtdev_validate_cmd(const struct ethtool_link_ksettings *cmd)
+{
+ struct ethtool_link_settings base2 = {};
+
+ base2.speed = cmd->base.speed;
+ base2.port = PORT_OTHER;
+ base2.duplex = cmd->base.duplex;
+ base2.cmd = cmd->base.cmd;
+ base2.link_mode_masks_nwords = cmd->base.link_mode_masks_nwords;
+
+ return !memcmp(&base2, &cmd->base, sizeof(base2)) &&
+ bitmap_empty(cmd->link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS) &&
+ bitmap_empty(cmd->link_modes.lp_advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
/* convert a kernel internal ethtool_link_ksettings to
* ethtool_link_usettings in user space. return 0 on success, errno on
* error.
@@ -581,6 +598,27 @@ static int ethtool_set_link_ksettings(struct net_device *dev,
return err;
}
+int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd,
+ u32 *dev_speed, u8 *dev_duplex)
+{
+ u32 speed;
+ u8 duplex;
+
+ speed = cmd->base.speed;
+ duplex = cmd->base.duplex;
+ /* don't allow custom speed and duplex */
+ if (!ethtool_validate_speed(speed) ||
+ !ethtool_validate_duplex(duplex) ||
+ !ethtool_virtdev_validate_cmd(cmd))
+ return -EINVAL;
+ *dev_speed = speed;
+ *dev_duplex = duplex;
+
+ return 0;
+}
+EXPORT_SYMBOL(ethtool_virtdev_set_link_ksettings);
+
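A hedged sketch of how a virtual-device driver might wire the new helper into its
ethtool_ops; the vnet_priv structure and handler name are hypothetical:

	/* hypothetical driver code, for illustration only */
	static int vnet_set_link_ksettings(struct net_device *dev,
					   const struct ethtool_link_ksettings *cmd)
	{
		struct vnet_priv *priv = netdev_priv(dev);	/* hypothetical priv */

		return ethtool_virtdev_set_link_ksettings(dev, cmd,
							  &priv->speed,
							  &priv->duplex);
	}
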
/* Query device for its ethtool_cmd settings.
*
* Backward compatibility note: for compatibility with legacy ethtool, this is
@@ -891,37 +929,6 @@ void netdev_rss_key_fill(void *buffer, size_t len)
}
EXPORT_SYMBOL(netdev_rss_key_fill);
-static int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max)
-{
- u32 dev_size, current_max = 0;
- u32 *indir;
- int ret;
-
- if (!dev->ethtool_ops->get_rxfh_indir_size ||
- !dev->ethtool_ops->get_rxfh)
- return -EOPNOTSUPP;
- dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
- if (dev_size == 0)
- return -EOPNOTSUPP;
-
- indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
- if (!indir)
- return -ENOMEM;
-
- ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL, NULL);
- if (ret)
- goto out;
-
- while (dev_size--)
- current_max = max(current_max, indir[dev_size]);
-
- *max = current_max;
-
-out:
- kfree(indir);
- return ret;
-}
-
static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
void __user *useraddr)
{
@@ -1347,6 +1354,7 @@ static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
static int ethtool_set_eee(struct net_device *dev, char __user *useraddr)
{
struct ethtool_eee edata;
+ int ret;
if (!dev->ethtool_ops->set_eee)
return -EOPNOTSUPP;
@@ -1354,7 +1362,10 @@ static int ethtool_set_eee(struct net_device *dev, char __user *useraddr)
if (copy_from_user(&edata, useraddr, sizeof(edata)))
return -EFAULT;
- return dev->ethtool_ops->set_eee(dev, &edata);
+ ret = dev->ethtool_ops->set_eee(dev, &edata);
+ if (!ret)
+ ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF, NULL);
+ return ret;
}
static int ethtool_nway_reset(struct net_device *dev)
@@ -1505,10 +1516,66 @@ static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev,
return 0;
}
+static bool
+ethtool_set_coalesce_supported(struct net_device *dev,
+ struct ethtool_coalesce *coalesce)
+{
+ u32 supported_params = dev->ethtool_ops->supported_coalesce_params;
+ u32 nonzero_params = 0;
+
+ if (coalesce->rx_coalesce_usecs)
+ nonzero_params |= ETHTOOL_COALESCE_RX_USECS;
+ if (coalesce->rx_max_coalesced_frames)
+ nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES;
+ if (coalesce->rx_coalesce_usecs_irq)
+ nonzero_params |= ETHTOOL_COALESCE_RX_USECS_IRQ;
+ if (coalesce->rx_max_coalesced_frames_irq)
+ nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ;
+ if (coalesce->tx_coalesce_usecs)
+ nonzero_params |= ETHTOOL_COALESCE_TX_USECS;
+ if (coalesce->tx_max_coalesced_frames)
+ nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES;
+ if (coalesce->tx_coalesce_usecs_irq)
+ nonzero_params |= ETHTOOL_COALESCE_TX_USECS_IRQ;
+ if (coalesce->tx_max_coalesced_frames_irq)
+ nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ;
+ if (coalesce->stats_block_coalesce_usecs)
+ nonzero_params |= ETHTOOL_COALESCE_STATS_BLOCK_USECS;
+ if (coalesce->use_adaptive_rx_coalesce)
+ nonzero_params |= ETHTOOL_COALESCE_USE_ADAPTIVE_RX;
+ if (coalesce->use_adaptive_tx_coalesce)
+ nonzero_params |= ETHTOOL_COALESCE_USE_ADAPTIVE_TX;
+ if (coalesce->pkt_rate_low)
+ nonzero_params |= ETHTOOL_COALESCE_PKT_RATE_LOW;
+ if (coalesce->rx_coalesce_usecs_low)
+ nonzero_params |= ETHTOOL_COALESCE_RX_USECS_LOW;
+ if (coalesce->rx_max_coalesced_frames_low)
+ nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW;
+ if (coalesce->tx_coalesce_usecs_low)
+ nonzero_params |= ETHTOOL_COALESCE_TX_USECS_LOW;
+ if (coalesce->tx_max_coalesced_frames_low)
+ nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW;
+ if (coalesce->pkt_rate_high)
+ nonzero_params |= ETHTOOL_COALESCE_PKT_RATE_HIGH;
+ if (coalesce->rx_coalesce_usecs_high)
+ nonzero_params |= ETHTOOL_COALESCE_RX_USECS_HIGH;
+ if (coalesce->rx_max_coalesced_frames_high)
+ nonzero_params |= ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH;
+ if (coalesce->tx_coalesce_usecs_high)
+ nonzero_params |= ETHTOOL_COALESCE_TX_USECS_HIGH;
+ if (coalesce->tx_max_coalesced_frames_high)
+ nonzero_params |= ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH;
+ if (coalesce->rate_sample_interval)
+ nonzero_params |= ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL;
+
+ return (supported_params & nonzero_params) == nonzero_params;
+}
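
The check rejects any nonzero field whose bit the driver did not advertise. As an
illustrative sketch (not from this patch), a driver that only implements rx/tx usecs
coalescing would declare exactly those bits so that requests touching anything else
fail with -EOPNOTSUPP here:

	static const struct ethtool_ops example_ethtool_ops = {
		/* only these two fields may be nonzero in a SET request */
		.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
					     ETHTOOL_COALESCE_TX_USECS,
		.get_coalesce	= example_get_coalesce,	/* hypothetical handlers */
		.set_coalesce	= example_set_coalesce,
	};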
+
static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
void __user *useraddr)
{
struct ethtool_coalesce coalesce;
+ int ret;
if (!dev->ethtool_ops->set_coalesce)
return -EOPNOTSUPP;
@@ -1516,7 +1583,13 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
return -EFAULT;
- return dev->ethtool_ops->set_coalesce(dev, &coalesce);
+ if (!ethtool_set_coalesce_supported(dev, &coalesce))
+ return -EOPNOTSUPP;
+
+ ret = dev->ethtool_ops->set_coalesce(dev, &coalesce);
+ if (!ret)
+ ethtool_notify(dev, ETHTOOL_MSG_COALESCE_NTF, NULL);
+ return ret;
}
static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
@@ -1536,6 +1609,7 @@ static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
{
struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM };
+ int ret;
if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam)
return -EOPNOTSUPP;
@@ -1552,7 +1626,10 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
ringparam.tx_pending > max.tx_max_pending)
return -EINVAL;
- return dev->ethtool_ops->set_ringparam(dev, &ringparam);
+ ret = dev->ethtool_ops->set_ringparam(dev, &ringparam);
+ if (!ret)
+ ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);
+ return ret;
}
static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
@@ -1577,6 +1654,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
u16 from_channel, to_channel;
u32 max_rx_in_use = 0;
unsigned int i;
+ int ret;
if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
return -EOPNOTSUPP;
@@ -1608,7 +1686,10 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
if (xdp_get_umem_from_qid(dev, i))
return -EINVAL;
- return dev->ethtool_ops->set_channels(dev, &channels);
+ ret = dev->ethtool_ops->set_channels(dev, &channels);
+ if (!ret)
+ ethtool_notify(dev, ETHTOOL_MSG_CHANNELS_NTF, NULL);
+ return ret;
}
static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
@@ -1628,6 +1709,7 @@ static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
{
struct ethtool_pauseparam pauseparam;
+ int ret;
if (!dev->ethtool_ops->set_pauseparam)
return -EOPNOTSUPP;
@@ -1635,7 +1717,10 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
return -EFAULT;
- return dev->ethtool_ops->set_pauseparam(dev, &pauseparam);
+ ret = dev->ethtool_ops->set_pauseparam(dev, &pauseparam);
+ if (!ret)
+ ethtool_notify(dev, ETHTOOL_MSG_PAUSE_NTF, NULL);
+ return ret;
}
static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
@@ -2055,32 +2140,17 @@ out:
static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
{
- int err = 0;
struct ethtool_ts_info info;
- const struct ethtool_ops *ops = dev->ethtool_ops;
- struct phy_device *phydev = dev->phydev;
-
- memset(&info, 0, sizeof(info));
- info.cmd = ETHTOOL_GET_TS_INFO;
-
- if (phy_has_tsinfo(phydev)) {
- err = phy_ts_info(phydev, &info);
- } else if (ops->get_ts_info) {
- err = ops->get_ts_info(dev, &info);
- } else {
- info.so_timestamping =
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info.phc_index = -1;
- }
+ int err;
+ err = __ethtool_get_ts_info(dev, &info);
if (err)
return err;
if (copy_to_user(useraddr, &info, sizeof(info)))
- err = -EFAULT;
+ return -EFAULT;
- return err;
+ return 0;
}
static int __ethtool_get_module_info(struct net_device *dev,
@@ -2297,6 +2367,11 @@ ethtool_set_per_queue_coalesce(struct net_device *dev,
goto roll_back;
}
+ if (!ethtool_set_coalesce_supported(dev, &coalesce)) {
+ ret = -EOPNOTSUPP;
+ goto roll_back;
+ }
+
ret = dev->ethtool_ops->set_per_queue_coalesce(dev, bit, &coalesce);
if (ret != 0)
goto roll_back;
@@ -2612,8 +2687,10 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GPFLAGS:
rc = ethtool_get_value(dev, useraddr, ethcmd,
dev->ethtool_ops->get_priv_flags);
break;
case ETHTOOL_SPFLAGS:
rc = ethtool_set_value(dev, useraddr,
dev->ethtool_ops->set_priv_flags);
+ if (!rc)
+ ethtool_notify(dev, ETHTOOL_MSG_PRIVFLAGS_NTF, NULL);
break;
diff --git a/net/ethtool/linkinfo.c b/net/ethtool/linkinfo.c
index 6e9e0b590bb5..677068deb68c 100644
--- a/net/ethtool/linkinfo.c
+++ b/net/ethtool/linkinfo.c
@@ -121,8 +121,10 @@ int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info)
info->extack);
if (ret < 0)
return ret;
- ret = ethnl_parse_header(&req_info, tb[ETHTOOL_A_LINKINFO_HEADER],
- genl_info_net(info), info->extack, true);
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_LINKINFO_HEADER],
+ genl_info_net(info), info->extack,
+ true);
if (ret < 0)
return ret;
dev = req_info.dev;
diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c
index 18cc37be2d9c..452608c6d856 100644
--- a/net/ethtool/linkmodes.c
+++ b/net/ethtool/linkmodes.c
@@ -237,6 +237,7 @@ static const struct link_mode_info link_mode_params[] = {
__DEFINE_LINK_MODE_PARAMS(400000, LR8_ER8_FR8, Full),
__DEFINE_LINK_MODE_PARAMS(400000, DR8, Full),
__DEFINE_LINK_MODE_PARAMS(400000, CR8, Full),
+ __DEFINE_SPECIAL_MODE_PARAMS(FEC_LLRS),
};
static const struct nla_policy
@@ -333,8 +334,10 @@ int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info)
info->extack);
if (ret < 0)
return ret;
- ret = ethnl_parse_header(&req_info, tb[ETHTOOL_A_LINKMODES_HEADER],
- genl_info_net(info), info->extack, true);
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_LINKMODES_HEADER],
+ genl_info_net(info), info->extack,
+ true);
if (ret < 0)
return ret;
dev = req_info.dev;
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index fc9e0b806889..0c772318c023 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -18,7 +18,7 @@ static const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_MAX + 1] = {
};
/**
- * ethnl_parse_header() - parse request header
+ * ethnl_parse_header_dev_get() - parse request header
* @req_info: structure to put results into
* @header: nest attribute with request header
* @net: request netns
@@ -33,9 +33,9 @@ static const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_MAX + 1] = {
*
* Return: 0 on success or negative error code
*/
-int ethnl_parse_header(struct ethnl_req_info *req_info,
- const struct nlattr *header, struct net *net,
- struct netlink_ext_ack *extack, bool require_dev)
+int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
+ const struct nlattr *header, struct net *net,
+ struct netlink_ext_ack *extack, bool require_dev)
{
struct nlattr *tb[ETHTOOL_A_HEADER_MAX + 1];
const struct nlattr *devname_attr;
@@ -223,6 +223,14 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = {
[ETHTOOL_MSG_LINKSTATE_GET] = &ethnl_linkstate_request_ops,
[ETHTOOL_MSG_DEBUG_GET] = &ethnl_debug_request_ops,
[ETHTOOL_MSG_WOL_GET] = &ethnl_wol_request_ops,
+ [ETHTOOL_MSG_FEATURES_GET] = &ethnl_features_request_ops,
+ [ETHTOOL_MSG_PRIVFLAGS_GET] = &ethnl_privflags_request_ops,
+ [ETHTOOL_MSG_RINGS_GET] = &ethnl_rings_request_ops,
+ [ETHTOOL_MSG_CHANNELS_GET] = &ethnl_channels_request_ops,
+ [ETHTOOL_MSG_COALESCE_GET] = &ethnl_coalesce_request_ops,
+ [ETHTOOL_MSG_PAUSE_GET] = &ethnl_pause_request_ops,
+ [ETHTOOL_MSG_EEE_GET] = &ethnl_eee_request_ops,
+ [ETHTOOL_MSG_TSINFO_GET] = &ethnl_tsinfo_request_ops,
};
static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
@@ -261,8 +269,8 @@ static int ethnl_default_parse(struct ethnl_req_info *req_info,
request_ops->request_policy, extack);
if (ret < 0)
goto out;
- ret = ethnl_parse_header(req_info, tb[request_ops->hdr_attr], net,
- extack, require_dev);
+ ret = ethnl_parse_header_dev_get(req_info, tb[request_ops->hdr_attr],
+ net, extack, require_dev);
if (ret < 0)
goto out;
@@ -535,6 +543,13 @@ ethnl_default_notify_ops[ETHTOOL_MSG_KERNEL_MAX + 1] = {
[ETHTOOL_MSG_LINKMODES_NTF] = &ethnl_linkmodes_request_ops,
[ETHTOOL_MSG_DEBUG_NTF] = &ethnl_debug_request_ops,
[ETHTOOL_MSG_WOL_NTF] = &ethnl_wol_request_ops,
+ [ETHTOOL_MSG_FEATURES_NTF] = &ethnl_features_request_ops,
+ [ETHTOOL_MSG_PRIVFLAGS_NTF] = &ethnl_privflags_request_ops,
+ [ETHTOOL_MSG_RINGS_NTF] = &ethnl_rings_request_ops,
+ [ETHTOOL_MSG_CHANNELS_NTF] = &ethnl_channels_request_ops,
+ [ETHTOOL_MSG_COALESCE_NTF] = &ethnl_coalesce_request_ops,
+ [ETHTOOL_MSG_PAUSE_NTF] = &ethnl_pause_request_ops,
+ [ETHTOOL_MSG_EEE_NTF] = &ethnl_eee_request_ops,
};
/* default notification handler */
@@ -620,6 +635,13 @@ static const ethnl_notify_handler_t ethnl_notify_handlers[] = {
[ETHTOOL_MSG_LINKMODES_NTF] = ethnl_default_notify,
[ETHTOOL_MSG_DEBUG_NTF] = ethnl_default_notify,
[ETHTOOL_MSG_WOL_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_FEATURES_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_PRIVFLAGS_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_RINGS_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_CHANNELS_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_COALESCE_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_PAUSE_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_EEE_NTF] = ethnl_default_notify,
};
void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data)
@@ -637,6 +659,29 @@ void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data)
}
EXPORT_SYMBOL(ethtool_notify);
+static void ethnl_notify_features(struct netdev_notifier_info *info)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(info);
+
+ ethtool_notify(dev, ETHTOOL_MSG_FEATURES_NTF, NULL);
+}
+
+static int ethnl_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ switch (event) {
+ case NETDEV_FEAT_CHANGE:
+ ethnl_notify_features(ptr);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ethnl_netdev_notifier = {
+ .notifier_call = ethnl_netdev_event,
+};
+
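For context, the notifier above fires on NETDEV_FEAT_CHANGE, which drivers raise
through netdev_features_change() whenever they alter dev->features outside the usual
ndo_set_features path. A minimal hypothetical example:

	/* hypothetical driver helper, for illustration only */
	static void example_disable_rx_csum(struct net_device *dev)
	{
		dev->features &= ~NETIF_F_RXCSUM;
		netdev_features_change(dev);	/* raises NETDEV_FEAT_CHANGE */
	}
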
/* genetlink setup */
static const struct genl_ops ethtool_genl_ops[] = {
@@ -703,6 +748,97 @@ static const struct genl_ops ethtool_genl_ops[] = {
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_set_wol,
},
+ {
+ .cmd = ETHTOOL_MSG_FEATURES_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ },
+ {
+ .cmd = ETHTOOL_MSG_FEATURES_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_set_features,
+ },
+ {
+ .cmd = ETHTOOL_MSG_PRIVFLAGS_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ },
+ {
+ .cmd = ETHTOOL_MSG_PRIVFLAGS_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_set_privflags,
+ },
+ {
+ .cmd = ETHTOOL_MSG_RINGS_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ },
+ {
+ .cmd = ETHTOOL_MSG_RINGS_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_set_rings,
+ },
+ {
+ .cmd = ETHTOOL_MSG_CHANNELS_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ },
+ {
+ .cmd = ETHTOOL_MSG_CHANNELS_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_set_channels,
+ },
+ {
+ .cmd = ETHTOOL_MSG_COALESCE_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ },
+ {
+ .cmd = ETHTOOL_MSG_COALESCE_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_set_coalesce,
+ },
+ {
+ .cmd = ETHTOOL_MSG_PAUSE_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ },
+ {
+ .cmd = ETHTOOL_MSG_PAUSE_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_set_pause,
+ },
+ {
+ .cmd = ETHTOOL_MSG_EEE_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ },
+ {
+ .cmd = ETHTOOL_MSG_EEE_SET,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_set_eee,
+ },
+ {
+ .cmd = ETHTOOL_MSG_TSINFO_GET,
+ .doit = ethnl_default_doit,
+ .start = ethnl_default_start,
+ .dumpit = ethnl_default_dumpit,
+ .done = ethnl_default_done,
+ },
};
static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
@@ -731,7 +867,9 @@ static int __init ethnl_init(void)
return ret;
ethnl_ok = true;
- return 0;
+ ret = register_netdevice_notifier(&ethnl_netdev_notifier);
+ WARN(ret < 0, "ethtool: net device notifier registration failed");
+ return ret;
}
subsys_initcall(ethnl_init);
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 60efd87686ad..81b8fa020bcb 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -10,9 +10,10 @@
struct ethnl_req_info;
-int ethnl_parse_header(struct ethnl_req_info *req_info,
- const struct nlattr *nest, struct net *net,
- struct netlink_ext_ack *extack, bool require_dev);
+int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
+ const struct nlattr *nest, struct net *net,
+ struct netlink_ext_ack *extack,
+ bool require_dev);
int ethnl_fill_reply_header(struct sk_buff *skb, struct net_device *dev,
u16 attrtype);
struct sk_buff *ethnl_reply_init(size_t payload, struct net_device *dev, u8 cmd,
@@ -336,10 +337,25 @@ extern const struct ethnl_request_ops ethnl_linkmodes_request_ops;
extern const struct ethnl_request_ops ethnl_linkstate_request_ops;
extern const struct ethnl_request_ops ethnl_debug_request_ops;
extern const struct ethnl_request_ops ethnl_wol_request_ops;
+extern const struct ethnl_request_ops ethnl_features_request_ops;
+extern const struct ethnl_request_ops ethnl_privflags_request_ops;
+extern const struct ethnl_request_ops ethnl_rings_request_ops;
+extern const struct ethnl_request_ops ethnl_channels_request_ops;
+extern const struct ethnl_request_ops ethnl_coalesce_request_ops;
+extern const struct ethnl_request_ops ethnl_pause_request_ops;
+extern const struct ethnl_request_ops ethnl_eee_request_ops;
+extern const struct ethnl_request_ops ethnl_tsinfo_request_ops;
int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info);
int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info);
int ethnl_set_debug(struct sk_buff *skb, struct genl_info *info);
int ethnl_set_wol(struct sk_buff *skb, struct genl_info *info);
+int ethnl_set_features(struct sk_buff *skb, struct genl_info *info);
+int ethnl_set_privflags(struct sk_buff *skb, struct genl_info *info);
+int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info);
+int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info);
+int ethnl_set_coalesce(struct sk_buff *skb, struct genl_info *info);
+int ethnl_set_pause(struct sk_buff *skb, struct genl_info *info);
+int ethnl_set_eee(struct sk_buff *skb, struct genl_info *info);
#endif /* _NET_ETHTOOL_NETLINK_H */
diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c
new file mode 100644
index 000000000000..7aea35d1e8a5
--- /dev/null
+++ b/net/ethtool/pause.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "netlink.h"
+#include "common.h"
+
+struct pause_req_info {
+ struct ethnl_req_info base;
+};
+
+struct pause_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_pauseparam pauseparam;
+};
+
+#define PAUSE_REPDATA(__reply_base) \
+ container_of(__reply_base, struct pause_reply_data, base)
+
+static const struct nla_policy
+pause_get_policy[ETHTOOL_A_PAUSE_MAX + 1] = {
+ [ETHTOOL_A_PAUSE_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_PAUSE_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_PAUSE_AUTONEG] = { .type = NLA_REJECT },
+ [ETHTOOL_A_PAUSE_RX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_PAUSE_TX] = { .type = NLA_REJECT },
+};
+
+static int pause_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct pause_reply_data *data = PAUSE_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ int ret;
+
+ if (!dev->ethtool_ops->get_pauseparam)
+ return -EOPNOTSUPP;
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+ dev->ethtool_ops->get_pauseparam(dev, &data->pauseparam);
+ ethnl_ops_complete(dev);
+
+ return 0;
+}
+
+static int pause_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ return nla_total_size(sizeof(u8)) + /* _PAUSE_AUTONEG */
+ nla_total_size(sizeof(u8)) + /* _PAUSE_RX */
+ nla_total_size(sizeof(u8)); /* _PAUSE_TX */
+}
+
+static int pause_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct pause_reply_data *data = PAUSE_REPDATA(reply_base);
+ const struct ethtool_pauseparam *pauseparam = &data->pauseparam;
+
+ if (nla_put_u8(skb, ETHTOOL_A_PAUSE_AUTONEG, !!pauseparam->autoneg) ||
+ nla_put_u8(skb, ETHTOOL_A_PAUSE_RX, !!pauseparam->rx_pause) ||
+ nla_put_u8(skb, ETHTOOL_A_PAUSE_TX, !!pauseparam->tx_pause))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+const struct ethnl_request_ops ethnl_pause_request_ops = {
+ .request_cmd = ETHTOOL_MSG_PAUSE_GET,
+ .reply_cmd = ETHTOOL_MSG_PAUSE_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_PAUSE_HEADER,
+ .max_attr = ETHTOOL_A_PAUSE_MAX,
+ .req_info_size = sizeof(struct pause_req_info),
+ .reply_data_size = sizeof(struct pause_reply_data),
+ .request_policy = pause_get_policy,
+
+ .prepare_data = pause_prepare_data,
+ .reply_size = pause_reply_size,
+ .fill_reply = pause_fill_reply,
+};
+
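For reference, this mirrors the classic ioctl path exercised by "ethtool -a eth0"
(get) and "ethtool -A eth0 rx on tx off" (set); the netlink reply carries the same
three booleans as ETHTOOL_A_PAUSE_AUTONEG, ETHTOOL_A_PAUSE_RX and ETHTOOL_A_PAUSE_TX.
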
+/* PAUSE_SET */
+
+static const struct nla_policy
+pause_set_policy[ETHTOOL_A_PAUSE_MAX + 1] = {
+ [ETHTOOL_A_PAUSE_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_PAUSE_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_PAUSE_AUTONEG] = { .type = NLA_U8 },
+ [ETHTOOL_A_PAUSE_RX] = { .type = NLA_U8 },
+ [ETHTOOL_A_PAUSE_TX] = { .type = NLA_U8 },
+};
+
+int ethnl_set_pause(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb[ETHTOOL_A_PAUSE_MAX + 1];
+ struct ethtool_pauseparam params = {};
+ struct ethnl_req_info req_info = {};
+ const struct ethtool_ops *ops;
+ struct net_device *dev;
+ bool mod = false;
+ int ret;
+
+ ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb, ETHTOOL_A_PAUSE_MAX,
+ pause_set_policy, info->extack);
+ if (ret < 0)
+ return ret;
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_PAUSE_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+ dev = req_info.dev;
+ ops = dev->ethtool_ops;
+ ret = -EOPNOTSUPP;
+ if (!ops->get_pauseparam || !ops->set_pauseparam)
+ goto out_dev;
+
+ rtnl_lock();
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out_rtnl;
+ ops->get_pauseparam(dev, &params);
+
+ ethnl_update_bool32(&params.autoneg, tb[ETHTOOL_A_PAUSE_AUTONEG], &mod);
+ ethnl_update_bool32(&params.rx_pause, tb[ETHTOOL_A_PAUSE_RX], &mod);
+ ethnl_update_bool32(&params.tx_pause, tb[ETHTOOL_A_PAUSE_TX], &mod);
+ ret = 0;
+ if (!mod)
+ goto out_ops;
+
+ ret = dev->ethtool_ops->set_pauseparam(dev, &params);
+ if (ret < 0)
+ goto out_ops;
+ ethtool_notify(dev, ETHTOOL_MSG_PAUSE_NTF, NULL);
+
+out_ops:
+ ethnl_ops_complete(dev);
+out_rtnl:
+ rtnl_unlock();
+out_dev:
+ dev_put(dev);
+ return ret;
+}
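
ethnl_update_bool32() above is assumed to update a u32 field from an optional NLA_U8
attribute and record whether anything changed; a sketch of that assumed behavior:

	/* assumed semantics, for illustration; the real helper lives in
	 * net/ethtool/netlink.h
	 */
	static inline void sketch_update_bool32(u32 *dst,
						const struct nlattr *attr,
						bool *mod)
	{
		u8 val;

		if (!attr)
			return;
		val = !!nla_get_u8(attr);
		if (!!*dst == val)
			return;
		*dst = val;
		*mod = true;
	}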
diff --git a/net/ethtool/privflags.c b/net/ethtool/privflags.c
new file mode 100644
index 000000000000..77447dceb109
--- /dev/null
+++ b/net/ethtool/privflags.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "netlink.h"
+#include "common.h"
+#include "bitset.h"
+
+struct privflags_req_info {
+ struct ethnl_req_info base;
+};
+
+struct privflags_reply_data {
+ struct ethnl_reply_data base;
+ const char (*priv_flag_names)[ETH_GSTRING_LEN];
+ unsigned int n_priv_flags;
+ u32 priv_flags;
+};
+
+#define PRIVFLAGS_REPDATA(__reply_base) \
+ container_of(__reply_base, struct privflags_reply_data, base)
+
+static const struct nla_policy
+privflags_get_policy[ETHTOOL_A_PRIVFLAGS_MAX + 1] = {
+ [ETHTOOL_A_PRIVFLAGS_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_PRIVFLAGS_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_PRIVFLAGS_FLAGS] = { .type = NLA_REJECT },
+};
+
+static int ethnl_get_priv_flags_info(struct net_device *dev,
+ unsigned int *count,
+ const char (**names)[ETH_GSTRING_LEN])
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ int nflags;
+
+ nflags = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS);
+ if (nflags < 0)
+ return nflags;
+
+ if (names) {
+ *names = kcalloc(nflags, ETH_GSTRING_LEN, GFP_KERNEL);
+ if (!*names)
+ return -ENOMEM;
+ ops->get_strings(dev, ETH_SS_PRIV_FLAGS, (u8 *)*names);
+ }
+
+ /* We can pass more than 32 private flags to userspace via netlink but
+ * we cannot get more with ethtool_ops::get_priv_flags(). Note that we
+ * must not adjust nflags before allocating the space for flag names
+ * as the buffer must be large enough for all flags.
+ */
+ if (WARN_ONCE(nflags > 32,
+ "device %s reports more than 32 private flags (%d)\n",
+ netdev_name(dev), nflags))
+ nflags = 32;
+ *count = nflags;
+
+ return 0;
+}
+
+static int privflags_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct privflags_reply_data *data = PRIVFLAGS_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ const char (*names)[ETH_GSTRING_LEN];
+ const struct ethtool_ops *ops;
+ unsigned int nflags;
+ int ret;
+
+ ops = dev->ethtool_ops;
+ if (!ops->get_priv_flags || !ops->get_sset_count || !ops->get_strings)
+ return -EOPNOTSUPP;
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = ethnl_get_priv_flags_info(dev, &nflags, &names);
+ if (ret < 0)
+ goto out_ops;
+ data->priv_flags = ops->get_priv_flags(dev);
+ data->priv_flag_names = names;
+ data->n_priv_flags = nflags;
+
+out_ops:
+ ethnl_ops_complete(dev);
+ return ret;
+}
+
+static int privflags_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct privflags_reply_data *data = PRIVFLAGS_REPDATA(reply_base);
+ bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+ const u32 all_flags = ~(u32)0 >> (32 - data->n_priv_flags);
+
+ return ethnl_bitset32_size(&data->priv_flags, &all_flags,
+ data->n_priv_flags,
+ data->priv_flag_names, compact);
+}
+
+static int privflags_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct privflags_reply_data *data = PRIVFLAGS_REPDATA(reply_base);
+ bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+ const u32 all_flags = ~(u32)0 >> (32 - data->n_priv_flags);
+
+ return ethnl_put_bitset32(skb, ETHTOOL_A_PRIVFLAGS_FLAGS,
+ &data->priv_flags, &all_flags,
+ data->n_priv_flags, data->priv_flag_names,
+ compact);
+}
+
+static void privflags_cleanup_data(struct ethnl_reply_data *reply_data)
+{
+ struct privflags_reply_data *data = PRIVFLAGS_REPDATA(reply_data);
+
+ kfree(data->priv_flag_names);
+}
+
+const struct ethnl_request_ops ethnl_privflags_request_ops = {
+ .request_cmd = ETHTOOL_MSG_PRIVFLAGS_GET,
+ .reply_cmd = ETHTOOL_MSG_PRIVFLAGS_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_PRIVFLAGS_HEADER,
+ .max_attr = ETHTOOL_A_PRIVFLAGS_MAX,
+ .req_info_size = sizeof(struct privflags_req_info),
+ .reply_data_size = sizeof(struct privflags_reply_data),
+ .request_policy = privflags_get_policy,
+
+ .prepare_data = privflags_prepare_data,
+ .reply_size = privflags_reply_size,
+ .fill_reply = privflags_fill_reply,
+ .cleanup_data = privflags_cleanup_data,
+};
+
+/* PRIVFLAGS_SET */
+
+static const struct nla_policy
+privflags_set_policy[ETHTOOL_A_PRIVFLAGS_MAX + 1] = {
+ [ETHTOOL_A_PRIVFLAGS_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_PRIVFLAGS_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_PRIVFLAGS_FLAGS] = { .type = NLA_NESTED },
+};
+
+int ethnl_set_privflags(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb[ETHTOOL_A_PRIVFLAGS_MAX + 1];
+ const char (*names)[ETH_GSTRING_LEN] = NULL;
+ struct ethnl_req_info req_info = {};
+ const struct ethtool_ops *ops;
+ struct net_device *dev;
+ unsigned int nflags;
+ bool mod = false;
+ bool compact;
+ u32 flags;
+ int ret;
+
+ ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
+ ETHTOOL_A_PRIVFLAGS_MAX, privflags_set_policy,
+ info->extack);
+ if (ret < 0)
+ return ret;
+ if (!tb[ETHTOOL_A_PRIVFLAGS_FLAGS])
+ return -EINVAL;
+ ret = ethnl_bitset_is_compact(tb[ETHTOOL_A_PRIVFLAGS_FLAGS], &compact);
+ if (ret < 0)
+ return ret;
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_PRIVFLAGS_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+ dev = req_info.dev;
+ ops = dev->ethtool_ops;
+ ret = -EOPNOTSUPP;
+ if (!ops->get_priv_flags || !ops->set_priv_flags ||
+ !ops->get_sset_count || !ops->get_strings)
+ goto out_dev;
+
+ rtnl_lock();
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out_rtnl;
+ ret = ethnl_get_priv_flags_info(dev, &nflags, compact ? NULL : &names);
+ if (ret < 0)
+ goto out_ops;
+ flags = ops->get_priv_flags(dev);
+
+ ret = ethnl_update_bitset32(&flags, nflags,
+ tb[ETHTOOL_A_PRIVFLAGS_FLAGS], names,
+ info->extack, &mod);
+ if (ret < 0 || !mod)
+ goto out_free;
+ ret = ops->set_priv_flags(dev, flags);
+ if (ret < 0)
+ goto out_free;
+ ethtool_notify(dev, ETHTOOL_MSG_PRIVFLAGS_NTF, NULL);
+
+out_free:
+ kfree(names);
+out_ops:
+ ethnl_ops_complete(dev);
+out_rtnl:
+ rtnl_unlock();
+out_dev:
+ dev_put(dev);
+ return ret;
+}
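
An illustrative (hypothetical) driver-side counterpart of the ethtool_ops checked
above, for a device with two private flags:

	static const char example_priv_flags[][ETH_GSTRING_LEN] = {
		"legacy-rx",		/* bit 0 */
		"strict-vlan",		/* bit 1 */
	};

	static int example_get_sset_count(struct net_device *dev, int sset)
	{
		if (sset == ETH_SS_PRIV_FLAGS)
			return ARRAY_SIZE(example_priv_flags);
		return -EOPNOTSUPP;
	}

	static void example_get_strings(struct net_device *dev, u32 sset, u8 *data)
	{
		if (sset == ETH_SS_PRIV_FLAGS)
			memcpy(data, example_priv_flags,
			       sizeof(example_priv_flags));
	}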
diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c
new file mode 100644
index 000000000000..5422526f4eef
--- /dev/null
+++ b/net/ethtool/rings.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "netlink.h"
+#include "common.h"
+
+struct rings_req_info {
+ struct ethnl_req_info base;
+};
+
+struct rings_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_ringparam ringparam;
+};
+
+#define RINGS_REPDATA(__reply_base) \
+ container_of(__reply_base, struct rings_reply_data, base)
+
+static const struct nla_policy
+rings_get_policy[ETHTOOL_A_RINGS_MAX + 1] = {
+ [ETHTOOL_A_RINGS_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_RINGS_RX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX_MINI_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX_JUMBO_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_TX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX_MINI] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_TX] = { .type = NLA_REJECT },
+};
+
+static int rings_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct rings_reply_data *data = RINGS_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ int ret;
+
+ if (!dev->ethtool_ops->get_ringparam)
+ return -EOPNOTSUPP;
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+ dev->ethtool_ops->get_ringparam(dev, &data->ringparam);
+ ethnl_ops_complete(dev);
+
+ return 0;
+}
+
+static int rings_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ return nla_total_size(sizeof(u32)) + /* _RINGS_RX_MAX */
+ nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI_MAX */
+ nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO_MAX */
+ nla_total_size(sizeof(u32)) + /* _RINGS_TX_MAX */
+ nla_total_size(sizeof(u32)) + /* _RINGS_RX */
+ nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI */
+ nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO */
+ nla_total_size(sizeof(u32)); /* _RINGS_TX */
+}
+
+static int rings_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
+ const struct ethtool_ringparam *ringparam = &data->ringparam;
+
+ if ((ringparam->rx_max_pending &&
+ (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX,
+ ringparam->rx_max_pending) ||
+ nla_put_u32(skb, ETHTOOL_A_RINGS_RX,
+ ringparam->rx_pending))) ||
+ (ringparam->rx_mini_max_pending &&
+ (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX,
+ ringparam->rx_mini_max_pending) ||
+ nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI,
+ ringparam->rx_mini_pending))) ||
+ (ringparam->rx_jumbo_max_pending &&
+ (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX,
+ ringparam->rx_jumbo_max_pending) ||
+ nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO,
+ ringparam->rx_jumbo_pending))) ||
+ (ringparam->tx_max_pending &&
+ (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
+ ringparam->tx_max_pending) ||
+ nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
+ ringparam->tx_pending))))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+const struct ethnl_request_ops ethnl_rings_request_ops = {
+ .request_cmd = ETHTOOL_MSG_RINGS_GET,
+ .reply_cmd = ETHTOOL_MSG_RINGS_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_RINGS_HEADER,
+ .max_attr = ETHTOOL_A_RINGS_MAX,
+ .req_info_size = sizeof(struct rings_req_info),
+ .reply_data_size = sizeof(struct rings_reply_data),
+ .request_policy = rings_get_policy,
+
+ .prepare_data = rings_prepare_data,
+ .reply_size = rings_reply_size,
+ .fill_reply = rings_fill_reply,
+};
+
+/* RINGS_SET */
+
+static const struct nla_policy
+rings_set_policy[ETHTOOL_A_RINGS_MAX + 1] = {
+ [ETHTOOL_A_RINGS_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_RINGS_RX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX_MINI_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX_JUMBO_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_TX_MAX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_RINGS_RX] = { .type = NLA_U32 },
+ [ETHTOOL_A_RINGS_RX_MINI] = { .type = NLA_U32 },
+ [ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_U32 },
+ [ETHTOOL_A_RINGS_TX] = { .type = NLA_U32 },
+};
+
+int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb[ETHTOOL_A_RINGS_MAX + 1];
+ struct ethtool_ringparam ringparam = {};
+ struct ethnl_req_info req_info = {};
+ const struct nlattr *err_attr;
+ const struct ethtool_ops *ops;
+ struct net_device *dev;
+ bool mod = false;
+ int ret;
+
+ ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
+ ETHTOOL_A_RINGS_MAX, rings_set_policy,
+ info->extack);
+ if (ret < 0)
+ return ret;
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_RINGS_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+ dev = req_info.dev;
+ ops = dev->ethtool_ops;
+ ret = -EOPNOTSUPP;
+ if (!ops->get_ringparam || !ops->set_ringparam)
+ goto out_dev;
+
+ rtnl_lock();
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto out_rtnl;
+ ops->get_ringparam(dev, &ringparam);
+
+ ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
+ ethnl_update_u32(&ringparam.rx_mini_pending,
+ tb[ETHTOOL_A_RINGS_RX_MINI], &mod);
+ ethnl_update_u32(&ringparam.rx_jumbo_pending,
+ tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
+ ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
+ ret = 0;
+ if (!mod)
+ goto out_ops;
+
+ /* ensure new ring parameters are within limits */
+ if (ringparam.rx_pending > ringparam.rx_max_pending)
+ err_attr = tb[ETHTOOL_A_RINGS_RX];
+ else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending)
+ err_attr = tb[ETHTOOL_A_RINGS_RX_MINI];
+ else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending)
+ err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
+ else if (ringparam.tx_pending > ringparam.tx_max_pending)
+ err_attr = tb[ETHTOOL_A_RINGS_TX];
+ else
+ err_attr = NULL;
+ if (err_attr) {
+ ret = -EINVAL;
+ NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
+ "requested ring size exceeds maximum");
+ goto out_ops;
+ }
+
+ ret = dev->ethtool_ops->set_ringparam(dev, &ringparam);
+ if (ret < 0)
+ goto out_ops;
+ ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);
+
+out_ops:
+ ethnl_ops_complete(dev);
+out_rtnl:
+ rtnl_unlock();
+out_dev:
+ dev_put(dev);
+ return ret;
+}
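
The bounds check above validates against the _max_pending fields that the driver's
get_ringparam fills in; an illustrative (hypothetical) implementation:

	static void example_get_ringparam(struct net_device *dev,
					  struct ethtool_ringparam *ring)
	{
		struct example_priv *priv = netdev_priv(dev);	/* hypothetical */

		ring->rx_max_pending = 4096;	/* hardware limits */
		ring->tx_max_pending = 4096;
		ring->rx_pending = priv->rx_ring_size;	/* current sizes */
		ring->tx_pending = priv->tx_ring_size;
	}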
diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
index 8e5911887b4c..95eae5c68a52 100644
--- a/net/ethtool/strset.c
+++ b/net/ethtool/strset.c
@@ -60,6 +60,21 @@ static const struct strset_info info_template[] = {
.count = WOL_MODE_COUNT,
.strings = wol_mode_names,
},
+ [ETH_SS_SOF_TIMESTAMPING] = {
+ .per_dev = false,
+ .count = __SOF_TIMESTAMPING_CNT,
+ .strings = sof_timestamping_names,
+ },
+ [ETH_SS_TS_TX_TYPES] = {
+ .per_dev = false,
+ .count = __HWTSTAMP_TX_CNT,
+ .strings = ts_tx_type_names,
+ },
+ [ETH_SS_TS_RX_FILTERS] = {
+ .per_dev = false,
+ .count = __HWTSTAMP_FILTER_CNT,
+ .strings = ts_rx_filter_names,
+ },
};
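
The three string tables referenced above are assumed to follow the usual
const char name[][ETH_GSTRING_LEN] layout indexed by the enum value, e.g.
(hypothetical excerpt):

	const char example_tx_type_names[][ETH_GSTRING_LEN] = {
		[HWTSTAMP_TX_OFF]		= "off",
		[HWTSTAMP_TX_ON]		= "on",
		[HWTSTAMP_TX_ONESTEP_SYNC]	= "onestep-sync",
	};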
struct strset_req_info {
diff --git a/net/ethtool/tsinfo.c b/net/ethtool/tsinfo.c
new file mode 100644
index 000000000000..7cb5b512b77c
--- /dev/null
+++ b/net/ethtool/tsinfo.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/net_tstamp.h>
+
+#include "netlink.h"
+#include "common.h"
+#include "bitset.h"
+
+struct tsinfo_req_info {
+ struct ethnl_req_info base;
+};
+
+struct tsinfo_reply_data {
+ struct ethnl_reply_data base;
+ struct ethtool_ts_info ts_info;
+};
+
+#define TSINFO_REPDATA(__reply_base) \
+ container_of(__reply_base, struct tsinfo_reply_data, base)
+
+static const struct nla_policy
+tsinfo_get_policy[ETHTOOL_A_TSINFO_MAX + 1] = {
+ [ETHTOOL_A_TSINFO_UNSPEC] = { .type = NLA_REJECT },
+ [ETHTOOL_A_TSINFO_HEADER] = { .type = NLA_NESTED },
+ [ETHTOOL_A_TSINFO_TIMESTAMPING] = { .type = NLA_REJECT },
+ [ETHTOOL_A_TSINFO_TX_TYPES] = { .type = NLA_REJECT },
+ [ETHTOOL_A_TSINFO_RX_FILTERS] = { .type = NLA_REJECT },
+ [ETHTOOL_A_TSINFO_PHC_INDEX] = { .type = NLA_REJECT },
+};
+
+static int tsinfo_prepare_data(const struct ethnl_req_info *req_base,
+ struct ethnl_reply_data *reply_base,
+ struct genl_info *info)
+{
+ struct tsinfo_reply_data *data = TSINFO_REPDATA(reply_base);
+ struct net_device *dev = reply_base->dev;
+ int ret;
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+ ret = __ethtool_get_ts_info(dev, &data->ts_info);
+ ethnl_ops_complete(dev);
+
+ return ret;
+}
+
+static int tsinfo_reply_size(const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct tsinfo_reply_data *data = TSINFO_REPDATA(reply_base);
+ bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+ const struct ethtool_ts_info *ts_info = &data->ts_info;
+ int len = 0;
+ int ret;
+
+ BUILD_BUG_ON(__SOF_TIMESTAMPING_CNT > 32);
+ BUILD_BUG_ON(__HWTSTAMP_TX_CNT > 32);
+ BUILD_BUG_ON(__HWTSTAMP_FILTER_CNT > 32);
+
+ if (ts_info->so_timestamping) {
+ ret = ethnl_bitset32_size(&ts_info->so_timestamping, NULL,
+ __SOF_TIMESTAMPING_CNT,
+ sof_timestamping_names, compact);
+ if (ret < 0)
+ return ret;
+ len += ret; /* _TSINFO_TIMESTAMPING */
+ }
+ if (ts_info->tx_types) {
+ ret = ethnl_bitset32_size(&ts_info->tx_types, NULL,
+ __HWTSTAMP_TX_CNT,
+ ts_tx_type_names, compact);
+ if (ret < 0)
+ return ret;
+ len += ret; /* _TSINFO_TX_TYPES */
+ }
+ if (ts_info->rx_filters) {
+ ret = ethnl_bitset32_size(&ts_info->rx_filters, NULL,
+ __HWTSTAMP_FILTER_CNT,
+ ts_rx_filter_names, compact);
+ if (ret < 0)
+ return ret;
+ len += ret; /* _TSINFO_RX_FILTERS */
+ }
+ if (ts_info->phc_index >= 0)
+ len += nla_total_size(sizeof(u32)); /* _TSINFO_PHC_INDEX */
+
+ return len;
+}
+
+static int tsinfo_fill_reply(struct sk_buff *skb,
+ const struct ethnl_req_info *req_base,
+ const struct ethnl_reply_data *reply_base)
+{
+ const struct tsinfo_reply_data *data = TSINFO_REPDATA(reply_base);
+ bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+ const struct ethtool_ts_info *ts_info = &data->ts_info;
+ int ret;
+
+ if (ts_info->so_timestamping) {
+ ret = ethnl_put_bitset32(skb, ETHTOOL_A_TSINFO_TIMESTAMPING,
+ &ts_info->so_timestamping, NULL,
+ __SOF_TIMESTAMPING_CNT,
+ sof_timestamping_names, compact);
+ if (ret < 0)
+ return ret;
+ }
+ if (ts_info->tx_types) {
+ ret = ethnl_put_bitset32(skb, ETHTOOL_A_TSINFO_TX_TYPES,
+ &ts_info->tx_types, NULL,
+ __HWTSTAMP_TX_CNT,
+ ts_tx_type_names, compact);
+ if (ret < 0)
+ return ret;
+ }
+ if (ts_info->rx_filters) {
+ ret = ethnl_put_bitset32(skb, ETHTOOL_A_TSINFO_RX_FILTERS,
+ &ts_info->rx_filters, NULL,
+ __HWTSTAMP_FILTER_CNT,
+ ts_rx_filter_names, compact);
+ if (ret < 0)
+ return ret;
+ }
+ if (ts_info->phc_index >= 0 &&
+ nla_put_u32(skb, ETHTOOL_A_TSINFO_PHC_INDEX, ts_info->phc_index))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+const struct ethnl_request_ops ethnl_tsinfo_request_ops = {
+ .request_cmd = ETHTOOL_MSG_TSINFO_GET,
+ .reply_cmd = ETHTOOL_MSG_TSINFO_GET_REPLY,
+ .hdr_attr = ETHTOOL_A_TSINFO_HEADER,
+ .max_attr = ETHTOOL_A_TSINFO_MAX,
+ .req_info_size = sizeof(struct tsinfo_req_info),
+ .reply_data_size = sizeof(struct tsinfo_reply_data),
+ .request_policy = tsinfo_get_policy,
+
+ .prepare_data = tsinfo_prepare_data,
+ .reply_size = tsinfo_reply_size,
+ .fill_reply = tsinfo_fill_reply,
+};
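
An illustrative (hypothetical) get_ts_info implementation showing where the bitmaps
and PHC index serialized above come from:

	static int example_get_ts_info(struct net_device *dev,
				       struct ethtool_ts_info *info)
	{
		struct example_priv *priv = netdev_priv(dev);	/* hypothetical */

		info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
		info->phc_index = ptp_clock_index(priv->ptp_clock);
		info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
				   (1 << HWTSTAMP_FILTER_ALL);
		return 0;
	}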
diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c
index 55e1ecaaf739..1798421e9f1c 100644
--- a/net/ethtool/wol.c
+++ b/net/ethtool/wol.c
@@ -123,8 +123,9 @@ int ethnl_set_wol(struct sk_buff *skb, struct genl_info *info)
wol_set_policy, info->extack);
if (ret < 0)
return ret;
- ret = ethnl_parse_header(&req_info, tb[ETHTOOL_A_WOL_HEADER],
- genl_info_net(info), info->extack, true);
+ ret = ethnl_parse_header_dev_get(&req_info, tb[ETHTOOL_A_WOL_HEADER],
+ genl_info_net(info), info->extack,
+ true);
if (ret < 0)
return ret;
dev = req_info.dev;
diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
index d5f709b940ff..9787ef11ca71 100644
--- a/net/hsr/hsr_debugfs.c
+++ b/net/hsr/hsr_debugfs.c
@@ -113,7 +113,6 @@ void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
priv->node_tbl_root = NULL;
return;
}
- priv->node_tbl_file = de;
}
/* hsr_debugfs_term - Tear down debugfs infrastructure
@@ -125,9 +124,7 @@ void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
void
hsr_debugfs_term(struct hsr_priv *priv)
{
- debugfs_remove(priv->node_tbl_file);
- priv->node_tbl_file = NULL;
- debugfs_remove(priv->node_tbl_root);
+ debugfs_remove_recursive(priv->node_tbl_root);
priv->node_tbl_root = NULL;
}
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index c7bd6c49fadf..fc7027314ad8 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -57,24 +57,19 @@ static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
static bool hsr_check_carrier(struct hsr_port *master)
{
struct hsr_port *port;
- bool has_carrier;
- has_carrier = false;
+ ASSERT_RTNL();
- rcu_read_lock();
- hsr_for_each_port(master->hsr, port)
+ hsr_for_each_port(master->hsr, port) {
if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
- has_carrier = true;
- break;
+ netif_carrier_on(master->dev);
+ return true;
}
- rcu_read_unlock();
+ }
- if (has_carrier)
- netif_carrier_on(master->dev);
- else
- netif_carrier_off(master->dev);
+ netif_carrier_off(master->dev);
- return has_carrier;
+ return false;
}
static void hsr_check_announce(struct net_device *hsr_dev,
@@ -118,11 +113,9 @@ int hsr_get_max_mtu(struct hsr_priv *hsr)
struct hsr_port *port;
mtu_max = ETH_DATA_LEN;
- rcu_read_lock();
hsr_for_each_port(hsr, port)
if (port->type != HSR_PT_MASTER)
mtu_max = min(port->dev->mtu, mtu_max);
- rcu_read_unlock();
if (mtu_max < HSR_HLEN)
return 0;
@@ -157,7 +150,6 @@ static int hsr_dev_open(struct net_device *dev)
hsr = netdev_priv(dev);
designation = '\0';
- rcu_read_lock();
hsr_for_each_port(hsr, port) {
if (port->type == HSR_PT_MASTER)
continue;
@@ -175,7 +167,6 @@ static int hsr_dev_open(struct net_device *dev)
netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a fully working HSR network\n",
designation, port->dev->name);
}
- rcu_read_unlock();
if (designation == '\0')
netdev_warn(dev, "No slave devices configured\n");
@@ -350,22 +341,33 @@ static void hsr_announce(struct timer_list *t)
rcu_read_unlock();
}
+static void hsr_del_ports(struct hsr_priv *hsr)
+{
+ struct hsr_port *port;
+
+ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+ if (port)
+ hsr_del_port(port);
+
+ port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+ if (port)
+ hsr_del_port(port);
+
+ port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ if (port)
+ hsr_del_port(port);
+}
+
/* This has to be called after all the readers are gone.
* Otherwise we would have to check the return value of
* hsr_port_get_hsr().
*/
static void hsr_dev_destroy(struct net_device *hsr_dev)
{
- struct hsr_priv *hsr;
- struct hsr_port *port;
- struct hsr_port *tmp;
-
- hsr = netdev_priv(hsr_dev);
+ struct hsr_priv *hsr = netdev_priv(hsr_dev);
hsr_debugfs_term(hsr);
-
- list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
- hsr_del_port(port);
+ hsr_del_ports(hsr);
del_timer_sync(&hsr->prune_timer);
del_timer_sync(&hsr->announce_timer);
@@ -431,11 +433,10 @@ static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
};
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
- unsigned char multicast_spec, u8 protocol_version)
+ unsigned char multicast_spec, u8 protocol_version,
+ struct netlink_ext_ack *extack)
{
struct hsr_priv *hsr;
- struct hsr_port *port;
- struct hsr_port *tmp;
int res;
hsr = netdev_priv(hsr_dev);
@@ -478,7 +479,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
/* Make sure the 1st call to netif_carrier_on() gets through */
netif_carrier_off(hsr_dev);
- res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
+ res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER, extack);
if (res)
goto err_add_master;
@@ -486,11 +487,11 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
if (res)
goto err_unregister;
- res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A);
+ res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
if (res)
goto err_add_slaves;
- res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B);
+ res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
if (res)
goto err_add_slaves;
@@ -502,8 +503,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
err_add_slaves:
unregister_netdevice(hsr_dev);
err_unregister:
- list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
- hsr_del_port(port);
+ hsr_del_ports(hsr);
err_add_master:
hsr_del_self_node(hsr);
diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h
index 6d7759c4f5f9..a099d7de7e79 100644
--- a/net/hsr/hsr_device.h
+++ b/net/hsr/hsr_device.h
@@ -13,7 +13,8 @@
void hsr_dev_setup(struct net_device *dev);
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
- unsigned char multicast_spec, u8 protocol_version);
+ unsigned char multicast_spec, u8 protocol_version,
+ struct netlink_ext_ack *extack);
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
bool is_hsr_master(struct net_device *dev);
int hsr_get_max_mtu(struct hsr_priv *hsr);
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index a64bb64935a6..03b891904314 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -318,7 +318,8 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
node_dst = find_node_by_addr_A(&port->hsr->node_db,
eth_hdr(skb)->h_dest);
if (!node_dst) {
- WARN_ONCE(1, "%s: Unknown node\n", __func__);
+ if (net_ratelimit())
+ netdev_err(skb->dev, "%s: Unknown node\n", __func__);
return;
}
if (port->type != node_dst->addr_B_port)
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index 9e389accbfc7..26d6c39f24e1 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -85,7 +85,8 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
master->dev->mtu = mtu_max;
break;
case NETDEV_UNREGISTER:
- hsr_del_port(port);
+ if (!is_hsr_master(dev))
+ hsr_del_port(port);
break;
case NETDEV_PRE_TYPE_CHANGE:
/* HSR works only on Ethernet devices. Refuse slave to change
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index 754d84b217f0..7321cf8d6d2c 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -166,7 +166,6 @@ struct hsr_priv {
unsigned char sup_multicast_addr[ETH_ALEN];
#ifdef CONFIG_DEBUG_FS
struct dentry *node_tbl_root;
- struct dentry *node_tbl_file;
#endif
};
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index fae21c863b1f..5465a395da04 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -35,26 +35,34 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
unsigned char multicast_spec, hsr_version;
if (!data) {
- netdev_info(dev, "HSR: No slave devices specified\n");
+ NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
return -EINVAL;
}
if (!data[IFLA_HSR_SLAVE1]) {
- netdev_info(dev, "HSR: Slave1 device not specified\n");
+ NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
return -EINVAL;
}
link[0] = __dev_get_by_index(src_net,
nla_get_u32(data[IFLA_HSR_SLAVE1]));
+ if (!link[0]) {
+ NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
+ return -EINVAL;
+ }
if (!data[IFLA_HSR_SLAVE2]) {
- netdev_info(dev, "HSR: Slave2 device not specified\n");
+ NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
return -EINVAL;
}
link[1] = __dev_get_by_index(src_net,
nla_get_u32(data[IFLA_HSR_SLAVE2]));
+ if (!link[1]) {
+ NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
+ return -EINVAL;
+ }
- if (!link[0] || !link[1])
- return -ENODEV;
- if (link[0] == link[1])
+ if (link[0] == link[1]) {
+ NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
return -EINVAL;
+ }
if (!data[IFLA_HSR_MULTICAST_SPEC])
multicast_spec = 0;
@@ -66,34 +74,25 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
else
hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);
- return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
+ return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack);
}
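
For reference, these extack strings surface through iproute2 when creating an HSR
device, e.g. (illustrative session):

	# ip link add name hsr0 type hsr slave1 eth0 slave2 eth0
	Error: hsr: Slave1 and Slave2 are the same.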
static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
- struct hsr_priv *hsr;
+ struct hsr_priv *hsr = netdev_priv(dev);
struct hsr_port *port;
- int res;
-
- hsr = netdev_priv(dev);
-
- res = 0;
- rcu_read_lock();
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
- if (port)
- res = nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex);
- rcu_read_unlock();
- if (res)
- goto nla_put_failure;
+ if (port) {
+ if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
+ goto nla_put_failure;
+ }
- rcu_read_lock();
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
- if (port)
- res = nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex);
- rcu_read_unlock();
- if (res)
- goto nla_put_failure;
+ if (port) {
+ if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
+ goto nla_put_failure;
+ }
if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
hsr->sup_multicast_addr) ||
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index a9104d42aafb..f4b9f7a3ce51 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -25,7 +25,6 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_PASS;
}
- rcu_read_lock(); /* hsr->node_db, hsr->ports */
port = hsr_port_get_rcu(skb->dev);
if (!port)
goto finish_pass;
@@ -45,11 +44,9 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
hsr_forward_skb(skb, port);
finish_consume:
- rcu_read_unlock(); /* hsr->node_db, hsr->ports */
return RX_HANDLER_CONSUMED;
finish_pass:
- rcu_read_unlock(); /* hsr->node_db, hsr->ports */
return RX_HANDLER_PASS;
}
@@ -58,33 +55,37 @@ bool hsr_port_exists(const struct net_device *dev)
return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
}
-static int hsr_check_dev_ok(struct net_device *dev)
+static int hsr_check_dev_ok(struct net_device *dev,
+ struct netlink_ext_ack *extack)
{
/* Don't allow HSR on non-ethernet like devices */
if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
dev->addr_len != ETH_ALEN) {
- netdev_info(dev, "Cannot use loopback or non-ethernet device as HSR slave.\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot use loopback or non-ethernet device as HSR slave.");
return -EINVAL;
}
/* Don't allow enslaving hsr devices */
if (is_hsr_master(dev)) {
- netdev_info(dev, "Cannot create trees of HSR devices.\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot create trees of HSR devices.");
return -EINVAL;
}
if (hsr_port_exists(dev)) {
- netdev_info(dev, "This device is already a HSR slave.\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "This device is already a HSR slave.");
return -EINVAL;
}
if (is_vlan_dev(dev)) {
- netdev_info(dev, "HSR on top of VLAN is not yet supported in this driver.\n");
+ NL_SET_ERR_MSG_MOD(extack, "HSR on top of VLAN is not yet supported in this driver.");
return -EINVAL;
}
if (dev->priv_flags & IFF_DONT_BRIDGE) {
- netdev_info(dev, "This device does not support bridging.\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "This device does not support bridging.");
return -EOPNOTSUPP;
}
@@ -96,19 +97,25 @@ static int hsr_check_dev_ok(struct net_device *dev)
}
/* Setup device to be added to the HSR bridge. */
-static int hsr_portdev_setup(struct net_device *dev, struct hsr_port *port)
+static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
+ struct hsr_port *port,
+ struct netlink_ext_ack *extack)
+
{
+ struct net_device *hsr_dev;
+ struct hsr_port *master;
int res;
- dev_hold(dev);
res = dev_set_promiscuity(dev, 1);
if (res)
- goto fail_promiscuity;
+ return res;
- /* FIXME:
- * What does net device "adjacency" mean? Should we do
- * res = netdev_master_upper_dev_link(port->dev, port->hsr->dev); ?
- */
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ hsr_dev = master->dev;
+
+ res = netdev_upper_dev_link(dev, hsr_dev, extack);
+ if (res)
+ goto fail_upper_dev_link;
res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
if (res)
@@ -118,21 +125,20 @@ static int hsr_portdev_setup(struct net_device *dev, struct hsr_port *port)
return 0;
fail_rx_handler:
+ netdev_upper_dev_unlink(dev, hsr_dev);
+fail_upper_dev_link:
dev_set_promiscuity(dev, -1);
-fail_promiscuity:
- dev_put(dev);
-
return res;
}
int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
- enum hsr_port_type type)
+ enum hsr_port_type type, struct netlink_ext_ack *extack)
{
struct hsr_port *port, *master;
int res;
if (type != HSR_PT_MASTER) {
- res = hsr_check_dev_ok(dev);
+ res = hsr_check_dev_ok(dev, extack);
if (res)
return res;
}
@@ -150,7 +156,7 @@ int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
port->type = type;
if (type != HSR_PT_MASTER) {
- res = hsr_portdev_setup(dev, port);
+ res = hsr_portdev_setup(hsr, dev, port, extack);
if (res)
goto fail_dev_setup;
}
@@ -179,21 +185,14 @@ void hsr_del_port(struct hsr_port *port)
list_del_rcu(&port->port_list);
if (port != master) {
- if (master) {
- netdev_update_features(master->dev);
- dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
- }
+ netdev_update_features(master->dev);
+ dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
netdev_rx_handler_unregister(port->dev);
dev_set_promiscuity(port->dev, -1);
+ netdev_upper_dev_unlink(port->dev, master->dev);
}
- /* FIXME?
- * netdev_upper_dev_unlink(port->dev, port->hsr->dev);
- */
-
synchronize_rcu();
- if (port != master)
- dev_put(port->dev);
kfree(port);
}
diff --git a/net/hsr/hsr_slave.h b/net/hsr/hsr_slave.h
index 64b549529592..8953ea279ce9 100644
--- a/net/hsr/hsr_slave.h
+++ b/net/hsr/hsr_slave.h
@@ -13,7 +13,7 @@
#include "hsr_main.h"
int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
- enum hsr_port_type pt);
+ enum hsr_port_type pt, struct netlink_ext_ack *extack);
void hsr_del_port(struct hsr_port *port);
bool hsr_port_exists(const struct net_device *dev);
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 9d97bace13c8..9e1a186a3671 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
obj-$(CONFIG_NET_SOCK_MSG) += tcp_bpf.o
+obj-$(CONFIG_BPF_STREAM_PARSER) += udp_bpf.o
obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 2fe295432c24..cf58e29cf746 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -872,7 +872,7 @@ int inet_shutdown(struct socket *sock, int how)
err = -ENOTCONN;
/* Hack to wake up other listeners, who can poll for
EPOLLHUP, even on eg. unconnected UDP sockets -- RR */
- /* fall through */
+ fallthrough;
default:
sk->sk_shutdown |= how;
if (sk->sk_prot->shutdown)
@@ -886,7 +886,7 @@ int inet_shutdown(struct socket *sock, int how)
case TCP_LISTEN:
if (!(how & RCV_SHUTDOWN))
break;
- /* fall through */
+ fallthrough;
case TCP_SYN_SENT:
err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
@@ -1793,6 +1793,10 @@ static __net_exit void ipv4_mib_exit_net(struct net *net)
free_percpu(net->mib.net_statistics);
free_percpu(net->mib.ip_statistics);
free_percpu(net->mib.tcp_statistics);
+#ifdef CONFIG_MPTCP
+ /* allocated on demand, see mptcp_init_sock() */
+ free_percpu(net->mib.mptcp_statistics);
+#endif
}
static __net_initdata struct pernet_operations ipv4_mib_ops = {
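A recurring change across this series converts /* fall through */ comments into the fallthrough; pseudo-keyword, which compilers supporting __attribute__((__fallthrough__)) can check against -Wimplicit-fallthrough. A small self-contained illustration; the macro is essentially the definition used by include/linux/compiler_attributes.h, with older compilers getting an empty statement:

#ifndef __has_attribute
# define __has_attribute(x) 0
#endif

#if __has_attribute(__fallthrough__)
# define fallthrough __attribute__((__fallthrough__))
#else
# define fallthrough do {} while (0)	/* no-op on older compilers */
#endif

static int shutdown_mask(int how, int state)
{
	int mask = 0;

	switch (state) {
	case 1:			/* e.g. a listening socket */
		if (!(how & 2))
			break;
		fallthrough;	/* deliberate: continue into case 2 */
	case 2:			/* e.g. a connecting socket */
		mask |= how;
		break;
	default:
		mask = how;
	}
	return mask;
}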
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 974179b3b314..d99e1be94019 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -107,7 +107,7 @@ static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
if (optlen < 6)
return -EINVAL;
memcpy(daddr, optptr+optlen-4, 4);
- /* Fall through */
+ fallthrough;
default:
memset(optptr, 0, optlen);
}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 05eb42f347e8..687971d83b4e 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1181,7 +1181,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
case SIOCSARP:
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- /* fall through */
+ fallthrough;
case SIOCGARP:
err = copy_from_user(&r, arg, sizeof(struct arpreq));
if (err)
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index 2bf3abeb1456..e3939f76b024 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -7,6 +7,7 @@
#include <linux/btf.h>
#include <linux/filter.h>
#include <net/tcp.h>
+#include <net/bpf_sk_storage.h>
static u32 optional_ops[] = {
offsetof(struct tcp_congestion_ops, init),
@@ -27,6 +28,27 @@ static u32 unsupported_ops[] = {
static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
+static int btf_sk_storage_get_ids[5];
+static struct bpf_func_proto btf_sk_storage_get_proto __read_mostly;
+
+static int btf_sk_storage_delete_ids[5];
+static struct bpf_func_proto btf_sk_storage_delete_proto __read_mostly;
+
+static void convert_sk_func_proto(struct bpf_func_proto *to, int *to_btf_ids,
+ const struct bpf_func_proto *from)
+{
+ int i;
+
+ *to = *from;
+ to->btf_id = to_btf_ids;
+ for (i = 0; i < ARRAY_SIZE(to->arg_type); i++) {
+ if (to->arg_type[i] == ARG_PTR_TO_SOCKET) {
+ to->arg_type[i] = ARG_PTR_TO_BTF_ID;
+ to->btf_id[i] = tcp_sock_id;
+ }
+ }
+}
+
static int bpf_tcp_ca_init(struct btf *btf)
{
s32 type_id;
@@ -42,6 +64,13 @@ static int bpf_tcp_ca_init(struct btf *btf)
tcp_sock_id = type_id;
tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);
+ convert_sk_func_proto(&btf_sk_storage_get_proto,
+ btf_sk_storage_get_ids,
+ &bpf_sk_storage_get_proto);
+ convert_sk_func_proto(&btf_sk_storage_delete_proto,
+ btf_sk_storage_delete_ids,
+ &bpf_sk_storage_delete_proto);
+
return 0;
}
@@ -167,6 +196,10 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
switch (func_id) {
case BPF_FUNC_tcp_send_ack:
return &bpf_tcp_send_ack_proto;
+ case BPF_FUNC_sk_storage_get:
+ return &btf_sk_storage_get_proto;
+ case BPF_FUNC_sk_storage_delete:
+ return &btf_sk_storage_delete_proto;
default:
return bpf_base_func_proto(func_id);
}
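The bpf_tcp_ca.c hunks clone the sk_storage get/delete protos and rewrite their ARG_PTR_TO_SOCKET arguments to ARG_PTR_TO_BTF_ID keyed to tcp_sock, so a struct_ops congestion-control program can pass the BTF-typed socket it already holds straight to those helpers. A hedged sketch of the program side (only the init callback is shown; the struct_ops map wiring and the remaining ops are omitted, and all names here are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct cc_state {
	__u64 init_ns;		/* when this flow started */
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct cc_state);
} cc_stg SEC(".maps");

SEC("struct_ops/sketch_init")
void BPF_PROG(sketch_init, struct sock *sk)
{
	struct cc_state *st;

	/* After this patch, the BTF-typed sk from the struct_ops
	 * context is accepted directly by the sk_storage helpers. */
	st = bpf_sk_storage_get(&cc_stg, sk, NULL,
				BPF_SK_STORAGE_GET_F_CREATE);
	if (st)
		st->init_ns = bpf_ktime_get_ns();
}

char _license[] SEC("license") = "GPL";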
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e4632bd2026d..30fa42f5997d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1566,11 +1566,11 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
}
}
ip_mc_up(in_dev);
- /* fall through */
+ fallthrough;
case NETDEV_CHANGEADDR:
if (!IN_DEV_ARP_NOTIFY(in_dev))
break;
- /* fall through */
+ fallthrough;
case NETDEV_NOTIFY_PEERS:
/* Send gratuitous ARP to notify of link change */
inetdev_send_gratuitous_arp(dev, in_dev);
@@ -1588,7 +1588,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
if (inetdev_valid_mtu(dev->mtu))
break;
/* disable IP when MTU is not enough */
- /* fall through */
+ fallthrough;
case NETDEV_UNREGISTER:
inetdev_destroy(in_dev);
break;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 103c7d599a3c..8b07f3a4f2db 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -341,22 +341,6 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err)
esp_output_done(base, err);
}
-static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
-{
- /* Fill padding... */
- if (tfclen) {
- memset(tail, 0, tfclen);
- tail += tfclen;
- }
- do {
- int i;
- for (i = 0; i < plen - 2; i++)
- tail[i] = i + 1;
- } while (0);
- tail[plen - 2] = plen - 2;
- tail[plen - 1] = proto;
-}
-
static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
int encap_type,
struct esp_info *esp,
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index e2e219c7854a..731022cff600 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -132,6 +132,36 @@ static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
return segs;
}
+static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
+ struct sk_buff *skb,
+ netdev_features_t features)
+{
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ const struct net_offload *ops;
+ int proto = xo->proto;
+
+ skb->transport_header += x->props.header_len;
+
+ if (proto == IPPROTO_BEETPH) {
+ struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data;
+
+ skb->transport_header += ph->hdrlen * 8;
+ proto = ph->nexthdr;
+ } else if (x->sel.family != AF_INET6) {
+ skb->transport_header -= IPV4_BEET_PHMAXLEN;
+ } else if (proto == IPPROTO_TCP) {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+ }
+
+ __skb_pull(skb, skb_transport_offset(skb));
+ ops = rcu_dereference(inet_offloads[proto]);
+ if (likely(ops && ops->callbacks.gso_segment))
+ segs = ops->callbacks.gso_segment(skb, features);
+
+ return segs;
+}
+
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
struct sk_buff *skb,
netdev_features_t features)
@@ -141,6 +171,8 @@ static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
return xfrm4_tunnel_gso_segment(x, skb, features);
case XFRM_MODE_TRANSPORT:
return xfrm4_transport_gso_segment(x, skb, features);
+ case XFRM_MODE_BEET:
+ return xfrm4_beet_gso_segment(x, skb, features);
}
return ERR_PTR(-EOPNOTSUPP);
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index c092e9a55790..818916b2a04d 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -35,7 +35,7 @@ static inline void fib_alias_accessed(struct fib_alias *fa)
void fib_release_info(struct fib_info *);
struct fib_info *fib_create_info(struct fib_config *cfg,
struct netlink_ext_ack *extack);
-int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
+int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
struct netlink_ext_ack *extack);
bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi);
int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index a803cdd9400a..6ed8c9317179 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -570,8 +570,9 @@ static int fib_detect_death(struct fib_info *fi, int order,
return 1;
}
-int fib_nh_common_init(struct fib_nh_common *nhc, struct nlattr *encap,
- u16 encap_type, void *cfg, gfp_t gfp_flags,
+int fib_nh_common_init(struct net *net, struct fib_nh_common *nhc,
+ struct nlattr *encap, u16 encap_type,
+ void *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack)
{
int err;
@@ -589,8 +590,9 @@ int fib_nh_common_init(struct fib_nh_common *nhc, struct nlattr *encap,
err = -EINVAL;
goto lwt_failure;
}
- err = lwtunnel_build_state(encap_type, encap, nhc->nhc_family,
- cfg, &lwtstate, extack);
+ err = lwtunnel_build_state(net, encap_type, encap,
+ nhc->nhc_family, cfg, &lwtstate,
+ extack);
if (err)
goto lwt_failure;
@@ -614,7 +616,7 @@ int fib_nh_init(struct net *net, struct fib_nh *nh,
nh->fib_nh_family = AF_INET;
- err = fib_nh_common_init(&nh->nh_common, cfg->fc_encap,
+ err = fib_nh_common_init(net, &nh->nh_common, cfg->fc_encap,
cfg->fc_encap_type, cfg, GFP_KERNEL, extack);
if (err)
return err;
@@ -814,7 +816,7 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
-static int fib_encap_match(u16 encap_type,
+static int fib_encap_match(struct net *net, u16 encap_type,
struct nlattr *encap,
const struct fib_nh *nh,
const struct fib_config *cfg,
@@ -826,7 +828,7 @@ static int fib_encap_match(u16 encap_type,
if (encap_type == LWTUNNEL_ENCAP_NONE)
return 0;
- ret = lwtunnel_build_state(encap_type, encap, AF_INET,
+ ret = lwtunnel_build_state(net, encap_type, encap, AF_INET,
cfg, &lwtstate, extack);
if (!ret) {
result = lwtunnel_cmp_encap(lwtstate, nh->fib_nh_lws);
@@ -836,7 +838,7 @@ static int fib_encap_match(u16 encap_type,
return result;
}
-int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
+int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
struct netlink_ext_ack *extack)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -857,8 +859,8 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
struct fib_nh *nh = fib_info_nh(fi, 0);
if (cfg->fc_encap) {
- if (fib_encap_match(cfg->fc_encap_type, cfg->fc_encap,
- nh, cfg, extack))
+ if (fib_encap_match(net, cfg->fc_encap_type,
+ cfg->fc_encap, nh, cfg, extack))
return 1;
}
#ifdef CONFIG_IP_ROUTE_CLASSID
@@ -1962,7 +1964,7 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
nexthop_nh->fib_nh_flags |= RTNH_F_DEAD;
- /* fall through */
+ fallthrough;
case NETDEV_CHANGE:
nexthop_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
break;
@@ -1984,7 +1986,7 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
fi->fib_flags |= RTNH_F_DEAD;
- /* fall through */
+ fallthrough;
case NETDEV_CHANGE:
fi->fib_flags |= RTNH_F_LINKDOWN;
break;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index ff0c24371e33..4f334b425538 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -35,9 +35,6 @@
* Paul E. McKenney <paulmck@us.ibm.com>
* Patrick McHardy <kaber@trash.net>
*/
-
-#define VERSION "0.409"
-
#include <linux/cache.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
@@ -304,8 +301,6 @@ static inline void alias_free_mem_rcu(struct fib_alias *fa)
call_rcu(&fa->rcu, __alias_free_mem);
}
-#define TNODE_KMALLOC_MAX \
- ilog2((PAGE_SIZE - TNODE_SIZE(0)) / sizeof(struct key_vector *))
#define TNODE_VMALLOC_MAX \
ilog2((SIZE_MAX - TNODE_SIZE(0)) / sizeof(struct key_vector *))
@@ -1684,7 +1679,7 @@ int fib_table_delete(struct net *net, struct fib_table *tb,
fi->fib_prefsrc == cfg->fc_prefsrc) &&
(!cfg->fc_protocol ||
fi->fib_protocol == cfg->fc_protocol) &&
- fib_nh_match(cfg, fi, extack) == 0 &&
+ fib_nh_match(net, cfg, fi, extack) == 0 &&
fib_metrics_match(cfg, fi)) {
fa_to_delete = fa;
break;
@@ -2577,6 +2572,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
" %zd bytes, size of tnode: %zd bytes.\n",
LEAF_SIZE, TNODE_SIZE(0));
+ rcu_read_lock();
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
@@ -2596,7 +2592,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
trie_show_usage(seq, t->stats);
#endif
}
+ cond_resched_rcu();
}
+ rcu_read_unlock();
return 0;
}
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index f369e7ce685b..fc61f51d87a3 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -865,7 +865,7 @@ static bool icmp_unreach(struct sk_buff *skb)
case 3:
if (!icmp_tag_validation(iph->protocol))
goto out;
- /* fall through */
+ fallthrough;
case 0:
info = ntohs(icmph->un.frag.mtu);
}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 3b9c7a2725a9..47f0502b2101 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -107,8 +107,6 @@
#ifdef CONFIG_IP_MULTICAST
/* Parameter names and values are taken from igmp-v2-06 draft */
-#define IGMP_V2_UNSOLICITED_REPORT_INTERVAL (10*HZ)
-#define IGMP_V3_UNSOLICITED_REPORT_INTERVAL (1*HZ)
#define IGMP_QUERY_INTERVAL (125*HZ)
#define IGMP_QUERY_RESPONSE_INTERVAL (10*HZ)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index d545fb99a8a1..5f34eb951627 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -131,7 +131,7 @@ static int inet_csk_bind_conflict(const struct sock *sk,
{
struct sock *sk2;
bool reuse = sk->sk_reuse;
- bool reuseport = !!sk->sk_reuseport && reuseport_ok;
+ bool reuseport = !!sk->sk_reuseport;
kuid_t uid = sock_i_uid((struct sock *)sk);
/*
@@ -146,17 +146,21 @@ static int inet_csk_bind_conflict(const struct sock *sk,
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
- if ((!reuse || !sk2->sk_reuse ||
- sk2->sk_state == TCP_LISTEN) &&
- (!reuseport || !sk2->sk_reuseport ||
- rcu_access_pointer(sk->sk_reuseport_cb) ||
- (sk2->sk_state != TCP_TIME_WAIT &&
- !uid_eq(uid, sock_i_uid(sk2))))) {
- if (inet_rcv_saddr_equal(sk, sk2, true))
- break;
- }
- if (!relax && reuse && sk2->sk_reuse &&
+ if (reuse && sk2->sk_reuse &&
sk2->sk_state != TCP_LISTEN) {
+ if ((!relax ||
+ (!reuseport_ok &&
+ reuseport && sk2->sk_reuseport &&
+ !rcu_access_pointer(sk->sk_reuseport_cb) &&
+ (sk2->sk_state == TCP_TIME_WAIT ||
+ uid_eq(uid, sock_i_uid(sk2))))) &&
+ inet_rcv_saddr_equal(sk, sk2, true))
+ break;
+ } else if (!reuseport_ok ||
+ !reuseport || !sk2->sk_reuseport ||
+ rcu_access_pointer(sk->sk_reuseport_cb) ||
+ (sk2->sk_state != TCP_TIME_WAIT &&
+ !uid_eq(uid, sock_i_uid(sk2)))) {
if (inet_rcv_saddr_equal(sk, sk2, true))
break;
}
@@ -176,12 +180,14 @@ inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *
int port = 0;
struct inet_bind_hashbucket *head;
struct net *net = sock_net(sk);
+ bool relax = false;
int i, low, high, attempt_half;
struct inet_bind_bucket *tb;
u32 remaining, offset;
int l3mdev;
l3mdev = inet_sk_bound_l3mdev(sk);
+ports_exhausted:
attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
inet_get_local_port_range(net, &low, &high);
@@ -219,7 +225,7 @@ other_parity_scan:
inet_bind_bucket_for_each(tb, &head->chain)
if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
tb->port == port) {
- if (!inet_csk_bind_conflict(sk, tb, false, false))
+ if (!inet_csk_bind_conflict(sk, tb, relax, false))
goto success;
goto next_port;
}
@@ -239,6 +245,12 @@ next_port:
attempt_half = 2;
goto other_half_scan;
}
+
+ if (net->ipv4.sysctl_ip_autobind_reuse && !relax) {
+ /* We still have a chance to connect to different destinations */
+ relax = true;
+ goto ports_exhausted;
+ }
return NULL;
success:
*port_ret = port;
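The inet_connection_sock.c rework lets inet_csk_find_open_port() retry its scan with relaxed conflict checks once the ephemeral range is exhausted, but only when the new net.ipv4.ip_autobind_reuse sysctl (added in the sysctl_net_ipv4.c hunk further down) is enabled; ports picked by bind() to port 0 can then be shared by sockets that connect() to different destinations. A hedged user-space sketch (error handling trimmed; writing the sysctl needs privilege, and 192.0.2.1 is just a documentation address):

#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	/* Enable the relaxed autobind behaviour added by this patch.
	 * Equivalent to: sysctl -w net.ipv4.ip_autobind_reuse=1 */
	FILE *f = fopen("/proc/sys/net/ipv4/ip_autobind_reuse", "w");
	if (f) { fputs("1\n", f); fclose(f); }

	int s = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in local = { .sin_family = AF_INET }; /* port 0 */

	/* bind() to port 0, then connect(); with the sysctl on, the
	 * port chosen here may be shared with other sockets as long
	 * as the resulting 4-tuples stay unique. */
	bind(s, (struct sockaddr *)&local, sizeof(local));

	struct sockaddr_in dst = { .sin_family = AF_INET,
				   .sin_port = htons(80) };
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
	connect(s, (struct sockaddr *)&dst, sizeof(dst));
	close(s);
	return 0;
}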
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8c8377568a78..5d50aad3cdbf 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -23,6 +23,7 @@
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
+#include <net/bpf_sk_storage.h>
#include <net/netlink.h>
#include <linux/inet.h>
@@ -170,26 +171,28 @@ errout:
}
EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill);
+#define MAX_DUMP_ALLOC_SIZE (KMALLOC_MAX_SIZE - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
- struct sk_buff *skb, const struct inet_diag_req_v2 *req,
- struct user_namespace *user_ns,
- u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh,
- bool net_admin)
+ struct sk_buff *skb, struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *req,
+ u16 nlmsg_flags, bool net_admin)
{
const struct tcp_congestion_ops *ca_ops;
const struct inet_diag_handler *handler;
+ struct inet_diag_dump_data *cb_data;
int ext = req->idiag_ext;
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
struct nlattr *attr;
void *info = NULL;
+ cb_data = cb->data;
handler = inet_diag_table[req->sdiag_protocol];
BUG_ON(!handler);
- nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
- nlmsg_flags);
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
if (!nlh)
return -EMSGSIZE;
@@ -201,7 +204,9 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->idiag_timer = 0;
r->idiag_retrans = 0;
- if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
+ if (inet_diag_msg_attrs_fill(sk, skb, r, ext,
+ sk_user_ns(NETLINK_CB(cb->skb).sk),
+ net_admin))
goto errout;
if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
@@ -298,6 +303,48 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
goto errout;
}
+ /* Keep it at the end for potential retry with a larger skb,
+ * or else do best-effort fitting, which is only done for the
+ * first_nlmsg.
+ */
+ if (cb_data->bpf_stg_diag) {
+ bool first_nlmsg = ((unsigned char *)nlh == skb->data);
+ unsigned int prev_min_dump_alloc;
+ unsigned int total_nla_size = 0;
+ unsigned int msg_len;
+ int err;
+
+ msg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
+ err = bpf_sk_storage_diag_put(cb_data->bpf_stg_diag, sk, skb,
+ INET_DIAG_SK_BPF_STORAGES,
+ &total_nla_size);
+
+ if (!err)
+ goto out;
+
+ total_nla_size += msg_len;
+ prev_min_dump_alloc = cb->min_dump_alloc;
+ if (total_nla_size > prev_min_dump_alloc)
+ cb->min_dump_alloc = min_t(u32, total_nla_size,
+ MAX_DUMP_ALLOC_SIZE);
+
+ if (!first_nlmsg)
+ goto errout;
+
+ if (cb->min_dump_alloc > prev_min_dump_alloc)
+ /* Retry with pskb_expand_head() with
+ * __GFP_DIRECT_RECLAIM
+ */
+ goto errout;
+
+ WARN_ON_ONCE(total_nla_size <= prev_min_dump_alloc);
+
+ /* Send what we have for this sk
+ * and move on to the next sk in the following
+ * dump()
+ */
+ }
+
out:
nlmsg_end(skb, nlh);
return 0;
@@ -308,30 +355,19 @@ errout:
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
-static int inet_csk_diag_fill(struct sock *sk,
- struct sk_buff *skb,
- const struct inet_diag_req_v2 *req,
- struct user_namespace *user_ns,
- u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh,
- bool net_admin)
-{
- return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns,
- portid, seq, nlmsg_flags, unlh, net_admin);
-}
-
static int inet_twsk_diag_fill(struct sock *sk,
struct sk_buff *skb,
- u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh)
+ struct netlink_callback *cb,
+ u16 nlmsg_flags)
{
struct inet_timewait_sock *tw = inet_twsk(sk);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
long tmo;
- nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
- nlmsg_flags);
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type,
+ sizeof(*r), nlmsg_flags);
if (!nlh)
return -EMSGSIZE;
@@ -355,16 +391,16 @@ static int inet_twsk_diag_fill(struct sock *sk,
}
static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
- u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh, bool net_admin)
+ struct netlink_callback *cb,
+ u16 nlmsg_flags, bool net_admin)
{
struct request_sock *reqsk = inet_reqsk(sk);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
long tmo;
- nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
- nlmsg_flags);
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ cb->nlh->nlmsg_type, sizeof(*r), nlmsg_flags);
if (!nlh)
return -EMSGSIZE;
@@ -393,21 +429,18 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
+ struct netlink_callback *cb,
const struct inet_diag_req_v2 *r,
- struct user_namespace *user_ns,
- u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh, bool net_admin)
+ u16 nlmsg_flags, bool net_admin)
{
if (sk->sk_state == TCP_TIME_WAIT)
- return inet_twsk_diag_fill(sk, skb, portid, seq,
- nlmsg_flags, unlh);
+ return inet_twsk_diag_fill(sk, skb, cb, nlmsg_flags);
if (sk->sk_state == TCP_NEW_SYN_RECV)
- return inet_req_diag_fill(sk, skb, portid, seq,
- nlmsg_flags, unlh, net_admin);
+ return inet_req_diag_fill(sk, skb, cb, nlmsg_flags, net_admin);
- return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
- nlmsg_flags, unlh, net_admin);
+ return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, r, nlmsg_flags,
+ net_admin);
}
struct sock *inet_diag_find_one_icsk(struct net *net,
@@ -455,10 +488,10 @@ struct sock *inet_diag_find_one_icsk(struct net *net,
EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk);
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
- struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+ struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
+ struct sk_buff *in_skb = cb->skb;
bool net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN);
struct net *net = sock_net(in_skb->sk);
struct sk_buff *rep;
@@ -475,10 +508,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
goto out;
}
- err = sk_diag_fill(sk, rep, req,
- sk_user_ns(NETLINK_CB(in_skb).sk),
- NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, nlh, net_admin);
+ err = sk_diag_fill(sk, rep, cb, req, 0, net_admin);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
nlmsg_free(rep);
@@ -505,14 +535,21 @@ static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
int err;
handler = inet_diag_lock_handler(req->sdiag_protocol);
- if (IS_ERR(handler))
+ if (IS_ERR(handler)) {
err = PTR_ERR(handler);
- else if (cmd == SOCK_DIAG_BY_FAMILY)
- err = handler->dump_one(in_skb, nlh, req);
- else if (cmd == SOCK_DESTROY && handler->destroy)
+ } else if (cmd == SOCK_DIAG_BY_FAMILY) {
+ struct inet_diag_dump_data empty_dump_data = {};
+ struct netlink_callback cb = {
+ .nlh = nlh,
+ .skb = in_skb,
+ .data = &empty_dump_data,
+ };
+ err = handler->dump_one(&cb, req);
+ } else if (cmd == SOCK_DESTROY && handler->destroy) {
err = handler->destroy(in_skb, req);
- else
+ } else {
err = -EOPNOTSUPP;
+ }
inet_diag_unlock_handler(handler);
return err;
@@ -843,23 +880,6 @@ static int inet_diag_bc_audit(const struct nlattr *attr,
return len == 0 ? 0 : -EINVAL;
}
-static int inet_csk_diag_dump(struct sock *sk,
- struct sk_buff *skb,
- struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- const struct nlattr *bc,
- bool net_admin)
-{
- if (!inet_diag_bc_sk(bc, sk))
- return 0;
-
- return inet_csk_diag_fill(sk, skb, r,
- sk_user_ns(NETLINK_CB(cb->skb).sk),
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh,
- net_admin);
-}
-
static void twsk_build_assert(void)
{
BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
@@ -888,14 +908,17 @@ static void twsk_build_assert(void)
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
+ struct inet_diag_dump_data *cb_data = cb->data;
struct net *net = sock_net(skb->sk);
u32 idiag_states = r->idiag_states;
int i, num, s_i, s_num;
+ struct nlattr *bc;
struct sock *sk;
+ bc = cb_data->inet_diag_nla_bc;
if (idiag_states & TCPF_SYN_RECV)
idiag_states |= TCPF_NEW_SYN_RECV;
s_i = cb->args[1];
@@ -931,8 +954,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
r->id.idiag_sport)
goto next_listen;
- if (inet_csk_diag_dump(sk, skb, cb, r,
- bc, net_admin) < 0) {
+ if (!inet_diag_bc_sk(bc, sk))
+ goto next_listen;
+
+ if (inet_sk_diag_fill(sk, inet_csk(sk), skb,
+ cb, r, NLM_F_MULTI,
+ net_admin) < 0) {
spin_unlock(&ilb->lock);
goto done;
}
@@ -1010,11 +1037,8 @@ next_normal:
res = 0;
for (idx = 0; idx < accum; idx++) {
if (res >= 0) {
- res = sk_diag_fill(sk_arr[idx], skb, r,
- sk_user_ns(NETLINK_CB(cb->skb).sk),
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- cb->nlh, net_admin);
+ res = sk_diag_fill(sk_arr[idx], skb, cb, r,
+ NLM_F_MULTI, net_admin);
if (res < 0)
num = num_arr[idx];
}
@@ -1038,31 +1062,101 @@ out:
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
const struct inet_diag_handler *handler;
+ u32 prev_min_dump_alloc;
int err = 0;
+again:
+ prev_min_dump_alloc = cb->min_dump_alloc;
handler = inet_diag_lock_handler(r->sdiag_protocol);
if (!IS_ERR(handler))
- handler->dump(skb, cb, r, bc);
+ handler->dump(skb, cb, r);
else
err = PTR_ERR(handler);
inet_diag_unlock_handler(handler);
+ /* The skb is not large enough to fit one sk info and
+ * inet_sk_diag_fill() has requested a larger skb.
+ */
+ if (!skb->len && cb->min_dump_alloc > prev_min_dump_alloc) {
+ err = pskb_expand_head(skb, 0, cb->min_dump_alloc, GFP_KERNEL);
+ if (!err)
+ goto again;
+ }
+
return err ? : skb->len;
}
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
- int hdrlen = sizeof(struct inet_diag_req_v2);
- struct nlattr *bc = NULL;
+ return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh));
+}
- if (nlmsg_attrlen(cb->nlh, hdrlen))
- bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
+static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen)
+{
+ const struct nlmsghdr *nlh = cb->nlh;
+ struct inet_diag_dump_data *cb_data;
+ struct sk_buff *skb = cb->skb;
+ struct nlattr *nla;
+ int rem, err;
+
+ cb_data = kzalloc(sizeof(*cb_data), GFP_KERNEL);
+ if (!cb_data)
+ return -ENOMEM;
+
+ nla_for_each_attr(nla, nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), rem) {
+ int type = nla_type(nla);
+
+ if (type < __INET_DIAG_REQ_MAX)
+ cb_data->req_nlas[type] = nla;
+ }
+
+ nla = cb_data->inet_diag_nla_bc;
+ if (nla) {
+ err = inet_diag_bc_audit(nla, skb);
+ if (err) {
+ kfree(cb_data);
+ return err;
+ }
+ }
+
+ nla = cb_data->inet_diag_nla_bpf_stgs;
+ if (nla) {
+ struct bpf_sk_storage_diag *bpf_stg_diag;
+
+ bpf_stg_diag = bpf_sk_storage_diag_alloc(nla);
+ if (IS_ERR(bpf_stg_diag)) {
+ kfree(cb_data);
+ return PTR_ERR(bpf_stg_diag);
+ }
+ cb_data->bpf_stg_diag = bpf_stg_diag;
+ }
+
+ cb->data = cb_data;
+ return 0;
+}
+
+static int inet_diag_dump_start(struct netlink_callback *cb)
+{
+ return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req_v2));
+}
+
+static int inet_diag_dump_start_compat(struct netlink_callback *cb)
+{
+ return __inet_diag_dump_start(cb, sizeof(struct inet_diag_req));
+}
- return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
+static int inet_diag_dump_done(struct netlink_callback *cb)
+{
+ struct inet_diag_dump_data *cb_data = cb->data;
+
+ bpf_sk_storage_diag_free(cb_data->bpf_stg_diag);
+ kfree(cb->data);
+
+ return 0;
}
static int inet_diag_type2proto(int type)
@@ -1081,9 +1175,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct inet_diag_req *rc = nlmsg_data(cb->nlh);
- int hdrlen = sizeof(struct inet_diag_req);
struct inet_diag_req_v2 req;
- struct nlattr *bc = NULL;
req.sdiag_family = AF_UNSPEC; /* compatibility */
req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
@@ -1091,10 +1183,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb,
req.idiag_states = rc->idiag_states;
req.id = rc->id;
- if (nlmsg_attrlen(cb->nlh, hdrlen))
- bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
-
- return __inet_diag_dump(skb, cb, &req, bc);
+ return __inet_diag_dump(skb, cb, &req);
}
static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
@@ -1122,22 +1211,12 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
- if (nlmsg_attrlen(nlh, hdrlen)) {
- struct nlattr *attr;
- int err;
-
- attr = nlmsg_find_attr(nlh, hdrlen,
- INET_DIAG_REQ_BYTECODE);
- err = inet_diag_bc_audit(attr, skb);
- if (err)
- return err;
- }
- {
- struct netlink_dump_control c = {
- .dump = inet_diag_dump_compat,
- };
- return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
- }
+ struct netlink_dump_control c = {
+ .start = inet_diag_dump_start_compat,
+ .done = inet_diag_dump_done,
+ .dump = inet_diag_dump_compat,
+ };
+ return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
}
return inet_diag_get_exact_compat(skb, nlh);
@@ -1153,22 +1232,12 @@ static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h)
if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
h->nlmsg_flags & NLM_F_DUMP) {
- if (nlmsg_attrlen(h, hdrlen)) {
- struct nlattr *attr;
- int err;
-
- attr = nlmsg_find_attr(h, hdrlen,
- INET_DIAG_REQ_BYTECODE);
- err = inet_diag_bc_audit(attr, skb);
- if (err)
- return err;
- }
- {
- struct netlink_dump_control c = {
- .dump = inet_diag_dump,
- };
- return netlink_dump_start(net->diag_nlsk, skb, h, &c);
- }
+ struct netlink_dump_control c = {
+ .start = inet_diag_dump_start,
+ .done = inet_diag_dump_done,
+ .dump = inet_diag_dump,
+ };
+ return netlink_dump_start(net->diag_nlsk, skb, h, &c);
}
return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h));
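The inet_diag.c changes move request parsing (the bytecode audit, and now the optional bpf_sk_storage request) out of the per-message receive path into a netlink .start callback, stash the results in cb->data for every resumed ->dump() call, and free them in .done. The lifecycle contract, as a minimal kernel-style sketch with foo_* names standing in for real handlers:

#include <linux/netlink.h>
#include <linux/slab.h>

struct foo_state { int cursor; };	/* per-dump state, illustrative */

static int foo_dump_start(struct netlink_callback *cb)
{
	struct foo_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return -ENOMEM;	/* a failing .start aborts the dump */
	/* parse and validate request attributes exactly once */
	cb->data = st;
	return 0;
}

static int foo_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct foo_state *st = cb->data;	/* survives every resume */

	/* fill skb starting from st->cursor; a non-empty skb means
	 * "call me again", an empty one ends the dump */
	return skb->len;
}

static int foo_dump_done(struct netlink_callback *cb)
{
	kfree(cb->data);	/* runs once when the dump ends */
	return 0;
}

It is wired up exactly as in the hunks above: a struct netlink_dump_control with .start, .dump, and .done handed to netlink_dump_start().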
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index aa438c6758a7..b0c244af1e4d 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -509,7 +509,8 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
IPCB(skb)->iif = skb->skb_iif;
/* Must drop socket now because of tproxy. */
- skb_orphan(skb);
+ if (!skb_sk_is_prefetched(skb))
+ skb_orphan(skb);
return skb;
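The ip_rcv_core() hunk keeps a socket that a TC-ingress BPF program attached with bpf_sk_assign() from being orphaned on its way into the IP input path. A hedged sketch of the BPF side; the fixed 127.0.0.1:8000 listener and all names are illustrative, and the tuple would normally be parsed from the packet headers:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#define TC_ACT_OK	0
#define TC_ACT_SHOT	2

SEC("tc")
int steer_to_listener(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;
	long err;

	/* Illustrative fixed target; real programs fill this from
	 * the parsed packet. */
	tuple.ipv4.daddr = bpf_htonl(0x7f000001);	/* 127.0.0.1 */
	tuple.ipv4.dport = bpf_htons(8000);

	sk = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
				BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return TC_ACT_OK;

	/* Pin the skb to this socket; after the hunk above,
	 * ip_rcv_core() preserves the association instead of
	 * calling skb_orphan(). */
	err = bpf_sk_assign(skb, sk, 0);
	bpf_sk_release(sk);

	return err ? TC_ACT_SHOT : TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";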
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d84819893db9..090d3097ee15 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -263,7 +263,7 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
* insufficient MTU.
*/
features = netif_skb_features(skb);
- BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
+ BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs)) {
kfree_skb(skb);
@@ -333,7 +333,7 @@ static int ip_mc_finish_output(struct net *net, struct sock *sk,
switch (ret) {
case NET_XMIT_CN:
do_cn = true;
- /* fall through */
+ fallthrough;
case NET_XMIT_SUCCESS:
break;
default:
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 74e1d964a615..cd4b84310d92 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -142,11 +142,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
cand = t;
}
- if (flags & TUNNEL_NO_KEY)
- goto skip_key_lookup;
-
hlist_for_each_entry_rcu(t, head, hash_node) {
- if (t->parms.i_key != key ||
+ if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) ||
t->parms.iph.saddr != 0 ||
t->parms.iph.daddr != 0 ||
!(t->dev->flags & IFF_UP))
@@ -158,7 +155,6 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
cand = t;
}
-skip_key_lookup:
if (cand)
return cand;
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 47f8b947eef1..181b7a2a0247 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -432,7 +432,7 @@ static int ip_tun_set_opts(struct nlattr *attr, struct ip_tunnel_info *info,
return ip_tun_parse_opts(attr, info, extack);
}
-static int ip_tun_build_state(struct nlattr *attr,
+static int ip_tun_build_state(struct net *net, struct nlattr *attr,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack)
@@ -719,7 +719,7 @@ static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
[LWTUNNEL_IP6_OPTS] = { .type = NLA_NESTED },
};
-static int ip6_tun_build_state(struct nlattr *attr,
+static int ip6_tun_build_state(struct net *net, struct nlattr *attr,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 6e68def66822..9cf83cc85e4a 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1465,7 +1465,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
case MRT_ADD_MFC:
case MRT_DEL_MFC:
parent = -1;
- /* fall through */
+ fallthrough;
case MRT_ADD_MFC_PROXY:
case MRT_DEL_MFC_PROXY:
if (optlen != sizeof(mfc)) {
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index f1f78a742b36..b167f4a5b684 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1057,7 +1057,7 @@ struct compat_arpt_replace {
u32 underflow[NF_ARP_NUMHOOKS];
u32 num_counters;
compat_uptr_t counters;
- struct compat_arpt_entry entries[0];
+ struct compat_arpt_entry entries[];
};
static inline void compat_release_entry(struct compat_arpt_entry *e)
@@ -1383,7 +1383,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
struct compat_arpt_get_entries {
char name[XT_TABLE_MAXNAMELEN];
compat_uint_t size;
- struct compat_arpt_entry entrytable[0];
+ struct compat_arpt_entry entrytable[];
};
static int compat_get_entries(struct net *net,
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 10b91ebdf213..c2670eaa74e6 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1211,7 +1211,7 @@ struct compat_ipt_replace {
u32 underflow[NF_INET_NUMHOOKS];
u32 num_counters;
compat_uptr_t counters; /* struct xt_counters * */
- struct compat_ipt_entry entries[0];
+ struct compat_ipt_entry entries[];
};
static int
@@ -1562,7 +1562,7 @@ compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
struct compat_ipt_get_entries {
char name[XT_TABLE_MAXNAMELEN];
compat_uint_t size;
- struct compat_ipt_entry entrytable[0];
+ struct compat_ipt_entry entrytable[];
};
static int
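The entries[0] → entries[] conversions in arp_tables.c and ip_tables.c swap the old GNU zero-length-array idiom for a C99 flexible array member; sizeof() behaves the same (the member contributes nothing), but compilers and static checkers can now flag out-of-bounds use. A standalone illustration of the idiom:

#include <stdlib.h>
#include <string.h>

struct table {
	char name[32];
	unsigned int size;
	unsigned int entries[];	/* flexible array member: must be last;
				 * contributes nothing to sizeof */
};

static struct table *table_alloc(unsigned int n)
{
	/* a single allocation covers the header plus n entries */
	struct table *t = malloc(sizeof(*t) + n * sizeof(t->entries[0]));

	if (t) {
		memset(t, 0, sizeof(*t) + n * sizeof(t->entries[0]));
		t->size = n;
	}
	return t;
}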
diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
index 4b2d49cc9f1a..0c72156130b6 100644
--- a/net/ipv4/netfilter/nf_log_ipv4.c
+++ b/net/ipv4/netfilter/nf_log_ipv4.c
@@ -173,7 +173,7 @@ static void dump_ipv4_packet(struct net *net, struct nf_log_buf *m,
case ICMP_REDIRECT:
/* Max length: 24 "GATEWAY=255.255.255.255 " */
nf_log_buf_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
- /* Fall through */
+ fallthrough;
case ICMP_DEST_UNREACH:
case ICMP_SOURCE_QUENCH:
case ICMP_TIME_EXCEEDED:
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index b2aeb7bf5dac..3c25a467b3ef 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -168,7 +168,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
pr_debug("unknown outbound packet 0x%04x:%s\n", msg,
msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
pptp_msg_name[0]);
- /* fall through */
+ fallthrough;
case PPTP_SET_LINK_INFO:
/* only need to NAT in case PAC is behind NAT box */
case PPTP_START_SESSION_REQUEST:
@@ -271,7 +271,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
pr_debug("unknown inbound packet %s\n",
msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
pptp_msg_name[0]);
- /* fall through */
+ fallthrough;
case PPTP_START_SESSION_REQUEST:
case PPTP_START_SESSION_REPLY:
case PPTP_STOP_SESSION_REQUEST:
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index d072c326dd64..fdfca534d094 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -1327,7 +1327,7 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
case AF_UNSPEC:
if (tb[NHA_GROUP])
break;
- /* fallthrough */
+ fallthrough;
default:
NL_SET_ERR_MSG(extack, "Invalid address family");
goto out;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 2580303249e2..75545a829a2b 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -32,6 +32,7 @@
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/tcp.h>
+#include <net/mptcp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <linux/bottom_half.h>
@@ -485,6 +486,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
offsetof(struct ipstats_mib, syncp)));
seq_putc(seq, '\n');
+ mptcp_seq_show(seq);
return 0;
}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 3183413ebc6c..47665919048f 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -1034,6 +1034,7 @@ static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
}
void *raw_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(&h->lock)
{
struct raw_hashinfo *h = PDE_DATA(file_inode(seq->file));
@@ -1056,6 +1057,7 @@ void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
EXPORT_SYMBOL_GPL(raw_seq_next);
void raw_seq_stop(struct seq_file *seq, void *v)
+ __releases(&h->lock)
{
struct raw_hashinfo *h = PDE_DATA(file_inode(seq->file));
diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
index a93e7d1e1251..1b5b8af27aaf 100644
--- a/net/ipv4/raw_diag.c
+++ b/net/ipv4/raw_diag.c
@@ -87,15 +87,16 @@ out_unlock:
return sk ? sk : ERR_PTR(-ENOENT);
}
-static int raw_diag_dump_one(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+static int raw_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *r)
{
- struct net *net = sock_net(in_skb->sk);
+ struct sk_buff *in_skb = cb->skb;
struct sk_buff *rep;
struct sock *sk;
+ struct net *net;
int err;
+ net = sock_net(in_skb->sk);
sk = raw_sock_get(net, r);
if (IS_ERR(sk))
return PTR_ERR(sk);
@@ -109,10 +110,7 @@ static int raw_diag_dump_one(struct sk_buff *in_skb,
return -ENOMEM;
}
- err = inet_sk_diag_fill(sk, NULL, rep, r,
- sk_user_ns(NETLINK_CB(in_skb).sk),
- NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, nlh,
+ err = inet_sk_diag_fill(sk, NULL, rep, cb, r, 0,
netlink_net_capable(in_skb, CAP_NET_ADMIN));
sock_put(sk);
@@ -137,25 +135,25 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
if (!inet_diag_bc_sk(bc, sk))
return 0;
- return inet_sk_diag_fill(sk, NULL, skb, r,
- sk_user_ns(NETLINK_CB(cb->skb).sk),
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- cb->nlh, net_admin);
+ return inet_sk_diag_fill(sk, NULL, skb, cb, r, NLM_F_MULTI, net_admin);
}
static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
struct net *net = sock_net(skb->sk);
+ struct inet_diag_dump_data *cb_data;
int num, s_num, slot, s_slot;
struct sock *sk = NULL;
+ struct nlattr *bc;
if (IS_ERR(hashinfo))
return;
+ cb_data = cb->data;
+ bc = cb_data->inet_diag_nla_bc;
s_slot = cb->args[0];
num = s_num = cb->args[1];
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ebe7060d0fc9..788c69d9bfe0 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1621,12 +1621,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
struct rtable *rt_dst_alloc(struct net_device *dev,
unsigned int flags, u16 type,
- bool nopolicy, bool noxfrm, bool will_cache)
+ bool nopolicy, bool noxfrm)
{
struct rtable *rt;
rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
- (will_cache ? 0 : DST_HOST) |
(nopolicy ? DST_NOPOLICY : 0) |
(noxfrm ? DST_NOXFRM : 0));
@@ -1674,7 +1673,6 @@ struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
new_rt->rt_gw6 = rt->rt_gw6;
INIT_LIST_HEAD(&new_rt->rt_uncached);
- new_rt->dst.flags |= DST_HOST;
new_rt->dst.input = rt->dst.input;
new_rt->dst.output = rt->dst.output;
new_rt->dst.error = rt->dst.error;
@@ -1734,7 +1732,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
flags |= RTCF_LOCAL;
rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
- IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
+ IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
if (!rth)
return -ENOBUFS;
@@ -1851,7 +1849,7 @@ static int __mkroute_input(struct sk_buff *skb,
rth = rt_dst_alloc(out_dev->dev, 0, res->type,
IN_DEV_CONF_GET(in_dev, NOPOLICY),
- IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
+ IN_DEV_CONF_GET(out_dev, NOXFRM));
if (!rth) {
err = -ENOBUFS;
goto cleanup;
@@ -2219,7 +2217,7 @@ local_input:
rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
flags | RTCF_LOCAL, res->type,
- IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
+ IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
if (!rth)
goto e_nobufs;
@@ -2443,8 +2441,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
add:
rth = rt_dst_alloc(dev_out, flags, type,
IN_DEV_CONF_GET(in_dev, NOPOLICY),
- IN_DEV_CONF_GET(in_dev, NOXFRM),
- do_cache);
+ IN_DEV_CONF_GET(in_dev, NOXFRM));
if (!rth)
return ERR_PTR(-ENOBUFS);
@@ -2774,6 +2771,54 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
+struct rtable *ip_route_output_tunnel(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net *net, __be32 *saddr,
+ const struct ip_tunnel_info *info,
+ u8 protocol, bool use_cache)
+{
+#ifdef CONFIG_DST_CACHE
+ struct dst_cache *dst_cache;
+#endif
+ struct rtable *rt = NULL;
+ struct flowi4 fl4;
+ __u8 tos;
+
+#ifdef CONFIG_DST_CACHE
+ dst_cache = (struct dst_cache *)&info->dst_cache;
+ if (use_cache) {
+ rt = dst_cache_get_ip4(dst_cache, saddr);
+ if (rt)
+ return rt;
+ }
+#endif
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_mark = skb->mark;
+ fl4.flowi4_proto = protocol;
+ fl4.daddr = info->key.u.ipv4.dst;
+ fl4.saddr = info->key.u.ipv4.src;
+ tos = info->key.tos;
+ fl4.flowi4_tos = RT_TOS(tos);
+
+ rt = ip_route_output_key(net, &fl4);
+ if (IS_ERR(rt)) {
+ netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
+ return ERR_PTR(-ENETUNREACH);
+ }
+ if (rt->dst.dev == dev) { /* is this necessary? */
+ netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
+ ip_rt_put(rt);
+ return ERR_PTR(-ELOOP);
+ }
+#ifdef CONFIG_DST_CACHE
+ if (use_cache)
+ dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
+#endif
+ *saddr = fl4.saddr;
+ return rt;
+}
+EXPORT_SYMBOL_GPL(ip_route_output_tunnel);
+
/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
struct rtable *rt, u32 table_id, struct flowi4 *fl4,
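ip_route_output_tunnel() above consolidates the route lookup, dst-cache handling, and circular-route detection that UDP tunnel drivers would otherwise open-code. A hedged sketch of a transmit-path caller; tun_* names are illustrative, not an in-tree driver, and header construction is assumed:

#include <net/ip_tunnels.h>
#include <net/route.h>

static int tun_route_skb(struct sk_buff *skb, struct net_device *dev,
			 const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct rtable *rt;
	__be32 saddr;

	rt = ip_route_output_tunnel(skb, dev, dev_net(dev), &saddr,
				    info, IPPROTO_UDP, use_cache);
	if (IS_ERR(rt))
		return PTR_ERR(rt);	/* -ENETUNREACH or -ELOOP */

	/* ... push the tunnel header and transmit, e.g. via
	 * udp_tunnel_xmit_skb(rt, ...), which consumes the route ... */
	ip_rt_put(rt);	/* for this sketch, just drop the reference */
	return 0;
}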
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 9684af02e0a5..81b267e990a1 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -555,18 +555,6 @@ static struct ctl_table ipv4_table[] = {
},
#endif /* CONFIG_NETLABEL */
{
- .procname = "tcp_available_congestion_control",
- .maxlen = TCP_CA_BUF_MAX,
- .mode = 0444,
- .proc_handler = proc_tcp_available_congestion_control,
- },
- {
- .procname = "tcp_allowed_congestion_control",
- .maxlen = TCP_CA_BUF_MAX,
- .mode = 0644,
- .proc_handler = proc_allowed_congestion_control,
- },
- {
.procname = "tcp_available_ulp",
.maxlen = TCP_ULP_BUF_MAX,
.mode = 0444,
@@ -776,6 +764,15 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "ip_autobind_reuse",
+ .data = &init_net.ipv4.sysctl_ip_autobind_reuse,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
.procname = "fwmark_reflect",
.data = &init_net.ipv4.sysctl_fwmark_reflect,
.maxlen = sizeof(int),
@@ -886,6 +883,18 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_tcp_congestion_control,
},
{
+ .procname = "tcp_available_congestion_control",
+ .maxlen = TCP_CA_BUF_MAX,
+ .mode = 0444,
+ .proc_handler = proc_tcp_available_congestion_control,
+ },
+ {
+ .procname = "tcp_allowed_congestion_control",
+ .maxlen = TCP_CA_BUF_MAX,
+ .mode = 0644,
+ .proc_handler = proc_allowed_congestion_control,
+ },
+ {
.procname = "tcp_keepalive_time",
.data = &init_net.ipv4.sysctl_tcp_keepalive_time,
.maxlen = sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index dc77c303e6f7..6d87de434377 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2251,7 +2251,7 @@ void tcp_set_state(struct sock *sk, int state)
if (inet_csk(sk)->icsk_bind_hash &&
!(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
inet_put_port(sk);
- /* fall through */
+ fallthrough;
default:
if (oldstate == TCP_ESTABLISHED)
TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
@@ -3346,6 +3346,7 @@ static size_t tcp_opt_stats_get_size(void)
nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */
nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */
+ nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */
0;
}
@@ -3401,6 +3402,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);
nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash);
+ nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT,
+ max_t(int, 0, tp->write_seq - tp->snd_nxt));
return stats;
}
@@ -3669,13 +3672,35 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
if (get_user(len, optlen))
return -EFAULT;
- if (len != sizeof(zc))
+ if (len < offsetofend(struct tcp_zerocopy_receive, length))
return -EINVAL;
+ if (len > sizeof(zc)) {
+ len = sizeof(zc);
+ if (put_user(len, optlen))
+ return -EFAULT;
+ }
if (copy_from_user(&zc, optval, len))
return -EFAULT;
lock_sock(sk);
err = tcp_zerocopy_receive(sk, &zc);
release_sock(sk);
+ if (len == sizeof(zc))
+ goto zerocopy_rcv_sk_err;
+ switch (len) {
+ case offsetofend(struct tcp_zerocopy_receive, err):
+ goto zerocopy_rcv_sk_err;
+ case offsetofend(struct tcp_zerocopy_receive, inq):
+ goto zerocopy_rcv_inq;
+ case offsetofend(struct tcp_zerocopy_receive, length):
+ default:
+ goto zerocopy_rcv_out;
+ }
+zerocopy_rcv_sk_err:
+ if (!err)
+ zc.err = sock_error(sk);
+zerocopy_rcv_inq:
+ zc.inq = tcp_inq_hint(sk);
+zerocopy_rcv_out:
if (!err && copy_to_user(optval, &zc, len))
err = -EFAULT;
return err;
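The do_tcp_getsockopt() change makes TCP_ZEROCOPY_RECEIVE length-extensible: old binaries keep passing the short struct, while newer ones pass a longer one and get err/inq filled in, with the kernel capping the length and writing it back. A hedged user-space sketch; the struct layout follows the offsetofend() switch above, but real users should take the definition from <linux/tcp.h>, and the full mmap()-based zerocopy setup is omitted:

#include <netinet/in.h>
#include <stdint.h>
#include <sys/socket.h>

#ifndef TCP_ZEROCOPY_RECEIVE
#define TCP_ZEROCOPY_RECEIVE 35
#endif

struct tcp_zerocopy_receive {
	uint64_t address;	/* in: address of the mapping */
	uint32_t length;	/* in/out: bytes mapped */
	uint32_t recv_skip_hint;/* out: bytes to read via recv() */
	uint32_t inq;		/* out: bytes still queued (new) */
	int32_t err;		/* out: pending socket error (new) */
};

static int zc_receive(int fd, struct tcp_zerocopy_receive *zc)
{
	socklen_t len = sizeof(*zc);	/* kernel caps and writes back */

	/* Pre-patch kernels accepted only the short struct; now any
	 * length covering at least ->length is valid, and the longer
	 * struct additionally gets ->inq and ->err filled in. */
	return getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
			  zc, &len);
}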
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 645cc3009e64..f5f588b1f6e9 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -145,12 +145,13 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
- if (tcp_in_slow_start(tp))
- tcp_slow_start(tp, acked);
- else {
- bictcp_update(ca, tp->snd_cwnd);
- tcp_cong_avoid_ai(tp, ca->cnt, 1);
+ if (tcp_in_slow_start(tp)) {
+ acked = tcp_slow_start(tp, acked);
+ if (!acked)
+ return;
}
+ bictcp_update(ca, tp->snd_cwnd);
+ tcp_cong_avoid_ai(tp, ca->cnt, acked);
}
/*
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 8a01428f80c1..5a05327f97c1 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -10,38 +10,6 @@
#include <net/inet_common.h>
#include <net/tls.h>
-static bool tcp_bpf_stream_read(const struct sock *sk)
-{
- struct sk_psock *psock;
- bool empty = true;
-
- rcu_read_lock();
- psock = sk_psock(sk);
- if (likely(psock))
- empty = list_empty(&psock->ingress_msg);
- rcu_read_unlock();
- return !empty;
-}
-
-static int tcp_bpf_wait_data(struct sock *sk, struct sk_psock *psock,
- int flags, long timeo, int *err)
-{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int ret = 0;
-
- if (!timeo)
- return ret;
-
- add_wait_queue(sk_sleep(sk), &wait);
- sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- ret = sk_wait_event(sk, &timeo,
- !list_empty(&psock->ingress_msg) ||
- !skb_queue_empty(&sk->sk_receive_queue), &wait);
- sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- remove_wait_queue(sk_sleep(sk), &wait);
- return ret;
-}
-
int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
struct msghdr *msg, int len, int flags)
{
@@ -115,49 +83,6 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
}
EXPORT_SYMBOL_GPL(__tcp_bpf_recvmsg);
-int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
-{
- struct sk_psock *psock;
- int copied, ret;
-
- psock = sk_psock_get(sk);
- if (unlikely(!psock))
- return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
- if (unlikely(flags & MSG_ERRQUEUE))
- return inet_recv_error(sk, msg, len, addr_len);
- if (!skb_queue_empty(&sk->sk_receive_queue) &&
- sk_psock_queue_empty(psock))
- return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
- lock_sock(sk);
-msg_bytes_ready:
- copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
- if (!copied) {
- int data, err = 0;
- long timeo;
-
- timeo = sock_rcvtimeo(sk, nonblock);
- data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err);
- if (data) {
- if (!sk_psock_queue_empty(psock))
- goto msg_bytes_ready;
- release_sock(sk);
- sk_psock_put(sk, psock);
- return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
- }
- if (err) {
- ret = err;
- goto out;
- }
- copied = -EAGAIN;
- }
- ret = copied;
-out:
- release_sock(sk);
- sk_psock_put(sk, psock);
- return ret;
-}
-
static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
struct sk_msg *msg, u32 apply_bytes, int flags)
{
@@ -298,6 +223,82 @@ int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);
+#ifdef CONFIG_BPF_STREAM_PARSER
+static bool tcp_bpf_stream_read(const struct sock *sk)
+{
+ struct sk_psock *psock;
+ bool empty = true;
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (likely(psock))
+ empty = list_empty(&psock->ingress_msg);
+ rcu_read_unlock();
+ return !empty;
+}
+
+static int tcp_bpf_wait_data(struct sock *sk, struct sk_psock *psock,
+ int flags, long timeo, int *err)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret = 0;
+
+ if (!timeo)
+ return ret;
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ ret = sk_wait_event(sk, &timeo,
+ !list_empty(&psock->ingress_msg) ||
+ !skb_queue_empty(&sk->sk_receive_queue), &wait);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return ret;
+}
+
+static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int nonblock, int flags, int *addr_len)
+{
+ struct sk_psock *psock;
+ int copied, ret;
+
+ psock = sk_psock_get(sk);
+ if (unlikely(!psock))
+ return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+ if (unlikely(flags & MSG_ERRQUEUE))
+ return inet_recv_error(sk, msg, len, addr_len);
+ if (!skb_queue_empty(&sk->sk_receive_queue) &&
+ sk_psock_queue_empty(psock))
+ return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+ lock_sock(sk);
+msg_bytes_ready:
+ copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
+ if (!copied) {
+ int data, err = 0;
+ long timeo;
+
+ timeo = sock_rcvtimeo(sk, nonblock);
+ data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err);
+ if (data) {
+ if (!sk_psock_queue_empty(psock))
+ goto msg_bytes_ready;
+ release_sock(sk);
+ sk_psock_put(sk, psock);
+ return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+ }
+ if (err) {
+ ret = err;
+ goto out;
+ }
+ copied = -EAGAIN;
+ }
+ ret = copied;
+out:
+ release_sock(sk);
+ sk_psock_put(sk, psock);
+ return ret;
+}
+
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
struct sk_msg *msg, int *copied, int flags)
{
@@ -528,57 +529,6 @@ out_err:
return copied ? copied : err;
}
-static void tcp_bpf_remove(struct sock *sk, struct sk_psock *psock)
-{
- struct sk_psock_link *link;
-
- while ((link = sk_psock_link_pop(psock))) {
- sk_psock_unlink(sk, link);
- sk_psock_free_link(link);
- }
-}
-
-static void tcp_bpf_unhash(struct sock *sk)
-{
- void (*saved_unhash)(struct sock *sk);
- struct sk_psock *psock;
-
- rcu_read_lock();
- psock = sk_psock(sk);
- if (unlikely(!psock)) {
- rcu_read_unlock();
- if (sk->sk_prot->unhash)
- sk->sk_prot->unhash(sk);
- return;
- }
-
- saved_unhash = psock->saved_unhash;
- tcp_bpf_remove(sk, psock);
- rcu_read_unlock();
- saved_unhash(sk);
-}
-
-static void tcp_bpf_close(struct sock *sk, long timeout)
-{
- void (*saved_close)(struct sock *sk, long timeout);
- struct sk_psock *psock;
-
- lock_sock(sk);
- rcu_read_lock();
- psock = sk_psock(sk);
- if (unlikely(!psock)) {
- rcu_read_unlock();
- release_sock(sk);
- return sk->sk_prot->close(sk, timeout);
- }
-
- saved_close = psock->saved_close;
- tcp_bpf_remove(sk, psock);
- rcu_read_unlock();
- release_sock(sk);
- saved_close(sk, timeout);
-}
-
enum {
TCP_BPF_IPV4,
TCP_BPF_IPV6,
@@ -599,8 +549,8 @@ static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
struct proto *base)
{
prot[TCP_BPF_BASE] = *base;
- prot[TCP_BPF_BASE].unhash = tcp_bpf_unhash;
- prot[TCP_BPF_BASE].close = tcp_bpf_close;
+ prot[TCP_BPF_BASE].unhash = sock_map_unhash;
+ prot[TCP_BPF_BASE].close = sock_map_close;
prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
prot[TCP_BPF_BASE].stream_memory_read = tcp_bpf_stream_read;
@@ -629,26 +579,6 @@ static int __init tcp_bpf_v4_build_proto(void)
}
core_initcall(tcp_bpf_v4_build_proto);
-static void tcp_bpf_update_sk_prot(struct sock *sk, struct sk_psock *psock)
-{
- int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
- int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;
-
- sk_psock_update_proto(sk, psock, &tcp_bpf_prots[family][config]);
-}
-
-static void tcp_bpf_reinit_sk_prot(struct sock *sk, struct sk_psock *psock)
-{
- int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
- int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;
-
- /* Reinit occurs when program types change e.g. TCP_BPF_TX is removed
- * or added requiring sk_prot hook updates. We keep original saved
- * hooks in this case.
- */
- sk->sk_prot = &tcp_bpf_prots[family][config];
-}
-
static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
/* In order to avoid retpoline, we make assumptions when we call
@@ -660,34 +590,34 @@ static int tcp_bpf_assert_proto_ops(struct proto *ops)
ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
}
-void tcp_bpf_reinit(struct sock *sk)
+struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock)
{
- struct sk_psock *psock;
+ int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
+ int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;
- sock_owned_by_me(sk);
+ if (!psock->sk_proto) {
+ struct proto *ops = READ_ONCE(sk->sk_prot);
- rcu_read_lock();
- psock = sk_psock(sk);
- tcp_bpf_reinit_sk_prot(sk, psock);
- rcu_read_unlock();
+ if (tcp_bpf_assert_proto_ops(ops))
+ return ERR_PTR(-EINVAL);
+
+ tcp_bpf_check_v6_needs_rebuild(sk, ops);
+ }
+
+ return &tcp_bpf_prots[family][config];
}
-int tcp_bpf_init(struct sock *sk)
+/* If a child got cloned from a listening socket that had tcp_bpf
+ * protocol callbacks installed, we need to restore the callbacks to
+ * the default ones because the child does not inherit the psock state
+ * that tcp_bpf callbacks expect.
+ */
+void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
- struct proto *ops = READ_ONCE(sk->sk_prot);
- struct sk_psock *psock;
-
- sock_owned_by_me(sk);
+ int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
+ struct proto *prot = newsk->sk_prot;
- rcu_read_lock();
- psock = sk_psock(sk);
- if (unlikely(!psock || psock->sk_proto ||
- tcp_bpf_assert_proto_ops(ops))) {
- rcu_read_unlock();
- return -EINVAL;
- }
- tcp_bpf_check_v6_needs_rebuild(sk, ops);
- tcp_bpf_update_sk_prot(sk, psock);
- rcu_read_unlock();
- return 0;
+ if (prot == &tcp_bpf_prots[family][TCP_BPF_BASE])
+ newsk->sk_prot = sk->sk_prot_creator;
}
+#endif /* CONFIG_BPF_STREAM_PARSER */
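tcp_bpf_clone() (invoked from the tcp_minisocks.c hunk below) handles children accepted from a listener whose proto ops were swapped to tcp_bpf: the child inherits the parent's sk_prot but none of its psock state, so it is pointed back at sk_prot_creator. A hedged user-space sketch of the setup that triggers it (libbpf calls of this era; bind/listen and error handling trimmed):

#include <bpf/bpf.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Put a listening TCP socket into a sockmap, then accept(). Without
 * the tcp_bpf_clone() hunk above, the accepted child would keep the
 * parent's tcp_bpf callbacks while owning no psock of its own. */
static int demo(void)
{
	int map_fd, lfd, cfd, key = 0;

	map_fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP,
				sizeof(int), sizeof(int), 1, 0);
	lfd = socket(AF_INET, SOCK_STREAM, 0);
	/* ... bind() and listen() on lfd ... */

	bpf_map_update_elem(map_fd, &key, &lfd, BPF_ANY);

	cfd = accept(lfd, NULL, NULL);	/* child: default callbacks */
	return cfd;
}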
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 0d08f9e2d8d0..75a1c985f49a 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -179,15 +179,15 @@ static size_t tcp_diag_get_aux_size(struct sock *sk, bool net_admin)
}
static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
- inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
+ inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r);
}
-static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+static int tcp_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
- return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
+ return inet_diag_dump_one_icsk(&tcp_hashinfo, cb, req);
}
#ifdef CONFIG_INET_DIAG_DESTROY
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6b6b57000dad..bf4ced9273e8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2865,7 +2865,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
(*ack_flag & FLAG_LOST_RETRANS)))
return;
/* Change state if cwnd is undone or retransmits are lost */
- /* fall through */
+ fallthrough;
default:
if (tcp_is_reno(tp)) {
if (flag & FLAG_SND_UNA_ADVANCED)
@@ -6367,7 +6367,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
mptcp_incoming_options(sk, skb, &tp->rx_opt);
break;
}
- /* fall through */
+ fallthrough;
case TCP_FIN_WAIT1:
case TCP_FIN_WAIT2:
/* RFC 793 says to queue data in these states,
@@ -6382,7 +6382,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
return 1;
}
}
- /* Fall through */
+ fallthrough;
case TCP_ESTABLISHED:
tcp_data_queue(sk, skb);
queued = 1;
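
The comment-to-keyword conversions above use the kernel's fallthrough pseudo-keyword, which expands to the compiler's fallthrough statement attribute where supported, so -Wimplicit-fallthrough can verify intent instead of parsing comments. A standalone sketch, with a simplified stand-in for the kernel's macro definition:

#include <stdio.h>

/* Simplified stand-in for the definition in
 * <linux/compiler_attributes.h>. */
#if defined(__GNUC__) && __GNUC__ >= 7
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0)  /* no annotation available */
#endif

static const char *classify(int state)
{
    switch (state) {
    case 1:
        /* states 1 and 2 share handling: annotate, don't break */
        fallthrough;
    case 2:
        return "low";
    default:
        return "other";
    }
}

int main(void)
{
    printf("%s %s\n", classify(1), classify(3));    /* low other */
    return 0;
}
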
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index df1166b76126..83a5d24e13b8 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1019,7 +1019,8 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
if (!md5sig)
return NULL;
- hlist_for_each_entry_rcu(key, &md5sig->head, node) {
+ hlist_for_each_entry_rcu(key, &md5sig->head, node,
+ lockdep_sock_is_held(sk)) {
if (key->family != family)
continue;
if (key->l3index && key->l3index != l3index)
@@ -1064,7 +1065,8 @@ static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
if (family == AF_INET6)
size = sizeof(struct in6_addr);
#endif
- hlist_for_each_entry_rcu(key, &md5sig->head, node) {
+ hlist_for_each_entry_rcu(key, &md5sig->head, node,
+ lockdep_sock_is_held(sk)) {
if (key->family != family)
continue;
if (key->l3index && key->l3index != l3index)
@@ -2070,7 +2072,7 @@ do_time_wait:
}
}
/* to ACK */
- /* fall through */
+ fallthrough;
case TCP_TW_ACK:
tcp_v4_timewait_ack(sk, skb);
break;
@@ -2366,7 +2368,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
break;
st->bucket = 0;
st->state = TCP_SEQ_STATE_ESTABLISHED;
- /* Fallthrough */
+ fallthrough;
case TCP_SEQ_STATE_ESTABLISHED:
if (st->bucket > tcp_hashinfo.ehash_mask)
break;
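
The two MD5-key lookup hunks above pass a new fourth argument to hlist_for_each_entry_rcu(): a lockdep expression naming the lock that also legitimizes traversal, here the socket lock, so RCU-lockdep stops warning when the list is walked by a writer that holds that lock instead of rcu_read_lock(). A kernel-context sketch of the same pattern (assumes <net/sock.h> and <linux/rculist.h>; names are illustrative):

/* A list written under lock_sock() and read under RCU *or* the
 * socket lock. */
struct my_key {
    struct hlist_node node;
    int id;
};

static struct my_key *my_key_lookup(struct sock *sk,
                                    struct hlist_head *head, int id)
{
    struct my_key *key;

    /* The 4th argument tells RCU-lockdep that holding the socket
     * lock is an equally valid way to traverse this list. */
    hlist_for_each_entry_rcu(key, head, node,
                             lockdep_sock_is_held(sk)) {
        if (key->id == id)
            return key;
    }
    return NULL;
}
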
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index ad3b56d9fa71..7e40322cc5ec 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -548,6 +548,8 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->fastopen_req = NULL;
RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
+ tcp_bpf_clone(sk, newsk);
+
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
return newsk;
@@ -772,6 +774,12 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (!child)
goto listen_overflow;
+ if (own_req && sk_is_mptcp(child) && mptcp_sk_is_subflow(child)) {
+ reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
+ inet_csk_reqsk_queue_drop_and_put(sk, req);
+ return child;
+ }
+
sock_rps_save_rxhash(child, skb);
tcp_synack_rtt_meas(child, req);
*req_stolen = !own_req;
@@ -817,6 +825,7 @@ EXPORT_SYMBOL(tcp_check_req);
int tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb)
+ __releases(&((child)->sk_lock.slock))
{
int ret = 0;
int state = child->sk_state;
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 471571e1ab26..6cebf412d590 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -10,10 +10,9 @@
#include <net/tcp.h>
/* These factors derived from the recommended values in the aer:
- * .01 and and 7/8. We use 50 instead of 100 to account for
- * delayed ack.
+ * .01 and 7/8.
*/
-#define TCP_SCALABLE_AI_CNT 50U
+#define TCP_SCALABLE_AI_CNT 100U
#define TCP_SCALABLE_MD_SCALE 3
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
@@ -23,11 +22,13 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
- if (tcp_in_slow_start(tp))
- tcp_slow_start(tp, acked);
- else
- tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
- 1);
+ if (tcp_in_slow_start(tp)) {
+ acked = tcp_slow_start(tp, acked);
+ if (!acked)
+ return;
+ }
+ tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
+ acked);
}
static u32 tcp_scalable_ssthresh(struct sock *sk)
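
tcp_slow_start() returns how many ACKed segments were left over once cwnd reached ssthresh; the rewrite above feeds that remainder into tcp_cong_avoid_ai() instead of discarding it, and the same change recurs in tcp_veno and tcp_yeah below. A standalone model of the arithmetic (simplified: no cwnd clamp):

#include <stdio.h>

/* Simplified tcp_slow_start(): grow cwnd by acked, capped at
 * ssthresh, and return the ACKs left over past the cap. */
static unsigned int slow_start(unsigned int *cwnd, unsigned int ssthresh,
                               unsigned int acked)
{
    unsigned int grown = *cwnd + acked;

    if (grown <= ssthresh) {
        *cwnd = grown;
        return 0;
    }
    acked = grown - ssthresh;
    *cwnd = ssthresh;
    return acked;
}

/* Simplified tcp_cong_avoid_ai(): one cwnd increment per w ACKs. */
static void cong_avoid_ai(unsigned int *cwnd, unsigned int *cnt,
                          unsigned int w, unsigned int acked)
{
    *cnt += acked;
    while (*cnt >= w) {
        *cnt -= w;
        (*cwnd)++;
    }
}

int main(void)
{
    unsigned int cwnd = 8, cnt = 0, ssthresh = 10;
    unsigned int acked = slow_start(&cwnd, ssthresh, 5);

    /* 2 ACKs exhausted by slow start, 3 carried into AI. */
    if (acked)
        cong_avoid_ai(&cwnd, &cnt, cwnd, acked);
    printf("cwnd=%u cnt=%u\n", cwnd, cnt);  /* cwnd=10 cnt=3 */
    return 0;
}
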
diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
index 38d3ad141161..7c27aa629af1 100644
--- a/net/ipv4/tcp_ulp.c
+++ b/net/ipv4/tcp_ulp.c
@@ -22,7 +22,8 @@ static struct tcp_ulp_ops *tcp_ulp_find(const char *name)
{
struct tcp_ulp_ops *e;
- list_for_each_entry_rcu(e, &tcp_ulp_list, list) {
+ list_for_each_entry_rcu(e, &tcp_ulp_list, list,
+ lockdep_is_held(&tcp_ulp_list_lock)) {
if (strcmp(e->name, name) == 0)
return e;
}
@@ -104,12 +105,6 @@ void tcp_update_ulp(struct sock *sk, struct proto *proto,
{
struct inet_connection_sock *icsk = inet_csk(sk);
- if (!icsk->icsk_ulp_ops) {
- sk->sk_write_space = write_space;
- sk->sk_prot = proto;
- return;
- }
-
if (icsk->icsk_ulp_ops->update)
icsk->icsk_ulp_ops->update(sk, proto, write_space);
}
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 3b36bb1a0dda..50a9a6e2c4cd 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -153,31 +153,34 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
if (tcp_in_slow_start(tp)) {
- /* Slow start. */
- tcp_slow_start(tp, acked);
+ /* Slow start. */
+ acked = tcp_slow_start(tp, acked);
+ if (!acked)
+ goto done;
+ }
+
+ /* Congestion avoidance. */
+ if (veno->diff < beta) {
+ /* In the "non-congestive state", increase cwnd
+ * every rtt.
+ */
+ tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
} else {
- /* Congestion avoidance. */
- if (veno->diff < beta) {
- /* In the "non-congestive state", increase cwnd
- * every rtt.
- */
- tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
- } else {
- /* In the "congestive state", increase cwnd
- * every other rtt.
- */
- if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
- if (veno->inc &&
- tp->snd_cwnd < tp->snd_cwnd_clamp) {
- tp->snd_cwnd++;
- veno->inc = 0;
- } else
- veno->inc = 1;
- tp->snd_cwnd_cnt = 0;
+ /* In the "congestive state", increase cwnd
+ * every other rtt.
+ */
+ if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
+ if (veno->inc &&
+ tp->snd_cwnd < tp->snd_cwnd_clamp) {
+ tp->snd_cwnd++;
+ veno->inc = 0;
} else
- tp->snd_cwnd_cnt++;
- }
+ veno->inc = 1;
+ tp->snd_cwnd_cnt = 0;
+ } else
+ tp->snd_cwnd_cnt += acked;
}
+done:
if (tp->snd_cwnd < 2)
tp->snd_cwnd = 2;
else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index e00570dd0a69..3bb448761ca3 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -36,8 +36,6 @@ struct yeah {
u32 reno_count;
u32 fast_count;
-
- u32 pkts_acked;
};
static void tcp_yeah_init(struct sock *sk)
@@ -57,18 +55,6 @@ static void tcp_yeah_init(struct sock *sk)
tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
}
-static void tcp_yeah_pkts_acked(struct sock *sk,
- const struct ack_sample *sample)
-{
- const struct inet_connection_sock *icsk = inet_csk(sk);
- struct yeah *yeah = inet_csk_ca(sk);
-
- if (icsk->icsk_ca_state == TCP_CA_Open)
- yeah->pkts_acked = sample->pkts_acked;
-
- tcp_vegas_pkts_acked(sk, sample);
-}
-
static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -77,24 +63,19 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
- if (tcp_in_slow_start(tp))
- tcp_slow_start(tp, acked);
+ if (tcp_in_slow_start(tp)) {
+ acked = tcp_slow_start(tp, acked);
+ if (!acked)
+ goto do_vegas;
+ }
- else if (!yeah->doing_reno_now) {
+ if (!yeah->doing_reno_now) {
/* Scalable */
-
- tp->snd_cwnd_cnt += yeah->pkts_acked;
- if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) {
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
- tp->snd_cwnd_cnt = 0;
- }
-
- yeah->pkts_acked = 1;
-
+ tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
+ acked);
} else {
/* Reno */
- tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
+ tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
@@ -118,7 +99,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
* of bytes we send in an RTT is often less than our cwnd will allow.
* So we keep track of our cwnd separately, in v_beg_snd_cwnd.
*/
-
+do_vegas:
if (after(ack, yeah->vegas.beg_snd_nxt)) {
/* We do the Vegas calculations only if we got enough RTT
* samples that we can be reasonably sure that we got
@@ -232,7 +213,7 @@ static struct tcp_congestion_ops tcp_yeah __read_mostly = {
.set_state = tcp_vegas_state,
.cwnd_event = tcp_vegas_cwnd_event,
.get_info = tcp_vegas_get_info,
- .pkts_acked = tcp_yeah_pkts_acked,
+ .pkts_acked = tcp_vegas_pkts_acked,
.owner = THIS_MODULE,
.name = "yeah",
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 08a41f1e1cd2..32564b350823 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1671,10 +1671,11 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
error = -EAGAIN;
do {
spin_lock_bh(&queue->lock);
- skb = __skb_try_recv_from_queue(sk, queue, flags,
- udp_skb_destructor,
- off, err, &last);
+ skb = __skb_try_recv_from_queue(sk, queue, flags, off,
+ err, &last);
if (skb) {
+ if (!(flags & MSG_PEEK))
+ udp_skb_destructor(sk, skb);
spin_unlock_bh(&queue->lock);
return skb;
}
@@ -1692,9 +1693,10 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
spin_lock(&sk_queue->lock);
skb_queue_splice_tail_init(sk_queue, queue);
- skb = __skb_try_recv_from_queue(sk, queue, flags,
- udp_skb_dtor_locked,
- off, err, &last);
+ skb = __skb_try_recv_from_queue(sk, queue, flags, off,
+ err, &last);
+ if (skb && !(flags & MSG_PEEK))
+ udp_skb_dtor_locked(sk, skb);
spin_unlock(&sk_queue->lock);
spin_unlock_bh(&queue->lock);
if (skb)
@@ -2107,7 +2109,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (likely(!udp_unexpected_gso(sk, skb)))
return udp_queue_rcv_one_skb(sk, skb);
- BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_SGO_CB_OFFSET);
+ BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_GSO_CB_OFFSET);
__skb_push(skb, -skb_mac_offset(skb));
segs = udp_rcv_segment(sk, skb, true);
skb_list_walk_safe(segs, skb, next) {
@@ -2286,6 +2288,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
struct rtable *rt = skb_rtable(skb);
__be32 saddr, daddr;
struct net *net = dev_net(skb->dev);
+ bool refcounted;
/*
* Validate the packet.
@@ -2311,7 +2314,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (udp4_csum_init(skb, uh, proto))
goto csum_error;
- sk = skb_steal_sock(skb);
+ sk = skb_steal_sock(skb, &refcounted);
if (sk) {
struct dst_entry *dst = skb_dst(skb);
int ret;
@@ -2320,7 +2323,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
udp_sk_rx_dst_set(sk, dst);
ret = udp_unicast_rcv_skb(sk, skb, uh);
- sock_put(sk);
+ if (refcounted)
+ sock_put(sk);
return ret;
}
@@ -2561,7 +2565,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
case UDP_ENCAP_ESPINUDP_NON_IKE:
up->encap_rcv = xfrm4_udp_encap_rcv;
#endif
- /* FALLTHROUGH */
+ fallthrough;
case UDP_ENCAP_L2TPINUDP:
up->encap_type = val;
lock_sock(sk);
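
skb_steal_sock() now reports through a bool out-parameter whether the stolen socket carries a reference the caller must drop; TPROXY-prefetched sockets do, while early-demuxed sockets may not. A kernel-context sketch of the consumer side (assumes <net/sock.h> and <linux/errno.h>; delivery step elided):

static int handle_demuxed(struct sk_buff *skb)
{
    bool refcounted;
    struct sock *sk = skb_steal_sock(skb, &refcounted);

    if (!sk)
        return -ENOENT;

    /* ... deliver skb to sk ... */

    if (refcounted)
        sock_put(sk);   /* drop only a reference we actually hold */
    return 0;
}
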
diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c
new file mode 100644
index 000000000000..eddd973e6575
--- /dev/null
+++ b/net/ipv4/udp_bpf.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Cloudflare Ltd https://cloudflare.com */
+
+#include <linux/skmsg.h>
+#include <net/sock.h>
+#include <net/udp.h>
+
+enum {
+ UDP_BPF_IPV4,
+ UDP_BPF_IPV6,
+ UDP_BPF_NUM_PROTS,
+};
+
+static struct proto *udpv6_prot_saved __read_mostly;
+static DEFINE_SPINLOCK(udpv6_prot_lock);
+static struct proto udp_bpf_prots[UDP_BPF_NUM_PROTS];
+
+static void udp_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
+{
+ *prot = *base;
+ prot->unhash = sock_map_unhash;
+ prot->close = sock_map_close;
+}
+
+static void udp_bpf_check_v6_needs_rebuild(struct sock *sk, struct proto *ops)
+{
+ if (sk->sk_family == AF_INET6 &&
+ unlikely(ops != smp_load_acquire(&udpv6_prot_saved))) {
+ spin_lock_bh(&udpv6_prot_lock);
+ if (likely(ops != udpv6_prot_saved)) {
+ udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV6], ops);
+ smp_store_release(&udpv6_prot_saved, ops);
+ }
+ spin_unlock_bh(&udpv6_prot_lock);
+ }
+}
+
+static int __init udp_bpf_v4_build_proto(void)
+{
+ udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV4], &udp_prot);
+ return 0;
+}
+core_initcall(udp_bpf_v4_build_proto);
+
+struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock)
+{
+ int family = sk->sk_family == AF_INET ? UDP_BPF_IPV4 : UDP_BPF_IPV6;
+
+ if (!psock->sk_proto)
+ udp_bpf_check_v6_needs_rebuild(sk, READ_ONCE(sk->sk_prot));
+
+ return &udp_bpf_prots[family];
+}
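
udp_bpf.c builds the IPv4 proto variant once at boot and rebuilds the IPv6 variant lazily, publishing the saved base pointer with store-release so that concurrent acquire-loads either see a fully rebuilt table or fall into the locked slow path. The same copy-override-publish pattern in standalone form (a pthread mutex and C11 atomics stand in for the spinlock and smp_* helpers):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct ops { void (*close)(void); };

static void base_close(void) { puts("base close"); }
static void bpf_close(void)  { puts("bpf close"); }

static struct ops bpf_ops;                        /* rebuilt copy */
static _Atomic(const struct ops *) saved_base;    /* last base we copied */
static pthread_mutex_t rebuild_lock = PTHREAD_MUTEX_INITIALIZER;

/* Rebuild bpf_ops from @base exactly once per distinct base, in the
 * spirit of udp_bpf_check_v6_needs_rebuild(). */
static void check_needs_rebuild(const struct ops *base)
{
    if (base != atomic_load_explicit(&saved_base, memory_order_acquire)) {
        pthread_mutex_lock(&rebuild_lock);
        if (base != saved_base) {
            bpf_ops = *base;              /* copy everything ... */
            bpf_ops.close = bpf_close;    /* ... then override */
            atomic_store_explicit(&saved_base, base,
                                  memory_order_release);
        }
        pthread_mutex_unlock(&rebuild_lock);
    }
}

int main(void)
{
    struct ops base = { base_close };

    check_needs_rebuild(&base);
    check_needs_rebuild(&base);    /* second call is a no-op */
    bpf_ops.close();               /* bpf close */
    return 0;
}
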
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index dccd2286bc28..1dbece34496e 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -21,16 +21,15 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
if (!inet_diag_bc_sk(bc, sk))
return 0;
- return inet_sk_diag_fill(sk, NULL, skb, req,
- sk_user_ns(NETLINK_CB(cb->skb).sk),
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, net_admin);
+ return inet_sk_diag_fill(sk, NULL, skb, cb, req, NLM_F_MULTI,
+ net_admin);
}
-static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+static int udp_dump_one(struct udp_table *tbl,
+ struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
+ struct sk_buff *in_skb = cb->skb;
int err = -EINVAL;
struct sock *sk = NULL;
struct sk_buff *rep;
@@ -71,11 +70,8 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
if (!rep)
goto out;
- err = inet_sk_diag_fill(sk, NULL, rep, req,
- sk_user_ns(NETLINK_CB(in_skb).sk),
- NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, nlh,
- netlink_net_capable(in_skb, CAP_NET_ADMIN));
+ err = inet_sk_diag_fill(sk, NULL, rep, cb, req, 0,
+ netlink_net_capable(in_skb, CAP_NET_ADMIN));
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
kfree_skb(rep);
@@ -94,12 +90,16 @@ out_nosk:
static void udp_dump(struct udp_table *table, struct sk_buff *skb,
struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
struct net *net = sock_net(skb->sk);
+ struct inet_diag_dump_data *cb_data;
int num, s_num, slot, s_slot;
+ struct nlattr *bc;
+ cb_data = cb->data;
+ bc = cb_data->inet_diag_nla_bc;
s_slot = cb->args[0];
num = s_num = cb->args[1];
@@ -147,15 +147,15 @@ done:
}
static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
- udp_dump(&udp_table, skb, cb, r, bc);
+ udp_dump(&udp_table, skb, cb, r);
}
-static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+static int udp_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
- return udp_dump_one(&udp_table, in_skb, nlh, req);
+ return udp_dump_one(&udp_table, cb, req);
}
static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
@@ -250,16 +250,15 @@ static const struct inet_diag_handler udp_diag_handler = {
};
static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r,
- struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
- udp_dump(&udplite_table, skb, cb, r, bc);
+ udp_dump(&udplite_table, skb, cb, r);
}
-static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+static int udplite_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
- return udp_dump_one(&udplite_table, in_skb, nlh, req);
+ return udp_dump_one(&udplite_table, cb, req);
}
static const struct inet_diag_handler udplite_diag_handler = {
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 1a98583a79f4..e67a66fbf27b 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -453,6 +453,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
unsigned int off = skb_gro_offset(skb);
int flush = 1;
+ NAPI_GRO_CB(skb)->is_flist = 0;
if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled: 1;
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index ae1344e4cec5..2ccaee98fddb 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -303,4 +303,14 @@ config IPV6_SEG6_BPF
depends on IPV6_SEG6_LWTUNNEL
depends on IPV6 = y
+config IPV6_RPL_LWTUNNEL
+ bool "IPv6: RPL Source Routing Header support"
+ depends on IPV6
+ select LWTUNNEL
+ ---help---
+ Support for RFC6554 RPL Source Routing Header using the lightweight
+ tunnels mechanism.
+
+ If unsure, say N.
+
endif # IPV6
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 8ccf35514015..cf7b47bdb9b3 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -10,7 +10,7 @@ ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \
raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o ping.o \
exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o \
- udp_offload.o seg6.o fib6_notifier.o
+ udp_offload.o seg6.o fib6_notifier.o rpl.o
ipv6-offload := ip6_offload.o tcpv6_offload.o exthdrs_offload.o
@@ -26,6 +26,7 @@ ipv6-$(CONFIG_SYN_COOKIES) += syncookies.o
ipv6-$(CONFIG_NETLABEL) += calipso.o
ipv6-$(CONFIG_IPV6_SEG6_LWTUNNEL) += seg6_iptunnel.o seg6_local.o
ipv6-$(CONFIG_IPV6_SEG6_HMAC) += seg6_hmac.o
+ipv6-$(CONFIG_IPV6_RPL_LWTUNNEL) += rpl_iptunnel.o
ipv6-objs += $(ipv6-y)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 46d614b611db..a11fd4d67832 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -236,6 +236,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.enhanced_dad = 1,
.addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
.disable_policy = 0,
+ .rpl_seg_enabled = 0,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -290,6 +291,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.enhanced_dad = 1,
.addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
.disable_policy = 0,
+ .rpl_seg_enabled = 0,
};
/* Check if link is ready: is it up and is a valid qdisc available */
@@ -3301,7 +3303,7 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
switch (idev->cnf.addr_gen_mode) {
case IN6_ADDR_GEN_MODE_RANDOM:
ipv6_gen_mode_random_init(idev);
- /* fallthrough */
+ fallthrough;
case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
if (!ipv6_generate_stable_address(&addr, 0, idev))
addrconf_add_linklocal(idev, &addr,
@@ -3523,9 +3525,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
break;
run_pending = 1;
-
- /* fall through */
-
+ fallthrough;
case NETDEV_UP:
case NETDEV_CHANGE:
if (dev->flags & IFF_SLAVE)
@@ -4400,6 +4400,59 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
}
#endif
+/* RFC6554 specifies an algorithm to avoid loops in segment routing by
+ * checking whether the segment list contains an address assigned to a
+ * local interface.
+ *
+ * Quote:
+ *
+ * To detect loops in the SRH, a router MUST determine if the SRH
+ * includes multiple addresses assigned to any interface on that router.
+ * If such addresses appear more than once and are separated by at least
+ * one address not assigned to that router, the router MUST drop the
+ * packet.
+ */
+int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
+ unsigned char nsegs)
+{
+ const struct in6_addr *addr;
+ int i, ret = 0, found = 0;
+ struct inet6_ifaddr *ifp;
+ bool separated = false;
+ unsigned int hash;
+ bool hash_found;
+
+ rcu_read_lock();
+ for (i = 0; i < nsegs; i++) {
+ addr = &segs[i];
+ hash = inet6_addr_hash(net, addr);
+
+ hash_found = false;
+ hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
+ if (!net_eq(dev_net(ifp->idev->dev), net))
+ continue;
+
+ if (ipv6_addr_equal(&ifp->addr, addr)) {
+ hash_found = true;
+ break;
+ }
+ }
+
+ if (hash_found) {
+ if (found > 1 && separated) {
+ ret = 1;
+ break;
+ }
+
+ separated = false;
+ found++;
+ } else {
+ separated = true;
+ }
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
/*
* Periodic address status verification
*/
@@ -5469,6 +5522,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
+ array[DEVCONF_RPL_SEG_ENABLED] = cnf->rpl_seg_enabled;
}
static inline size_t inet6_ifla6_size(void)
@@ -6850,6 +6904,13 @@ static const struct ctl_table addrconf_sysctl[] = {
.extra2 = (void *)&two_five_five,
},
{
+ .procname = "rpl_seg_enabled",
+ .data = &ipv6_devconf.rpl_seg_enabled,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
/* sentinel */
}
};
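
ipv6_chk_rpl_srh_loop() above scans the segment list in order: it counts segments assigned to a local interface and reports a loop once a local address recurs after at least one non-local one, per the RFC 6554 rule quoted in the comment. A standalone model of that scan, with integers standing in for addresses and a trivial membership test replacing the hash-table lookup:

#include <stdbool.h>
#include <stdio.h>

/* "Assigned to a local interface" as a simple membership test. */
static bool is_local(int addr)
{
    static const int local[] = { 10, 20 };
    for (unsigned i = 0; i < sizeof(local) / sizeof(local[0]); i++)
        if (local[i] == addr)
            return true;
    return false;
}

static int chk_srh_loop(const int *segs, int nsegs)
{
    bool separated = false;
    int found = 0;

    for (int i = 0; i < nsegs; i++) {
        if (is_local(segs[i])) {
            /* repeated local address after a foreign one */
            if (found > 1 && separated)
                return 1;
            separated = false;
            found++;
        } else {
            separated = true;
        }
    }
    return 0;
}

int main(void)
{
    int looping[] = { 10, 20, 1, 10 };  /* local, local, foreign, local */
    int clean[]   = { 10, 20, 1, 2 };

    printf("%d %d\n", chk_srh_loop(looping, 4), chk_srh_loop(clean, 4));
    /* prints: 1 0 */
    return 0;
}
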
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index d727c3b41495..345baa0a754f 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -59,6 +59,7 @@
#endif
#include <net/calipso.h>
#include <net/seg6.h>
+#include <net/rpl.h>
#include <linux/uaccess.h>
#include <linux/mroute6.h>
@@ -1114,6 +1115,10 @@ static int __init inet6_init(void)
if (err)
goto seg6_fail;
+ err = rpl_init();
+ if (err)
+ goto rpl_fail;
+
err = igmp6_late_init();
if (err)
goto igmp6_late_err;
@@ -1136,6 +1141,8 @@ sysctl_fail:
igmp6_late_cleanup();
#endif
igmp6_late_err:
+ rpl_exit();
+rpl_fail:
seg6_exit();
seg6_fail:
calipso_exit();
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 95835e8d99aa..45e2adc56610 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -36,7 +36,7 @@ struct tmp_ext {
struct in6_addr saddr;
#endif
struct in6_addr daddr;
- char hdrs[0];
+ char hdrs[];
};
struct ah_skb_cb {
@@ -259,7 +259,7 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
case NEXTHDR_DEST:
if (dir == XFRM_POLICY_OUT)
ipv6_rearrange_destopt(iph, exthdr.opth);
- /* fall through */
+ fallthrough;
case NEXTHDR_HOP:
if (!zero_out_mutable_opts(exthdr.opth)) {
net_dbg_ratelimited("overrun %sopts\n",
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index a3b403ba8f8f..11143d039f16 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -207,22 +207,6 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err)
esp_output_done(base, err);
}
-static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
-{
- /* Fill padding... */
- if (tfclen) {
- memset(tail, 0, tfclen);
- tail += tfclen;
- }
- do {
- int i;
- for (i = 0; i < plen - 2; i++)
- tail[i] = i + 1;
- } while (0);
- tail[plen - 2] = plen - 2;
- tail[plen - 1] = proto;
-}
-
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
u8 *tail;
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index fd535053245b..8eab2c869d61 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -159,6 +159,40 @@ static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x,
return segs;
}
+static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x,
+ struct sk_buff *skb,
+ netdev_features_t features)
+{
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ const struct net_offload *ops;
+ int proto = xo->proto;
+
+ skb->transport_header += x->props.header_len;
+
+ if (proto == IPPROTO_BEETPH) {
+ struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data;
+
+ skb->transport_header += ph->hdrlen * 8;
+ proto = ph->nexthdr;
+ }
+
+ if (x->sel.family != AF_INET6) {
+ skb->transport_header -=
+ (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
+
+ if (proto == IPPROTO_TCP)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
+ }
+
+ __skb_pull(skb, skb_transport_offset(skb));
+ ops = rcu_dereference(inet6_offloads[proto]);
+ if (likely(ops && ops->callbacks.gso_segment))
+ segs = ops->callbacks.gso_segment(skb, features);
+
+ return segs;
+}
+
static struct sk_buff *xfrm6_outer_mode_gso_segment(struct xfrm_state *x,
struct sk_buff *skb,
netdev_features_t features)
@@ -168,6 +202,8 @@ static struct sk_buff *xfrm6_outer_mode_gso_segment(struct xfrm_state *x,
return xfrm6_tunnel_gso_segment(x, skb, features);
case XFRM_MODE_TRANSPORT:
return xfrm6_transport_gso_segment(x, skb, features);
+ case XFRM_MODE_BEET:
+ return xfrm6_beet_gso_segment(x, skb, features);
}
return ERR_PTR(-EOPNOTSUPP);
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index ab5add0fe6b4..5a8bbcdcaf2b 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -48,6 +48,7 @@
#ifdef CONFIG_IPV6_SEG6_HMAC
#include <net/seg6_hmac.h>
#endif
+#include <net/rpl.h>
#include <linux/uaccess.h>
@@ -97,7 +98,7 @@ static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff,
*/
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
break;
- /* fall through */
+ fallthrough;
case 2: /* send ICMP PARM PROB regardless and drop packet */
icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
return false;
@@ -468,6 +469,195 @@ looped_back:
return -1;
}
+static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
+{
+ struct ipv6_rpl_sr_hdr *hdr, *ohdr, *chdr;
+ struct inet6_skb_parm *opt = IP6CB(skb);
+ struct net *net = dev_net(skb->dev);
+ struct inet6_dev *idev;
+ struct ipv6hdr *oldhdr;
+ struct in6_addr addr;
+ unsigned char *buf;
+ int accept_rpl_seg;
+ int i, err;
+ u64 n = 0;
+ u32 r;
+
+ idev = __in6_dev_get(skb->dev);
+
+ accept_rpl_seg = net->ipv6.devconf_all->rpl_seg_enabled;
+ if (accept_rpl_seg > idev->cnf.rpl_seg_enabled)
+ accept_rpl_seg = idev->cnf.rpl_seg_enabled;
+
+ if (!accept_rpl_seg) {
+ kfree_skb(skb);
+ return -1;
+ }
+
+looped_back:
+ hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb);
+
+ if (hdr->segments_left == 0) {
+ if (hdr->nexthdr == NEXTHDR_IPV6) {
+ int offset = (hdr->hdrlen + 1) << 3;
+
+ skb_postpull_rcsum(skb, skb_network_header(skb),
+ skb_network_header_len(skb));
+
+ if (!pskb_pull(skb, offset)) {
+ kfree_skb(skb);
+ return -1;
+ }
+ skb_postpull_rcsum(skb, skb_transport_header(skb),
+ offset);
+
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ skb->encapsulation = 0;
+
+ __skb_tunnel_rx(skb, skb->dev, net);
+
+ netif_rx(skb);
+ return -1;
+ }
+
+ opt->srcrt = skb_network_header_len(skb);
+ opt->lastopt = opt->srcrt;
+ skb->transport_header += (hdr->hdrlen + 1) << 3;
+ opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
+
+ return 1;
+ }
+
+ if (!pskb_may_pull(skb, sizeof(*hdr))) {
+ kfree_skb(skb);
+ return -1;
+ }
+
+ n = (hdr->hdrlen << 3) - hdr->pad - (16 - hdr->cmpre);
+ r = do_div(n, (16 - hdr->cmpri));
+ /* Check that the division left no remainder and that n + 1 fits
+ * into an unsigned char; segments_left is of that type and must
+ * not exceed it.
+ */
+ if (r || (n + 1) > 255) {
+ kfree_skb(skb);
+ return -1;
+ }
+
+ if (hdr->segments_left > n + 1) {
+ __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
+ icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
+ ((&hdr->segments_left) -
+ skb_network_header(skb)));
+ return -1;
+ }
+
+ if (skb_cloned(skb)) {
+ if (pskb_expand_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE, 0,
+ GFP_ATOMIC)) {
+ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_OUTDISCARDS);
+ kfree_skb(skb);
+ return -1;
+ }
+ } else {
+ err = skb_cow_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ return -1;
+ }
+ }
+
+ hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb);
+
+ if (!pskb_may_pull(skb, ipv6_rpl_srh_size(n, hdr->cmpri,
+ hdr->cmpre))) {
+ kfree_skb(skb);
+ return -1;
+ }
+
+ hdr->segments_left--;
+ i = n - hdr->segments_left;
+
+ buf = kzalloc(ipv6_rpl_srh_alloc_size(n + 1) * 2, GFP_ATOMIC);
+ if (unlikely(!buf)) {
+ kfree_skb(skb);
+ return -1;
+ }
+
+ ohdr = (struct ipv6_rpl_sr_hdr *)buf;
+ ipv6_rpl_srh_decompress(ohdr, hdr, &ipv6_hdr(skb)->daddr, n);
+ chdr = (struct ipv6_rpl_sr_hdr *)(buf + ((ohdr->hdrlen + 1) << 3));
+
+ if ((ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST) ||
+ (ipv6_addr_type(&ohdr->rpl_segaddr[i]) & IPV6_ADDR_MULTICAST)) {
+ kfree_skb(skb);
+ kfree(buf);
+ return -1;
+ }
+
+ err = ipv6_chk_rpl_srh_loop(net, ohdr->rpl_segaddr, n + 1);
+ if (err) {
+ icmpv6_send(skb, ICMPV6_PARAMPROB, 0, 0);
+ kfree_skb(skb);
+ kfree(buf);
+ return -1;
+ }
+
+ addr = ipv6_hdr(skb)->daddr;
+ ipv6_hdr(skb)->daddr = ohdr->rpl_segaddr[i];
+ ohdr->rpl_segaddr[i] = addr;
+
+ ipv6_rpl_srh_compress(chdr, ohdr, &ipv6_hdr(skb)->daddr, n);
+
+ oldhdr = ipv6_hdr(skb);
+
+ skb_pull(skb, ((hdr->hdrlen + 1) << 3));
+ skb_postpull_rcsum(skb, oldhdr,
+ sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3));
+ skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr));
+ skb_reset_network_header(skb);
+ skb_mac_header_rebuild(skb);
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+
+ memmove(ipv6_hdr(skb), oldhdr, sizeof(struct ipv6hdr));
+ memcpy(skb_transport_header(skb), chdr, (chdr->hdrlen + 1) << 3);
+
+ ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
+ skb_postpush_rcsum(skb, ipv6_hdr(skb),
+ sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3));
+
+ kfree(buf);
+
+ skb_dst_drop(skb);
+
+ ip6_route_input(skb);
+
+ if (skb_dst(skb)->error) {
+ dst_input(skb);
+ return -1;
+ }
+
+ if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
+ if (ipv6_hdr(skb)->hop_limit <= 1) {
+ __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
+ icmpv6_send(skb, ICMPV6_TIME_EXCEED,
+ ICMPV6_EXC_HOPLIMIT, 0);
+ kfree_skb(skb);
+ return -1;
+ }
+ ipv6_hdr(skb)->hop_limit--;
+
+ skb_pull(skb, sizeof(struct ipv6hdr));
+ goto looped_back;
+ }
+
+ dst_input(skb);
+
+ return -1;
+}
+
/********************************
Routing header.
********************************/
@@ -506,9 +696,16 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
return -1;
}
- /* segment routing */
- if (hdr->type == IPV6_SRCRT_TYPE_4)
+ switch (hdr->type) {
+ case IPV6_SRCRT_TYPE_4:
+ /* segment routing */
return ipv6_srh_rcv(skb);
+ case IPV6_SRCRT_TYPE_3:
+ /* rpl segment routing */
+ return ipv6_rpl_srh_rcv(skb);
+ default:
+ break;
+ }
looped_back:
if (hdr->segments_left == 0) {
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index ef408a5090a2..2688f3e82165 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -898,7 +898,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
hdr = icmp6_hdr(skb);
/* to notify */
- /* fall through */
+ fallthrough;
case ICMPV6_DEST_UNREACH:
case ICMPV6_TIME_EXCEED:
case ICMPV6_PARAMPROB:
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index 422dcc691f71..8c1ce78956ba 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -125,7 +125,7 @@ static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
[ILA_ATTR_HOOK_TYPE] = { .type = NLA_U8, },
};
-static int ila_build_state(struct nlattr *nla,
+static int ila_build_state(struct net *net, struct nlattr *nla,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 72abf892302f..46ed56719476 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -2068,8 +2068,8 @@ static int fib6_walk_continue(struct fib6_walker *w)
continue;
}
w->state = FWS_L;
+ fallthrough;
#endif
- /* fall through */
case FWS_L:
left = rcu_dereference_protected(fn->left, 1);
if (left) {
@@ -2078,7 +2078,7 @@ static int fib6_walk_continue(struct fib6_walker *w)
continue;
}
w->state = FWS_R;
- /* fall through */
+ fallthrough;
case FWS_R:
right = rcu_dereference_protected(fn->right, 1);
if (right) {
@@ -2088,7 +2088,7 @@ static int fib6_walk_continue(struct fib6_walker *w)
}
w->state = FWS_C;
w->leaf = rcu_dereference_protected(fn->leaf, 1);
- /* fall through */
+ fallthrough;
case FWS_C:
if (w->leaf && fn->fn_flags & RTN_RTINFO) {
int err;
@@ -2107,7 +2107,7 @@ static int fib6_walk_continue(struct fib6_walker *w)
}
skip:
w->state = FWS_U;
- /* fall through */
+ fallthrough;
case FWS_U:
if (fn == w->root)
return 0;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 7b089d0ac8cd..e96304d8a4a7 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -285,7 +285,8 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
rcu_read_unlock();
/* Must drop socket now because of tproxy. */
- skb_orphan(skb);
+ if (!skb_sk_is_prefetched(skb))
+ skb_orphan(skb);
return skb;
err:
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 087304427bbb..8a8c2d0cfcc8 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -54,6 +54,7 @@
#include <linux/mroute6.h>
#include <net/l3mdev.h>
#include <net/lwtunnel.h>
+#include <net/ip_tunnels.h>
static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
@@ -1196,6 +1197,75 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
+/**
+ * ip6_dst_lookup_tunnel - perform route lookup on tunnel
+ * @skb: Packet for which lookup is done
+ * @dev: Tunnel device
+ * @net: Network namespace of tunnel device
+ * @sock: Socket which provides route info
+ * @saddr: Memory to store the src ip address
+ * @info: Tunnel information
+ * @protocol: IP protocol
+ * @use_cache: Flag to enable cache usage
+ *
+ * This function performs a route lookup on a tunnel.
+ *
+ * It returns a valid dst pointer on success and stores the source
+ * address to be used by the tunnel in @saddr; otherwise it returns a
+ * pointer-encoded error code.
+ */
+
+struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net *net,
+ struct socket *sock,
+ struct in6_addr *saddr,
+ const struct ip_tunnel_info *info,
+ u8 protocol,
+ bool use_cache)
+{
+ struct dst_entry *dst = NULL;
+#ifdef CONFIG_DST_CACHE
+ struct dst_cache *dst_cache;
+#endif
+ struct flowi6 fl6;
+ __u8 prio;
+
+#ifdef CONFIG_DST_CACHE
+ dst_cache = (struct dst_cache *)&info->dst_cache;
+ if (use_cache) {
+ dst = dst_cache_get_ip6(dst_cache, saddr);
+ if (dst)
+ return dst;
+ }
+#endif
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_proto = protocol;
+ fl6.daddr = info->key.u.ipv6.dst;
+ fl6.saddr = info->key.u.ipv6.src;
+ prio = info->key.tos;
+ fl6.flowlabel = ip6_make_flowinfo(RT_TOS(prio),
+ info->key.label);
+
+ dst = ipv6_stub->ipv6_dst_lookup_flow(net, sock->sk, &fl6,
+ NULL);
+ if (IS_ERR(dst)) {
+ netdev_dbg(dev, "no route to %pI6\n", &fl6.daddr);
+ return ERR_PTR(-ENETUNREACH);
+ }
+ if (dst->dev == dev) { /* is this necessary? */
+ netdev_dbg(dev, "circular route to %pI6\n", &fl6.daddr);
+ dst_release(dst);
+ return ERR_PTR(-ELOOP);
+ }
+#ifdef CONFIG_DST_CACHE
+ if (use_cache)
+ dst_cache_set_ip6(dst_cache, dst, &fl6.saddr);
+#endif
+ *saddr = fl6.saddr;
+ return dst;
+}
+EXPORT_SYMBOL_GPL(ip6_dst_lookup_tunnel);
+
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
gfp_t gfp)
{
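
A kernel-context sketch of how a tunnel driver's IPv6 transmit path might call the new ip6_dst_lookup_tunnel() helper; the function, device, and socket names here are illustrative, not from this patch:

static int my_tunnel_xmit6(struct sk_buff *skb, struct net_device *dev,
                           struct socket *sock,
                           const struct ip_tunnel_info *info)
{
    struct net *net = dev_net(dev);
    struct in6_addr saddr;
    struct dst_entry *dst;

    dst = ip6_dst_lookup_tunnel(skb, dev, net, sock, &saddr, info,
                                IPPROTO_UDP, true /* use_cache */);
    if (IS_ERR(dst))
        return PTR_ERR(dst);    /* -ENETUNREACH or -ELOOP */

    /* ... build the outer header with saddr, then transmit via dst ... */
    skb_dst_set(skb, dst);
    return 0;
}
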
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index bfa49ff70531..65a54d74acc1 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -97,7 +97,8 @@ static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
- list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
+ list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list, \
+ lockdep_rtnl_is_held())
static struct mr_table *ip6mr_mr_table_iter(struct net *net,
struct mr_table *mrt)
@@ -1690,7 +1691,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
case MRT6_ADD_MFC:
case MRT6_DEL_MFC:
parent = -1;
- /* fall through */
+ fallthrough;
case MRT6_ADD_MFC_PROXY:
case MRT6_DEL_MFC_PROXY:
if (optlen < sizeof(mfc))
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 53caf59c591e..6ffa153e5166 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -197,6 +197,7 @@ static inline int ndisc_is_useropt(const struct net_device *dev,
return opt->nd_opt_type == ND_OPT_RDNSS ||
opt->nd_opt_type == ND_OPT_DNSSL ||
opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL ||
+ opt->nd_opt_type == ND_OPT_PREF64 ||
ndisc_ops_is_useropt(dev, opt->nd_opt_type);
}
@@ -1782,7 +1783,7 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
case NETDEV_CHANGEADDR:
neigh_changeaddr(&nd_tbl, dev);
fib6_run_gc(0, net, false);
- /* fallthrough */
+ fallthrough;
case NETDEV_UP:
idev = in6_dev_get(dev);
if (!idev)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index c973ace208c5..e27393498ecb 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1227,7 +1227,7 @@ struct compat_ip6t_replace {
u32 underflow[NF_INET_NUMHOOKS];
u32 num_counters;
compat_uptr_t counters; /* struct xt_counters * */
- struct compat_ip6t_entry entries[0];
+ struct compat_ip6t_entry entries[];
};
static int
@@ -1571,7 +1571,7 @@ compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
struct compat_ip6t_get_entries {
char name[XT_TABLE_MAXNAMELEN];
compat_uint_t size;
- struct compat_ip6t_entry entrytable[0];
+ struct compat_ip6t_entry entrytable[];
};
static int
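
The entries[0] and entrytable[0] conversions above (and the hdrs[], priv[], and tuninfo[] ones elsewhere in this series) replace the GCC zero-length-array idiom with C99 flexible array members, letting the compiler and fortify checks see the real bounds; allocation sizing is unchanged. A standalone example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct blob {
    size_t len;
    char data[];    /* C99 flexible array member, was "data[0]" */
};

static struct blob *blob_new(const char *s)
{
    size_t len = strlen(s) + 1;
    /* The FAM contributes nothing to sizeof(struct blob). */
    struct blob *b = malloc(sizeof(*b) + len);

    if (!b)
        return NULL;
    b->len = len;
    memcpy(b->data, s, len);
    return b;
}

int main(void)
{
    struct blob *b = blob_new("hello");

    if (b) {
        printf("%zu %s\n", b->len, b->data);    /* 6 hello */
        free(b);
    }
    return 0;
}
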
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
index 22b80db6d882..da64550a5707 100644
--- a/net/ipv6/netfilter/nf_log_ipv6.c
+++ b/net/ipv6/netfilter/nf_log_ipv6.c
@@ -248,7 +248,7 @@ static void dump_ipv6_packet(struct net *net, struct nf_log_buf *m,
/* Max length: 17 "POINTER=ffffffff " */
nf_log_buf_add(m, "POINTER=%08x ",
ntohl(ic->icmp6_pointer));
- /* Fall through */
+ fallthrough;
case ICMPV6_DEST_UNREACH:
case ICMPV6_PKT_TOOBIG:
case ICMPV6_TIME_EXCEED:
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index dfe5e603ffe1..0028aa1d7869 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1076,7 +1076,7 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
break;
- /* fall through */
+ fallthrough;
default:
return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
@@ -1099,7 +1099,7 @@ static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
break;
- /* fall through */
+ fallthrough;
default:
return compat_ipv6_setsockopt(sk, level, optname,
optval, optlen);
@@ -1161,7 +1161,7 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname,
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
break;
- /* fall through */
+ fallthrough;
default:
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
@@ -1184,7 +1184,7 @@ static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
break;
- /* fall through */
+ fallthrough;
default:
return compat_ipv6_getsockopt(sk, level, optname,
optval, optlen);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 2931224b674e..310cbddaa533 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1062,8 +1062,6 @@ static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
flags |= DST_NOCOUNT;
if (rt->dst_nopolicy)
flags |= DST_NOPOLICY;
- if (rt->dst_host)
- flags |= DST_HOST;
return flags;
}
@@ -1349,7 +1347,6 @@ static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
ip6_rt_copy_init(rt, res);
rt->rt6i_flags |= RTF_CACHE;
- rt->dst.flags |= DST_HOST;
rt->rt6i_dst.addr = *daddr;
rt->rt6i_dst.plen = 128;
@@ -3142,7 +3139,6 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
goto out;
}
- rt->dst.flags |= DST_HOST;
rt->dst.input = ip6_input;
rt->dst.output = ip6_output;
rt->rt6i_gateway = fl6->daddr;
@@ -3475,7 +3471,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
!netif_carrier_ok(dev))
fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
- err = fib_nh_common_init(&fib6_nh->nh_common, cfg->fc_encap,
+ err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap,
cfg->fc_encap_type, cfg, gfp_flags, extack);
if (err)
goto out;
@@ -3645,8 +3641,6 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
rt->fib6_dst.plen = cfg->fc_dst_len;
- if (rt->fib6_dst.plen == 128)
- rt->dst_host = true;
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
@@ -4370,7 +4364,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case IPSTATS_MIB_OUTNOROUTES:
IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
break;
diff --git a/net/ipv6/rpl.c b/net/ipv6/rpl.c
new file mode 100644
index 000000000000..dc4f20e23bf7
--- /dev/null
+++ b/net/ipv6/rpl.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Authors:
+ * (C) 2020 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#include <net/ipv6.h>
+#include <net/rpl.h>
+
+#define IPV6_PFXTAIL_LEN(x) (sizeof(struct in6_addr) - (x))
+
+static void ipv6_rpl_addr_decompress(struct in6_addr *dst,
+ const struct in6_addr *daddr,
+ const void *post, unsigned char pfx)
+{
+ memcpy(dst, daddr, pfx);
+ memcpy(&dst->s6_addr[pfx], post, IPV6_PFXTAIL_LEN(pfx));
+}
+
+static void ipv6_rpl_addr_compress(void *dst, const struct in6_addr *addr,
+ unsigned char pfx)
+{
+ memcpy(dst, &addr->s6_addr[pfx], IPV6_PFXTAIL_LEN(pfx));
+}
+
+static void *ipv6_rpl_segdata_pos(const struct ipv6_rpl_sr_hdr *hdr, int i)
+{
+ return (void *)&hdr->rpl_segdata[i * IPV6_PFXTAIL_LEN(hdr->cmpri)];
+}
+
+size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
+ unsigned char cmpre)
+{
+ return (n * IPV6_PFXTAIL_LEN(cmpri)) + IPV6_PFXTAIL_LEN(cmpre);
+}
+
+void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
+ const struct ipv6_rpl_sr_hdr *inhdr,
+ const struct in6_addr *daddr, unsigned char n)
+{
+ int i;
+
+ outhdr->nexthdr = inhdr->nexthdr;
+ outhdr->hdrlen = (((n + 1) * sizeof(struct in6_addr)) >> 3);
+ outhdr->pad = 0;
+ outhdr->type = inhdr->type;
+ outhdr->segments_left = inhdr->segments_left;
+ outhdr->cmpri = 0;
+ outhdr->cmpre = 0;
+
+ for (i = 0; i <= n; i++)
+ ipv6_rpl_addr_decompress(&outhdr->rpl_segaddr[i], daddr,
+ ipv6_rpl_segdata_pos(inhdr, i),
+ inhdr->cmpri);
+
+ ipv6_rpl_addr_decompress(&outhdr->rpl_segaddr[n], daddr,
+ ipv6_rpl_segdata_pos(inhdr, n),
+ inhdr->cmpre);
+}
+
+static unsigned char ipv6_rpl_srh_calc_cmpri(const struct ipv6_rpl_sr_hdr *inhdr,
+ const struct in6_addr *daddr,
+ unsigned char n)
+{
+ unsigned char plen;
+ int i;
+
+ for (plen = 0; plen < sizeof(*daddr); plen++) {
+ for (i = 0; i <= n; i++) {
+ if (daddr->s6_addr[plen] !=
+ inhdr->rpl_segaddr[i].s6_addr[plen])
+ return plen;
+ }
+ }
+
+ return plen;
+}
+
+static unsigned char ipv6_rpl_srh_calc_cmpre(const struct in6_addr *daddr,
+ const struct in6_addr *last_segment)
+{
+ unsigned int plen;
+
+ for (plen = 0; plen < sizeof(*daddr); plen++) {
+ if (daddr->s6_addr[plen] != last_segment->s6_addr[plen])
+ break;
+ }
+
+ return plen;
+}
+
+void ipv6_rpl_srh_compress(struct ipv6_rpl_sr_hdr *outhdr,
+ const struct ipv6_rpl_sr_hdr *inhdr,
+ const struct in6_addr *daddr, unsigned char n)
+{
+ unsigned char cmpri, cmpre;
+ size_t seglen;
+ int i;
+
+ cmpri = ipv6_rpl_srh_calc_cmpri(inhdr, daddr, n);
+ cmpre = ipv6_rpl_srh_calc_cmpre(daddr, &inhdr->rpl_segaddr[n]);
+
+ outhdr->nexthdr = inhdr->nexthdr;
+ seglen = (n * IPV6_PFXTAIL_LEN(cmpri)) + IPV6_PFXTAIL_LEN(cmpre);
+ outhdr->hdrlen = seglen >> 3;
+ if (seglen & 0x7) {
+ outhdr->hdrlen++;
+ outhdr->pad = 8 - (seglen & 0x7);
+ } else {
+ outhdr->pad = 0;
+ }
+ outhdr->type = inhdr->type;
+ outhdr->segments_left = inhdr->segments_left;
+ outhdr->cmpri = cmpri;
+ outhdr->cmpre = cmpre;
+
+ for (i = 0; i <= n; i++)
+ ipv6_rpl_addr_compress(ipv6_rpl_segdata_pos(outhdr, i),
+ &inhdr->rpl_segaddr[i], cmpri);
+
+ ipv6_rpl_addr_compress(ipv6_rpl_segdata_pos(outhdr, n),
+ &inhdr->rpl_segaddr[n], cmpre);
+}
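
Under RFC 6554 compression, each of the first n segments keeps only its final (16 - cmpri) bytes and the last segment its final (16 - cmpre) bytes, the shared prefix being reconstructed from the IPv6 destination address; hdrlen is the total rounded up to 8-byte units and pad records the slack, exactly as ipv6_rpl_srh_compress() computes. A standalone check of that arithmetic:

#include <stdio.h>

#define PFXTAIL_LEN(x) (16 - (x))    /* bytes kept per compressed address */

int main(void)
{
    unsigned n = 3, cmpri = 10, cmpre = 12;

    /* n intermediate segments plus the final one */
    unsigned seglen = n * PFXTAIL_LEN(cmpri) + PFXTAIL_LEN(cmpre);
    unsigned hdrlen = seglen >> 3;    /* 8-byte units */
    unsigned pad = 0;

    if (seglen & 0x7) {               /* round up, record the slack */
        hdrlen++;
        pad = 8 - (seglen & 0x7);
    }

    /* 3*6 + 4 = 22 bytes -> 3 eight-byte units, 2 bytes of padding */
    printf("seglen=%u hdrlen=%u pad=%u\n", seglen, hdrlen, pad);
    return 0;
}
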
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
new file mode 100644
index 000000000000..a49ddc6cd020
--- /dev/null
+++ b/net/ipv6/rpl_iptunnel.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Authors:
+ * (C) 2020 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#include <linux/rpl_iptunnel.h>
+
+#include <net/dst_cache.h>
+#include <net/ip6_route.h>
+#include <net/lwtunnel.h>
+#include <net/ipv6.h>
+#include <net/rpl.h>
+
+struct rpl_iptunnel_encap {
+ struct ipv6_rpl_sr_hdr srh[0];
+};
+
+struct rpl_lwt {
+ struct dst_cache cache;
+ struct rpl_iptunnel_encap tuninfo;
+};
+
+static inline struct rpl_lwt *rpl_lwt_lwtunnel(struct lwtunnel_state *lwt)
+{
+ return (struct rpl_lwt *)lwt->data;
+}
+
+static inline struct rpl_iptunnel_encap *
+rpl_encap_lwtunnel(struct lwtunnel_state *lwt)
+{
+ return &rpl_lwt_lwtunnel(lwt)->tuninfo;
+}
+
+static const struct nla_policy rpl_iptunnel_policy[RPL_IPTUNNEL_MAX + 1] = {
+ [RPL_IPTUNNEL_SRH] = { .type = NLA_BINARY },
+};
+
+static bool rpl_validate_srh(struct net *net, struct ipv6_rpl_sr_hdr *srh,
+ size_t seglen)
+{
+ int err;
+
+ if ((srh->hdrlen << 3) != seglen)
+ return false;
+
+ /* check that there is at least one segment and that seglen
+ * matches segments_left
+ */
+ if (!srh->segments_left ||
+ (srh->segments_left * sizeof(struct in6_addr)) != seglen)
+ return false;
+
+ if (srh->cmpri || srh->cmpre)
+ return false;
+
+ err = ipv6_chk_rpl_srh_loop(net, srh->rpl_segaddr,
+ srh->segments_left);
+ if (err)
+ return false;
+
+ if (ipv6_addr_type(&srh->rpl_segaddr[srh->segments_left - 1]) &
+ IPV6_ADDR_MULTICAST)
+ return false;
+
+ return true;
+}
+
+static int rpl_build_state(struct net *net, struct nlattr *nla,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **ts,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RPL_IPTUNNEL_MAX + 1];
+ struct lwtunnel_state *newts;
+ struct ipv6_rpl_sr_hdr *srh;
+ struct rpl_lwt *rlwt;
+ int err, srh_len;
+
+ if (family != AF_INET6)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, RPL_IPTUNNEL_MAX, nla,
+ rpl_iptunnel_policy, extack);
+ if (err < 0)
+ return err;
+
+ if (!tb[RPL_IPTUNNEL_SRH])
+ return -EINVAL;
+
+ srh = nla_data(tb[RPL_IPTUNNEL_SRH]);
+ srh_len = nla_len(tb[RPL_IPTUNNEL_SRH]);
+
+ if (srh_len < sizeof(*srh))
+ return -EINVAL;
+
+ /* verify that SRH is consistent */
+ if (!rpl_validate_srh(net, srh, srh_len - sizeof(*srh)))
+ return -EINVAL;
+
+ newts = lwtunnel_state_alloc(srh_len + sizeof(*rlwt));
+ if (!newts)
+ return -ENOMEM;
+
+ rlwt = rpl_lwt_lwtunnel(newts);
+
+ err = dst_cache_init(&rlwt->cache, GFP_ATOMIC);
+ if (err) {
+ kfree(newts);
+ return err;
+ }
+
+ memcpy(&rlwt->tuninfo.srh, srh, srh_len);
+
+ newts->type = LWTUNNEL_ENCAP_RPL;
+ newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;
+ newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
+
+ *ts = newts;
+
+ return 0;
+}
+
+static void rpl_destroy_state(struct lwtunnel_state *lwt)
+{
+ dst_cache_destroy(&rpl_lwt_lwtunnel(lwt)->cache);
+}
+
+static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+ const struct ipv6_rpl_sr_hdr *srh)
+{
+ struct ipv6_rpl_sr_hdr *isrh, *csrh;
+ const struct ipv6hdr *oldhdr;
+ struct ipv6hdr *hdr;
+ unsigned char *buf;
+ size_t hdrlen;
+ int err;
+
+ oldhdr = ipv6_hdr(skb);
+
+ buf = kzalloc(ipv6_rpl_srh_alloc_size(srh->segments_left - 1) * 2,
+ GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ isrh = (struct ipv6_rpl_sr_hdr *)buf;
+ csrh = (struct ipv6_rpl_sr_hdr *)(buf + ((srh->hdrlen + 1) << 3));
+
+ memcpy(isrh, srh, sizeof(*isrh));
+ memcpy(isrh->rpl_segaddr, &srh->rpl_segaddr[1],
+ (srh->segments_left - 1) * 16);
+ isrh->rpl_segaddr[srh->segments_left - 1] = oldhdr->daddr;
+
+ ipv6_rpl_srh_compress(csrh, isrh, &srh->rpl_segaddr[0],
+ isrh->segments_left - 1);
+
+ hdrlen = ((csrh->hdrlen + 1) << 3);
+
+ err = skb_cow_head(skb, hdrlen + skb->mac_len);
+ if (unlikely(err)) {
+ kfree(buf);
+ return err;
+ }
+
+ skb_pull(skb, sizeof(struct ipv6hdr));
+ skb_postpull_rcsum(skb, skb_network_header(skb),
+ sizeof(struct ipv6hdr));
+
+ skb_push(skb, sizeof(struct ipv6hdr) + hdrlen);
+ skb_reset_network_header(skb);
+ skb_mac_header_rebuild(skb);
+
+ hdr = ipv6_hdr(skb);
+ memmove(hdr, oldhdr, sizeof(*hdr));
+ isrh = (void *)hdr + sizeof(*hdr);
+ memcpy(isrh, csrh, hdrlen);
+
+ isrh->nexthdr = hdr->nexthdr;
+ hdr->nexthdr = NEXTHDR_ROUTING;
+ hdr->daddr = srh->rpl_segaddr[0];
+
+ ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+
+ skb_postpush_rcsum(skb, hdr, sizeof(struct ipv6hdr) + hdrlen);
+
+ kfree(buf);
+
+ return 0;
+}
+
+static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct rpl_iptunnel_encap *tinfo;
+ int err = 0;
+
+ if (skb->protocol != htons(ETH_P_IPV6))
+ return -EINVAL;
+
+ tinfo = rpl_encap_lwtunnel(dst->lwtstate);
+
+ err = rpl_do_srh_inline(skb, rlwt, tinfo->srh);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ struct dst_entry *orig_dst = skb_dst(skb);
+ struct dst_entry *dst = NULL;
+ struct rpl_lwt *rlwt;
+ int err = -EINVAL;
+
+ rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
+
+ err = rpl_do_srh(skb, rlwt);
+ if (unlikely(err))
+ goto drop;
+
+ preempt_disable();
+ dst = dst_cache_get(&rlwt->cache);
+ preempt_enable();
+
+ if (unlikely(!dst)) {
+ struct ipv6hdr *hdr = ipv6_hdr(skb);
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.daddr = hdr->daddr;
+ fl6.saddr = hdr->saddr;
+ fl6.flowlabel = ip6_flowinfo(hdr);
+ fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_proto = hdr->nexthdr;
+
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (dst->error) {
+ err = dst->error;
+ dst_release(dst);
+ goto drop;
+ }
+
+ preempt_disable();
+ dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
+ preempt_enable();
+ }
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ if (unlikely(err))
+ goto drop;
+
+ return dst_output(net, sk, skb);
+
+drop:
+ kfree_skb(skb);
+ return err;
+}
+
+static int rpl_input(struct sk_buff *skb)
+{
+ struct dst_entry *orig_dst = skb_dst(skb);
+ struct dst_entry *dst = NULL;
+ struct rpl_lwt *rlwt;
+ int err;
+
+ rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
+
+ err = rpl_do_srh(skb, rlwt);
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ return err;
+ }
+
+ preempt_disable();
+ dst = dst_cache_get(&rlwt->cache);
+ preempt_enable();
+
+ skb_dst_drop(skb);
+
+ if (!dst) {
+ ip6_route_input(skb);
+ dst = skb_dst(skb);
+ if (!dst->error) {
+ preempt_disable();
+ dst_cache_set_ip6(&rlwt->cache, dst,
+ &ipv6_hdr(skb)->saddr);
+ preempt_enable();
+ }
+ } else {
+ skb_dst_set(skb, dst);
+ }
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+ if (unlikely(err))
+ return err;
+
+ return dst_input(skb);
+}
+
+static int nla_put_rpl_srh(struct sk_buff *skb, int attrtype,
+ struct rpl_iptunnel_encap *tuninfo)
+{
+ struct rpl_iptunnel_encap *data;
+ struct nlattr *nla;
+ int len;
+
+ len = RPL_IPTUNNEL_SRH_SIZE(tuninfo->srh);
+
+ nla = nla_reserve(skb, attrtype, len);
+ if (!nla)
+ return -EMSGSIZE;
+
+ data = nla_data(nla);
+ memcpy(data, tuninfo->srh, len);
+
+ return 0;
+}
+
+static int rpl_fill_encap_info(struct sk_buff *skb,
+ struct lwtunnel_state *lwtstate)
+{
+ struct rpl_iptunnel_encap *tuninfo = rpl_encap_lwtunnel(lwtstate);
+
+ if (nla_put_rpl_srh(skb, RPL_IPTUNNEL_SRH, tuninfo))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int rpl_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+ struct rpl_iptunnel_encap *tuninfo = rpl_encap_lwtunnel(lwtstate);
+
+ return nla_total_size(RPL_IPTUNNEL_SRH_SIZE(tuninfo->srh));
+}
+
+static int rpl_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+ struct rpl_iptunnel_encap *a_hdr = rpl_encap_lwtunnel(a);
+ struct rpl_iptunnel_encap *b_hdr = rpl_encap_lwtunnel(b);
+ int len = RPL_IPTUNNEL_SRH_SIZE(a_hdr->srh);
+
+ if (len != RPL_IPTUNNEL_SRH_SIZE(b_hdr->srh))
+ return 1;
+
+ return memcmp(a_hdr, b_hdr, len);
+}
+
+static const struct lwtunnel_encap_ops rpl_ops = {
+ .build_state = rpl_build_state,
+ .destroy_state = rpl_destroy_state,
+ .output = rpl_output,
+ .input = rpl_input,
+ .fill_encap = rpl_fill_encap_info,
+ .get_encap_size = rpl_encap_nlsize,
+ .cmp_encap = rpl_encap_cmp,
+ .owner = THIS_MODULE,
+};
+
+int __init rpl_init(void)
+{
+ int err;
+
+ err = lwtunnel_encap_add_ops(&rpl_ops, LWTUNNEL_ENCAP_RPL);
+ if (err)
+ goto out;
+
+ pr_info("RPL Segment Routing with IPv6\n");
+
+ return 0;
+
+out:
+ return err;
+}
+
+void rpl_exit(void)
+{
+ lwtunnel_encap_del_ops(&rpl_ops, LWTUNNEL_ENCAP_RPL);
+}
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 8c52efe299cc..c7cbfeae94f5 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -29,7 +29,7 @@
struct seg6_lwt {
struct dst_cache cache;
- struct seg6_iptunnel_encap tuninfo[0];
+ struct seg6_iptunnel_encap tuninfo[];
};
static inline struct seg6_lwt *seg6_lwt_lwtunnel(struct lwtunnel_state *lwt)
@@ -376,7 +376,7 @@ drop:
return err;
}
-static int seg6_build_state(struct nlattr *nla,
+static int seg6_build_state(struct net *net, struct nlattr *nla,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack)
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 8165802d8e05..52493423f329 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -970,8 +970,9 @@ static int parse_nla_action(struct nlattr **attrs, struct seg6_local_lwt *slwt)
return 0;
}
-static int seg6_local_build_state(struct nlattr *nla, unsigned int family,
- const void *cfg, struct lwtunnel_state **ts,
+static int seg6_local_build_state(struct net *net, struct nlattr *nla,
+ unsigned int family, const void *cfg,
+ struct lwtunnel_state **ts,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[SEG6_LOCAL_MAX + 1];
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index ec8fcfc60a27..63b657aa8d29 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -203,29 +203,16 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
struct ctl_table *ipv6_table;
struct ctl_table *ipv6_route_table;
struct ctl_table *ipv6_icmp_table;
- int err;
+ int err, i;
err = -ENOMEM;
ipv6_table = kmemdup(ipv6_table_template, sizeof(ipv6_table_template),
GFP_KERNEL);
if (!ipv6_table)
goto out;
- ipv6_table[0].data = &net->ipv6.sysctl.bindv6only;
- ipv6_table[1].data = &net->ipv6.sysctl.anycast_src_echo_reply;
- ipv6_table[2].data = &net->ipv6.sysctl.flowlabel_consistency;
- ipv6_table[3].data = &net->ipv6.sysctl.auto_flowlabels;
- ipv6_table[4].data = &net->ipv6.sysctl.fwmark_reflect;
- ipv6_table[5].data = &net->ipv6.sysctl.idgen_retries;
- ipv6_table[6].data = &net->ipv6.sysctl.idgen_delay;
- ipv6_table[7].data = &net->ipv6.sysctl.flowlabel_state_ranges;
- ipv6_table[8].data = &net->ipv6.sysctl.ip_nonlocal_bind;
- ipv6_table[9].data = &net->ipv6.sysctl.flowlabel_reflect;
- ipv6_table[10].data = &net->ipv6.sysctl.max_dst_opts_cnt;
- ipv6_table[11].data = &net->ipv6.sysctl.max_hbh_opts_cnt;
- ipv6_table[12].data = &net->ipv6.sysctl.max_dst_opts_len;
- ipv6_table[13].data = &net->ipv6.sysctl.max_hbh_opts_len;
- ipv6_table[14].data = &net->ipv6.sysctl.multipath_hash_policy,
- ipv6_table[15].data = &net->ipv6.sysctl.seg6_flowlabel;
+ /* Update the variables to point into the current struct net */
+ for (i = 0; i < ARRAY_SIZE(ipv6_table_template) - 1; i++)
+ ipv6_table[i].data += (void *)net - (void *)&init_net;
ipv6_route_table = ipv6_route_sysctl_init(net);
if (!ipv6_route_table)
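
The loop above replaces sixteen hand-numbered assignments with a relocation: every template .data pointer targets a field of init_net, so adding the byte distance between the current netns and init_net rebases it onto the same field of this netns. The same trick standalone (this mirrors the kernel's idiom; strictly conforming C would compute per-field offsets instead):

#include <stdio.h>

struct cfg { int a; int b; };

static struct cfg init_cfg = { 1, 2 };

/* Template: .data points into init_cfg, as the sysctl template
 * points into init_net. */
struct entry { const char *name; void *data; };
static const struct entry template[2] = {
    { "a", &init_cfg.a },
    { "b", &init_cfg.b },
};

int main(void)
{
    struct cfg other = { 10, 20 };
    struct entry tbl[2];

    for (unsigned i = 0; i < 2; i++) {
        tbl[i] = template[i];
        /* rebase: same field offset, different instance */
        tbl[i].data = (char *)tbl[i].data +
                      ((char *)&other - (char *)&init_cfg);
    }
    printf("%d %d\n", *(int *)tbl[0].data, *(int *)tbl[1].data); /* 10 20 */
    return 0;
}
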
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index eaf09e6b7844..413b3425ac66 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1742,7 +1742,7 @@ do_time_wait:
}
}
/* to ACK */
- /* fall through */
+ fallthrough;
case TCP_TW_ACK:
tcp_v6_timewait_ack(sk, skb);
break;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 5dc439a391fe..7d4151747340 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -843,6 +843,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
struct net *net = dev_net(skb->dev);
struct udphdr *uh;
struct sock *sk;
+ bool refcounted;
u32 ulen = 0;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
@@ -879,7 +880,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
goto csum_error;
/* Check if the socket is already available, e.g. due to early demux */
- sk = skb_steal_sock(skb);
+ sk = skb_steal_sock(skb, &refcounted);
if (sk) {
struct dst_entry *dst = skb_dst(skb);
int ret;
@@ -888,12 +889,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
udp6_sk_rx_dst_set(sk, dst);
if (!uh->check && !udp_sk(sk)->no_check6_rx) {
- sock_put(sk);
+ if (refcounted)
+ sock_put(sk);
goto report_csum_error;
}
ret = udp6_unicast_rcv_skb(sk, skb, uh);
- sock_put(sk);
+ if (refcounted)
+ sock_put(sk);
return ret;
}
diff --git a/net/kcm/kcmproc.c b/net/kcm/kcmproc.c
index 370da2f80e3c..25c1007f1098 100644
--- a/net/kcm/kcmproc.c
+++ b/net/kcm/kcmproc.c
@@ -261,7 +261,7 @@ static int kcm_stats_seq_show(struct seq_file *seq, void *v)
aggregate_strp_stats(&knet->aggregate_strp_stats,
&strp_stats);
- list_for_each_entry_rcu(mux, &knet->mux_list, kcm_mux_list) {
+ list_for_each_entry(mux, &knet->mux_list, kcm_mux_list) {
spin_lock_bh(&mux->lock);
aggregate_mux_stats(&mux->stats, &mux_stats);
aggregate_psock_stats(&mux->aggregate_psock_stats,
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index ea9e73428ed9..56fac24a627a 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -380,9 +380,7 @@ static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
struct bpf_prog *prog = psock->bpf_prog;
int res;
- preempt_disable();
- res = BPF_PROG_RUN(prog, skb);
- preempt_enable();
+ res = bpf_prog_run_pin_on_cpu(prog, skb);
return res;
}
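bpf_prog_run_pin_on_cpu() folds the preempt_disable()/BPF_PROG_RUN()/preempt_enable() triplet into one helper that guarantees the program runs on a single CPU; on PREEMPT_RT this can be satisfied with migrate-disable rather than disabling preemption outright. A rough sketch of the wrapper's shape (kernel context assumed, not the exact implementation):

static inline u32 run_pinned(const struct bpf_prog *prog, const void *ctx)
{
        u32 ret;

        migrate_disable();      /* preempt_disable() where RT is not enabled */
        ret = BPF_PROG_RUN(prog, ctx);
        migrate_enable();
        return ret;
}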
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 2db3d50d10a4..10cf7c3dcbb3 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -116,7 +116,7 @@ struct l2tp_session {
void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len);
void (*session_close)(struct l2tp_session *session);
void (*show)(struct seq_file *m, void *priv);
- uint8_t priv[0]; /* private data */
+ u8 priv[]; /* private data */
};
/* Describes the tunnel. It contains info to track all the associated
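The [0]-to-[] change replaces the GNU zero-length-array extension with a C99 flexible array member, which must be the last member of the struct and lets fortified bounds checking understand the trailing storage. Allocation remains a single block covering header plus payload; a standalone sketch:

#include <stdlib.h>

struct session {
        int id;
        unsigned char priv[];   /* C99 flexible array member */
};

static struct session *session_alloc(size_t priv_len)
{
        /* header and private area in one allocation */
        return calloc(1, sizeof(struct session) + priv_len);
}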
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 2922d4150d88..54fb8d452a7b 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -47,7 +47,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);
#if 0
#define dprintk(args...) printk(KERN_DEBUG args)
#else
-#define dprintk(args...)
+#define dprintk(args...) do {} while (0)
#endif
/* Maybe we'll add some more in the future. */
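Expanding the no-op dprintk() to do {} while (0) rather than nothing keeps it a single statement, so the classic if/else pitfall cannot arise, for instance:

if (err)
        dprintk("failed: %d\n", err);   /* an empty expansion would orphan the else */
else
        cleanup();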
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index f3a36c16a5e7..a4eccb98220a 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -56,7 +56,7 @@ found:
return sk;
}
-static void *llc_seq_start(struct seq_file *seq, loff_t *pos)
+static void *llc_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU)
{
loff_t l = *pos;
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index 57748cab0e28..b31f1021ad9c 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -26,12 +26,20 @@ void ieee80211_aes_cmac(struct crypto_shash *tfm, const u8 *aad,
{
SHASH_DESC_ON_STACK(desc, tfm);
u8 out[AES_BLOCK_SIZE];
+ const __le16 *fc;
desc->tfm = tfm;
crypto_shash_init(desc);
crypto_shash_update(desc, aad, AAD_LEN);
- crypto_shash_update(desc, data, data_len - CMAC_TLEN);
+ fc = (const __le16 *)aad;
+ if (ieee80211_is_beacon(*fc)) {
+ /* mask Timestamp field to zero */
+ crypto_shash_update(desc, zero, 8);
+ crypto_shash_update(desc, data + 8, data_len - 8 - CMAC_TLEN);
+ } else {
+ crypto_shash_update(desc, data, data_len - CMAC_TLEN);
+ }
crypto_shash_finup(desc, zero, CMAC_TLEN, out);
memcpy(mic, out, CMAC_TLEN);
@@ -41,12 +49,21 @@ void ieee80211_aes_cmac_256(struct crypto_shash *tfm, const u8 *aad,
const u8 *data, size_t data_len, u8 *mic)
{
SHASH_DESC_ON_STACK(desc, tfm);
+ const __le16 *fc;
desc->tfm = tfm;
crypto_shash_init(desc);
crypto_shash_update(desc, aad, AAD_LEN);
- crypto_shash_update(desc, data, data_len - CMAC_TLEN_256);
+ fc = (const __le16 *)aad;
+ if (ieee80211_is_beacon(*fc)) {
+ /* mask Timestamp field to zero */
+ crypto_shash_update(desc, zero, 8);
+ crypto_shash_update(desc, data + 8,
+ data_len - 8 - CMAC_TLEN_256);
+ } else {
+ crypto_shash_update(desc, data, data_len - CMAC_TLEN_256);
+ }
crypto_shash_finup(desc, zero, CMAC_TLEN_256, mic);
}
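Beacon protection has to compute the BIP MIC as if the 8-byte Timestamp field at the start of the beacon body were zero, since the AP rewrites that field on every transmission. Streaming 8 zero bytes followed by body+8 masks the field without copying the frame; the aes_gmac.c hunk that follows does the same with one extra scatterlist entry. A toy standalone illustration of the masking idea (the hash here is a stand-in, not CMAC):

#include <stdint.h>
#include <stdio.h>

static void upd(uint32_t *acc, const uint8_t *p, size_t n)
{
        while (n--)
                *acc = *acc * 31 + *p++;
}

int main(void)
{
        uint8_t body[] = { 1, 2, 3, 4, 5, 6, 7, 8, 0xaa, 0xbb };
        static const uint8_t zero[8];
        uint32_t mic = 0;

        upd(&mic, zero, 8);                    /* masked Timestamp */
        upd(&mic, body + 8, sizeof(body) - 8); /* rest of the body */
        printf("mic=%08x\n", mic);
        return 0;
}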
diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c
index 363ad1c1dc0c..16ba09cb5def 100644
--- a/net/mac80211/aes_gmac.c
+++ b/net/mac80211/aes_gmac.c
@@ -17,10 +17,11 @@
int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
const u8 *data, size_t data_len, u8 *mic)
{
- struct scatterlist sg[4];
+ struct scatterlist sg[5];
u8 *zero, *__aad, iv[AES_BLOCK_SIZE];
struct aead_request *aead_req;
int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ const __le16 *fc;
if (data_len < GMAC_MIC_LEN)
return -EINVAL;
@@ -33,11 +34,22 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
__aad = zero + GMAC_MIC_LEN;
memcpy(__aad, aad, GMAC_AAD_LEN);
- sg_init_table(sg, 4);
- sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN);
- sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN);
- sg_set_buf(&sg[2], zero, GMAC_MIC_LEN);
- sg_set_buf(&sg[3], mic, GMAC_MIC_LEN);
+ fc = (const __le16 *)aad;
+ if (ieee80211_is_beacon(*fc)) {
+ /* mask Timestamp field to zero */
+ sg_init_table(sg, 5);
+ sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN);
+ sg_set_buf(&sg[1], zero, 8);
+ sg_set_buf(&sg[2], data + 8, data_len - 8 - GMAC_MIC_LEN);
+ sg_set_buf(&sg[3], zero, GMAC_MIC_LEN);
+ sg_set_buf(&sg[4], mic, GMAC_MIC_LEN);
+ } else {
+ sg_init_table(sg, 4);
+ sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN);
+ sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN);
+ sg_set_buf(&sg[2], zero, GMAC_MIC_LEN);
+ sg_set_buf(&sg[3], mic, GMAC_MIC_LEN);
+ }
memcpy(iv, nonce, GMAC_NONCE_LEN);
memset(iv + GMAC_NONCE_LEN, 0, sizeof(iv) - GMAC_NONCE_LEN);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 6aee699deb28..0f72813fed53 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -5,8 +5,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -568,7 +567,8 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
if (pairwise && key_idx < NUM_DEFAULT_KEYS)
key = rcu_dereference(sta->ptk[key_idx]);
else if (!pairwise &&
- key_idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
+ key_idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
+ NUM_DEFAULT_BEACON_KEYS)
key = rcu_dereference(sta->gtk[key_idx]);
} else
key = rcu_dereference(sdata->keys[key_idx]);
@@ -680,6 +680,17 @@ static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy,
return 0;
}
+static int ieee80211_config_default_beacon_key(struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 key_idx)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ ieee80211_set_default_beacon_key(sdata, key_idx);
+
+ return 0;
+}
+
void sta_set_rate_info_tx(struct sta_info *sta,
const struct ieee80211_tx_rate *rate,
struct rate_info *rinfo)
@@ -981,7 +992,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
BSS_CHANGED_P2P_PS |
BSS_CHANGED_TXPOWER |
BSS_CHANGED_TWT |
- BSS_CHANGED_HE_OBSS_PD;
+ BSS_CHANGED_HE_OBSS_PD |
+ BSS_CHANGED_HE_BSS_COLOR;
int err;
int prev_beacon_int;
@@ -989,28 +1001,25 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (old)
return -EALREADY;
- switch (params->smps_mode) {
- case NL80211_SMPS_OFF:
- sdata->smps_mode = IEEE80211_SMPS_OFF;
- break;
- case NL80211_SMPS_STATIC:
- sdata->smps_mode = IEEE80211_SMPS_STATIC;
- break;
- case NL80211_SMPS_DYNAMIC:
- sdata->smps_mode = IEEE80211_SMPS_DYNAMIC;
- break;
- default:
- return -EINVAL;
- }
- sdata->u.ap.req_smps = sdata->smps_mode;
+ if (params->smps_mode != NL80211_SMPS_OFF)
+ return -ENOTSUPP;
+
+ sdata->smps_mode = IEEE80211_SMPS_OFF;
sdata->needed_rx_chains = sdata->local->rx_chains;
prev_beacon_int = sdata->vif.bss_conf.beacon_int;
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
- if (params->he_cap)
+ if (params->he_cap && params->he_oper) {
sdata->vif.bss_conf.he_support = true;
+ sdata->vif.bss_conf.htc_trig_based_pkt_ext =
+ le32_get_bits(params->he_oper->he_oper_params,
+ IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK);
+ sdata->vif.bss_conf.frame_time_rts_th =
+ le32_get_bits(params->he_oper->he_oper_params,
+ IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK);
+ }
mutex_lock(&local->mtx);
err = ieee80211_vif_use_channel(sdata, &params->chandef,
@@ -1031,6 +1040,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
sdata->control_port_no_encrypt = params->crypto.control_port_no_encrypt;
sdata->control_port_over_nl80211 =
params->crypto.control_port_over_nl80211;
+ sdata->control_port_no_preauth =
+ params->crypto.control_port_no_preauth;
sdata->encrypt_headroom = ieee80211_cs_headroom(sdata->local,
&params->crypto,
sdata->vif.type);
@@ -1042,6 +1053,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
params->crypto.control_port_no_encrypt;
vlan->control_port_over_nl80211 =
params->crypto.control_port_over_nl80211;
+ vlan->control_port_no_preauth =
+ params->crypto.control_port_no_preauth;
vlan->encrypt_headroom =
ieee80211_cs_headroom(sdata->local,
&params->crypto,
@@ -1054,6 +1067,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
sdata->vif.bss_conf.twt_responder = params->twt_responder;
memcpy(&sdata->vif.bss_conf.he_obss_pd, &params->he_obss_pd,
sizeof(struct ieee80211_he_obss_pd));
+ memcpy(&sdata->vif.bss_conf.he_bss_color, &params->he_bss_color,
+ sizeof(struct ieee80211_he_bss_color));
sdata->vif.bss_conf.ssid_len = params->ssid_len;
if (params->ssid_len)
@@ -1166,7 +1181,6 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
kfree_rcu(old_beacon, rcu_head);
if (old_probe_resp)
kfree_rcu(old_probe_resp, rcu_head);
- sdata->u.ap.driver_smps_mode = IEEE80211_SMPS_OFF;
kfree(sdata->vif.bss_conf.ftmr_params);
sdata->vif.bss_conf.ftmr_params = NULL;
@@ -1691,20 +1705,6 @@ static int ieee80211_change_station(struct wiphy *wiphy,
mutex_unlock(&local->sta_mtx);
- if ((sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
- sta->known_smps_mode != sta->sdata->bss->req_smps &&
- test_sta_flag(sta, WLAN_STA_AUTHORIZED) &&
- sta_info_tx_streams(sta) != 1) {
- ht_dbg(sta->sdata,
- "%pM just authorized and MIMO capable - update SMPS\n",
- sta->sta.addr);
- ieee80211_send_smps_action(sta->sdata,
- sta->sdata->bss->req_smps,
- sta->sta.addr,
- sta->sdata->vif.bss_conf.bssid);
- }
-
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
ieee80211_recalc_ps(local);
@@ -2636,74 +2636,6 @@ static int ieee80211_testmode_dump(struct wiphy *wiphy,
}
#endif
-int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata,
- enum ieee80211_smps_mode smps_mode)
-{
- struct sta_info *sta;
- enum ieee80211_smps_mode old_req;
-
- if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP))
- return -EINVAL;
-
- if (sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
- return 0;
-
- old_req = sdata->u.ap.req_smps;
- sdata->u.ap.req_smps = smps_mode;
-
- /* AUTOMATIC doesn't mean much for AP - don't allow it */
- if (old_req == smps_mode ||
- smps_mode == IEEE80211_SMPS_AUTOMATIC)
- return 0;
-
- ht_dbg(sdata,
- "SMPS %d requested in AP mode, sending Action frame to %d stations\n",
- smps_mode, atomic_read(&sdata->u.ap.num_mcast_sta));
-
- mutex_lock(&sdata->local->sta_mtx);
- list_for_each_entry(sta, &sdata->local->sta_list, list) {
- /*
- * Only stations associated to our AP and
- * associated VLANs
- */
- if (sta->sdata->bss != &sdata->u.ap)
- continue;
-
- /* This station doesn't support MIMO - skip it */
- if (sta_info_tx_streams(sta) == 1)
- continue;
-
- /*
- * Don't wake up a STA just to send the action frame
- * unless we are getting more restrictive.
- */
- if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
- !ieee80211_smps_is_restrictive(sta->known_smps_mode,
- smps_mode)) {
- ht_dbg(sdata, "Won't send SMPS to sleeping STA %pM\n",
- sta->sta.addr);
- continue;
- }
-
- /*
- * If the STA is not authorized, wait until it gets
- * authorized and the action frame will be sent then.
- */
- if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
- continue;
-
- ht_dbg(sdata, "Sending SMPS to %pM\n", sta->sta.addr);
- ieee80211_send_smps_action(sdata, smps_mode, sta->sta.addr,
- sdata->vif.bss_conf.bssid);
- }
- mutex_unlock(&sdata->local->sta_mtx);
-
- sdata->smps_mode = smps_mode;
- ieee80211_queue_work(&sdata->local->hw, &sdata->recalc_smps);
-
- return 0;
-}
-
int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps_mode)
{
@@ -3964,6 +3896,60 @@ ieee80211_abort_pmsr(struct wiphy *wiphy, struct wireless_dev *dev,
return drv_abort_pmsr(local, sdata, request);
}
+static int ieee80211_set_tid_config(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_tid_config *tid_conf)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct sta_info *sta;
+ int ret;
+
+ if (!sdata->local->ops->set_tid_config)
+ return -EOPNOTSUPP;
+
+ if (!tid_conf->peer)
+ return drv_set_tid_config(sdata->local, sdata, NULL, tid_conf);
+
+ mutex_lock(&sdata->local->sta_mtx);
+ sta = sta_info_get_bss(sdata, tid_conf->peer);
+ if (!sta) {
+ mutex_unlock(&sdata->local->sta_mtx);
+ return -ENOENT;
+ }
+
+ ret = drv_set_tid_config(sdata->local, sdata, &sta->sta, tid_conf);
+ mutex_unlock(&sdata->local->sta_mtx);
+
+ return ret;
+}
+
+static int ieee80211_reset_tid_config(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *peer, u8 tid)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct sta_info *sta;
+ int ret;
+
+ if (!sdata->local->ops->reset_tid_config)
+ return -EOPNOTSUPP;
+
+ if (!peer)
+ return drv_reset_tid_config(sdata->local, sdata, NULL, tid);
+
+ mutex_lock(&sdata->local->sta_mtx);
+ sta = sta_info_get_bss(sdata, peer);
+ if (!sta) {
+ mutex_unlock(&sdata->local->sta_mtx);
+ return -ENOENT;
+ }
+
+ ret = drv_reset_tid_config(sdata->local, sdata, &sta->sta, tid);
+ mutex_unlock(&sdata->local->sta_mtx);
+
+ return ret;
+}
+
const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -3975,6 +3961,7 @@ const struct cfg80211_ops mac80211_config_ops = {
.get_key = ieee80211_get_key,
.set_default_key = ieee80211_config_default_key,
.set_default_mgmt_key = ieee80211_config_default_mgmt_key,
+ .set_default_beacon_key = ieee80211_config_default_beacon_key,
.start_ap = ieee80211_start_ap,
.change_beacon = ieee80211_change_beacon,
.stop_ap = ieee80211_stop_ap,
@@ -4063,4 +4050,6 @@ const struct cfg80211_ops mac80211_config_ops = {
.start_pmsr = ieee80211_start_pmsr,
.abort_pmsr = ieee80211_abort_pmsr,
.probe_mesh_link = ieee80211_probe_mesh_link,
+ .set_tid_config = ieee80211_set_tid_config,
+ .reset_tid_config = ieee80211_reset_tid_config,
};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index ad41d74530c6..54080290d6e2 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -150,6 +150,59 @@ static const struct file_operations aqm_ops = {
.llseek = default_llseek,
};
+static ssize_t airtime_flags_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ char buf[128] = {}, *pos, *end;
+
+ pos = buf;
+ end = pos + sizeof(buf) - 1;
+
+ if (local->airtime_flags & AIRTIME_USE_TX)
+ pos += scnprintf(pos, end - pos, "AIRTIME_TX\t(%lx)\n",
+ AIRTIME_USE_TX);
+ if (local->airtime_flags & AIRTIME_USE_RX)
+ pos += scnprintf(pos, end - pos, "AIRTIME_RX\t(%lx)\n",
+ AIRTIME_USE_RX);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf,
+ strlen(buf));
+}
+
+static ssize_t airtime_flags_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ char buf[16] = {}; /* zero-fill: strlen() below must not read uninitialized bytes */
+ size_t len;
+
+ if (count > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[sizeof(buf) - 1] = 0;
+ len = strlen(buf);
+ if (len > 0 && buf[len - 1] == '\n')
+ buf[len - 1] = 0;
+
+ if (kstrtou16(buf, 0, &local->airtime_flags))
+ return -EINVAL;
+
+ return count;
+}
+
+static const struct file_operations airtime_flags_ops = {
+ .write = airtime_flags_write,
+ .read = airtime_flags_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
static ssize_t aql_txq_limit_read(struct file *file,
char __user *user_buf,
size_t count,
@@ -522,8 +575,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
if (local->ops->wake_tx_queue)
DEBUGFS_ADD_MODE(aqm, 0600);
- debugfs_create_u16("airtime_flags", 0600,
- phyd, &local->airtime_flags);
+ DEBUGFS_ADD_MODE(airtime_flags, 0600);
DEBUGFS_ADD(aql_txq_limit);
debugfs_create_u32("aql_threshold", 0600,
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 7b8735ced2a1..98a713475e0f 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -433,6 +433,37 @@ void ieee80211_debugfs_key_remove_mgmt_default(struct ieee80211_sub_if_data *sda
sdata->debugfs.default_mgmt_key = NULL;
}
+void
+ieee80211_debugfs_key_add_beacon_default(struct ieee80211_sub_if_data *sdata)
+{
+ char buf[50];
+ struct ieee80211_key *key;
+
+ if (!sdata->vif.debugfs_dir)
+ return;
+
+ key = key_mtx_dereference(sdata->local,
+ sdata->default_beacon_key);
+ if (key) {
+ sprintf(buf, "../keys/%d", key->debugfs.cnt);
+ sdata->debugfs.default_beacon_key =
+ debugfs_create_symlink("default_beacon_key",
+ sdata->vif.debugfs_dir, buf);
+ } else {
+ ieee80211_debugfs_key_remove_beacon_default(sdata);
+ }
+}
+
+void
+ieee80211_debugfs_key_remove_beacon_default(struct ieee80211_sub_if_data *sdata)
+{
+ if (!sdata)
+ return;
+
+ debugfs_remove(sdata->debugfs.default_beacon_key);
+ sdata->debugfs.default_beacon_key = NULL;
+}
+
void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
struct sta_info *sta)
{
diff --git a/net/mac80211/debugfs_key.h b/net/mac80211/debugfs_key.h
index 1cd7b8bff56c..af7cf495f8d1 100644
--- a/net/mac80211/debugfs_key.h
+++ b/net/mac80211/debugfs_key.h
@@ -10,6 +10,10 @@ void ieee80211_debugfs_key_add_mgmt_default(
struct ieee80211_sub_if_data *sdata);
void ieee80211_debugfs_key_remove_mgmt_default(
struct ieee80211_sub_if_data *sdata);
+void ieee80211_debugfs_key_add_beacon_default(
+ struct ieee80211_sub_if_data *sdata);
+void ieee80211_debugfs_key_remove_beacon_default(
+ struct ieee80211_sub_if_data *sdata);
void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
struct sta_info *sta);
#else
@@ -26,6 +30,12 @@ static inline void ieee80211_debugfs_key_add_mgmt_default(
static inline void ieee80211_debugfs_key_remove_mgmt_default(
struct ieee80211_sub_if_data *sdata)
{}
+static inline void ieee80211_debugfs_key_add_beacon_default(
+ struct ieee80211_sub_if_data *sdata)
+{}
+static inline void ieee80211_debugfs_key_remove_beacon_default(
+ struct ieee80211_sub_if_data *sdata)
+{}
static inline void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
struct sta_info *sta)
{}
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 64b544ae9966..3dbe7c5cefd1 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2020 Intel Corporation
*/
#include <linux/kernel.h>
@@ -254,15 +255,11 @@ static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
smps_mode == IEEE80211_SMPS_AUTOMATIC))
return -EINVAL;
- if (sdata->vif.type != NL80211_IFTYPE_STATION &&
- sdata->vif.type != NL80211_IFTYPE_AP)
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
sdata_lock(sdata);
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
- err = __ieee80211_request_smps_mgd(sdata, smps_mode);
- else
- err = __ieee80211_request_smps_ap(sdata, smps_mode);
+ err = __ieee80211_request_smps_mgd(sdata, smps_mode);
sdata_unlock(sdata);
return err;
@@ -282,10 +279,6 @@ static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata,
return snprintf(buf, buflen, "request: %s\nused: %s\n",
smps_modes[sdata->u.mgd.req_smps],
smps_modes[sdata->smps_mode]);
- if (sdata->vif.type == NL80211_IFTYPE_AP)
- return snprintf(buf, buflen, "request: %s\nused: %s\n",
- smps_modes[sdata->u.ap.req_smps],
- smps_modes[sdata->smps_mode]);
return -EINVAL;
}
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 3419ed66c7b0..829dcad69c2c 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -1026,12 +1026,10 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments);
DEBUGFS_ADD_COUNTER(tx_filtered, status_stats.filtered);
- if (local->ops->wake_tx_queue)
+ if (local->ops->wake_tx_queue) {
DEBUGFS_ADD(aqm);
-
- if (wiphy_ext_feature_isset(local->hw.wiphy,
- NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
DEBUGFS_ADD(airtime);
+ }
if (wiphy_ext_feature_isset(local->hw.wiphy,
NL80211_EXT_FEATURE_AQL))
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 2c9b3eb8b652..3877710e3b48 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1358,4 +1358,31 @@ static inline void drv_del_nan_func(struct ieee80211_local *local,
trace_drv_return_void(local);
}
+static inline int drv_set_tid_config(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_conf)
+{
+ int ret;
+
+ might_sleep();
+ ret = local->ops->set_tid_config(&local->hw, &sdata->vif, sta,
+ tid_conf);
+ trace_drv_return_int(local, ret);
+
+ return ret;
+}
+
+static inline int drv_reset_tid_config(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta, u8 tid)
+{
+ int ret;
+
+ might_sleep();
+ ret = local->ops->reset_tid_config(&local->hw, &sdata->vif, sta, tid);
+ trace_drv_return_int(local, ret);
+
+ return ret;
+}
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/he.c b/net/mac80211/he.c
index 736da0035135..1087f715338b 100644
--- a/net/mac80211/he.c
+++ b/net/mac80211/he.c
@@ -3,6 +3,7 @@
* HE handling
*
* Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 - 2020 Intel Corporation
*/
#include "ieee80211_i.h"
@@ -49,6 +50,9 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
he_ppe_size);
he_cap->has_he = true;
+
+ sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(sta);
+ sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
}
void
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index a2e4d6b8fd98..e32906202575 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -9,6 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright 2017 Intel Deutschland GmbH
+ * Copyright(c) 2020 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -144,7 +145,6 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
int i, max_tx_streams;
bool changed;
enum ieee80211_sta_rx_bandwidth bw;
- enum ieee80211_smps_mode smps_mode;
memset(&ht_cap, 0, sizeof(ht_cap));
@@ -270,24 +270,30 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20;
- switch ((ht_cap.cap & IEEE80211_HT_CAP_SM_PS)
- >> IEEE80211_HT_CAP_SM_PS_SHIFT) {
- case WLAN_HT_CAP_SM_PS_INVALID:
- case WLAN_HT_CAP_SM_PS_STATIC:
- smps_mode = IEEE80211_SMPS_STATIC;
- break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
- smps_mode = IEEE80211_SMPS_DYNAMIC;
- break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
- smps_mode = IEEE80211_SMPS_OFF;
- break;
- }
-
- if (smps_mode != sta->sta.smps_mode)
- changed = true;
- sta->sta.smps_mode = smps_mode;
+ if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
+ sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ enum ieee80211_smps_mode smps_mode;
+
+ switch ((ht_cap.cap & IEEE80211_HT_CAP_SM_PS)
+ >> IEEE80211_HT_CAP_SM_PS_SHIFT) {
+ case WLAN_HT_CAP_SM_PS_INVALID:
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ smps_mode = IEEE80211_SMPS_STATIC;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ smps_mode = IEEE80211_SMPS_OFF;
+ break;
+ }
+ if (smps_mode != sta->sta.smps_mode)
+ changed = true;
+ sta->sta.smps_mode = smps_mode;
+ } else {
+ sta->sta.smps_mode = IEEE80211_SMPS_OFF;
+ }
return changed;
}
@@ -544,19 +550,6 @@ void ieee80211_request_smps_mgd_work(struct work_struct *work)
sdata_unlock(sdata);
}
-void ieee80211_request_smps_ap_work(struct work_struct *work)
-{
- struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data,
- u.ap.request_smps_work);
-
- sdata_lock(sdata);
- if (sdata_dereference(sdata->u.ap.beacon, sdata))
- __ieee80211_request_smps_ap(sdata,
- sdata->u.ap.driver_smps_mode);
- sdata_unlock(sdata);
-}
-
void ieee80211_request_smps(struct ieee80211_vif *vif,
enum ieee80211_smps_mode smps_mode)
{
@@ -572,15 +565,6 @@ void ieee80211_request_smps(struct ieee80211_vif *vif,
sdata->u.mgd.driver_smps_mode = smps_mode;
ieee80211_queue_work(&sdata->local->hw,
&sdata->u.mgd.request_smps_work);
- } else {
- /* AUTOMATIC is meaningless in AP mode */
- if (WARN_ON_ONCE(smps_mode == IEEE80211_SMPS_AUTOMATIC))
- return;
- if (sdata->u.ap.driver_smps_mode == smps_mode)
- return;
- sdata->u.ap.driver_smps_mode = smps_mode;
- ieee80211_queue_work(&sdata->local->hw,
- &sdata->u.ap.request_smps_work);
}
}
/* this might change ... don't want non-open drivers using it */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index ad15b3be8bb3..f8ed4f621f7f 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#ifndef IEEE80211_I_H
@@ -292,10 +292,7 @@ struct ieee80211_if_ap {
struct ps_data ps;
atomic_t num_mcast_sta; /* number of stations receiving multicast */
- enum ieee80211_smps_mode req_smps, /* requested smps mode */
- driver_smps_mode; /* smps mode request */
- struct work_struct request_smps_work;
bool multicast_to_unicast;
};
@@ -904,14 +901,18 @@ struct ieee80211_sub_if_data {
/* bit field of ACM bits (BIT(802.1D tag)) */
u8 wmm_acm;
- struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
+ struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS +
+ NUM_DEFAULT_MGMT_KEYS +
+ NUM_DEFAULT_BEACON_KEYS];
struct ieee80211_key __rcu *default_unicast_key;
struct ieee80211_key __rcu *default_multicast_key;
struct ieee80211_key __rcu *default_mgmt_key;
+ struct ieee80211_key __rcu *default_beacon_key;
u16 sequence_number;
__be16 control_port_protocol;
bool control_port_no_encrypt;
+ bool control_port_no_preauth;
bool control_port_over_nl80211;
int encrypt_headroom;
@@ -981,9 +982,12 @@ struct ieee80211_sub_if_data {
struct dentry *default_unicast_key;
struct dentry *default_multicast_key;
struct dentry *default_mgmt_key;
+ struct dentry *default_beacon_key;
} debugfs;
#endif
+ bool hw_80211_encap;
+
/* must be last, dynamically sized area in this! */
struct ieee80211_vif vif;
};
@@ -1473,6 +1477,7 @@ struct ieee802_11_elems {
const struct ieee80211_tim_ie *tim;
const u8 *challenge;
const u8 *rsn;
+ const u8 *rsnx;
const u8 *erp_info;
const u8 *ext_supp_rates;
const u8 *wmm_info;
@@ -1520,6 +1525,7 @@ struct ieee802_11_elems {
u8 tim_len;
u8 challenge_len;
u8 rsn_len;
+ u8 rsnx_len;
u8 ext_supp_rates_len;
u8 wmm_info_len;
u8 wmm_param_len;
@@ -1727,6 +1733,13 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_csa_settings *params);
/* interface handling */
+#define MAC80211_SUPPORTED_FEATURES_TX (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+ NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_HIGHDMA | NETIF_F_GSO_SOFTWARE)
+#define MAC80211_SUPPORTED_FEATURES_RX (NETIF_F_RXCSUM)
+#define MAC80211_SUPPORTED_FEATURES (MAC80211_SUPPORTED_FEATURES_TX | \
+ MAC80211_SUPPORTED_FEATURES_RX)
+
int ieee80211_iface_init(void);
void ieee80211_iface_exit(void);
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
@@ -1762,6 +1775,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
struct net_device *dev);
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev);
+netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
+ struct net_device *dev);
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev,
u32 info_flags,
@@ -1948,6 +1963,11 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int tid,
enum nl80211_band band, u32 txdata_flags);
+/* sta_out needs to be checked for ERR_PTR() before using */
+int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb,
+ struct sta_info **sta_out);
+
static inline void
ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int tid,
@@ -2132,8 +2152,6 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
enum nl80211_band band, u32 *basic_rates);
int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps_mode);
-int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata,
- enum ieee80211_smps_mode smps_mode);
void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index af8b09214786..d069825705d6 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -8,7 +8,7 @@
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -519,6 +519,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
master->control_port_no_encrypt;
sdata->control_port_over_nl80211 =
master->control_port_over_nl80211;
+ sdata->control_port_no_preauth =
+ master->control_port_no_preauth;
sdata->vif.cab_queue = master->vif.cab_queue;
memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
sizeof(sdata->vif.hw_queue));
@@ -824,9 +826,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_ADHOC:
ieee80211_ibss_stop(sdata);
break;
- case NL80211_IFTYPE_AP:
- cancel_work_sync(&sdata->u.ap.request_smps_work);
- break;
case NL80211_IFTYPE_MONITOR:
if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
break;
@@ -1205,6 +1204,72 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_get_stats64 = ieee80211_get_stats64,
};
+static const struct net_device_ops ieee80211_dataif_8023_ops = {
+ .ndo_open = ieee80211_open,
+ .ndo_stop = ieee80211_stop,
+ .ndo_uninit = ieee80211_uninit,
+ .ndo_start_xmit = ieee80211_subif_start_xmit_8023,
+ .ndo_set_rx_mode = ieee80211_set_multicast_list,
+ .ndo_set_mac_address = ieee80211_change_mac,
+ .ndo_select_queue = ieee80211_netdev_select_queue,
+ .ndo_get_stats64 = ieee80211_get_stats64,
+};
+
+static void __ieee80211_set_hw_80211_encap(struct ieee80211_sub_if_data *sdata,
+ bool enable)
+{
+ sdata->dev->netdev_ops = enable ? &ieee80211_dataif_8023_ops :
+ &ieee80211_dataif_ops;
+ sdata->hw_80211_encap = enable;
+}
+
+bool ieee80211_set_hw_80211_encap(struct ieee80211_vif *vif, bool enable)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_sub_if_data *iter;
+ struct ieee80211_key *key;
+
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(iter, &local->interfaces, list) {
+ struct ieee80211_sub_if_data *disable = NULL;
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ disable = iter;
+ __ieee80211_set_hw_80211_encap(iter, false);
+ } else if (iter->vif.type == NL80211_IFTYPE_MONITOR) {
+ disable = sdata;
+ enable = false;
+ }
+ if (disable)
+ sdata_dbg(disable,
+ "disable hw 80211 encap due to mon co-exist\n");
+ }
+ mutex_unlock(&local->iflist_mtx);
+
+ if (enable == sdata->hw_80211_encap)
+ return enable;
+
+ if (!sdata->dev)
+ return false;
+
+ if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG) &&
+ (local->hw.wiphy->frag_threshold != (u32)-1))
+ enable = false;
+
+ mutex_lock(&sdata->local->key_mtx);
+ list_for_each_entry(key, &sdata->key_list, list) {
+ if (key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)
+ enable = false;
+ }
+ mutex_unlock(&sdata->local->key_mtx);
+
+ __ieee80211_set_hw_80211_encap(sdata, enable);
+
+ return enable;
+}
+EXPORT_SYMBOL(ieee80211_set_hw_80211_encap);
+
static void ieee80211_if_free(struct net_device *dev)
{
free_percpu(dev->tstats);
@@ -1400,10 +1465,14 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
sdata->control_port_no_encrypt = false;
+ sdata->control_port_over_nl80211 = false;
+ sdata->control_port_no_preauth = false;
sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
sdata->vif.bss_conf.idle = true;
+ sdata->vif.bss_conf.txpower = INT_MIN; /* unset */
sdata->noack_map = 0;
+ sdata->hw_80211_encap = false;
/* only monitor/p2p-device differ */
if (sdata->dev) {
@@ -1427,10 +1496,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_AP:
skb_queue_head_init(&sdata->u.ap.ps.bc_buf);
INIT_LIST_HEAD(&sdata->u.ap.vlans);
- INIT_WORK(&sdata->u.ap.request_smps_work,
- ieee80211_request_smps_ap_work);
sdata->vif.bss_conf.bssid = sdata->vif.addr;
- sdata->u.ap.req_smps = IEEE80211_SMPS_OFF;
break;
case NL80211_IFTYPE_P2P_CLIENT:
type = NL80211_IFTYPE_STATION;
@@ -1772,6 +1838,10 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
if_setup, txqs, 1);
if (!ndev)
return -ENOMEM;
+
+ if (!local->ops->wake_tx_queue && local->hw.wiphy->tx_queue_len)
+ ndev->tx_queue_len = local->hw.wiphy->tx_queue_len;
+
dev_net_set(ndev, wiphy_net(local->hw.wiphy));
ndev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -1871,6 +1941,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
sdata->u.mgd.use_4addr = params->use_4addr;
ndev->features |= local->hw.netdev_features;
+ ndev->hw_features |= ndev->features &
+ MAC80211_SUPPORTED_FEATURES_TX;
netdev_set_default_ethtool_ops(ndev, &ieee80211_ethtool_ops);
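The new ieee80211_set_hw_80211_encap() path lets a driver flip an interface between 802.11 and 802.3 (Ethernet) TX framing at runtime: it swaps netdev_ops to the _8023 variant, but falls back to software framing whenever a monitor interface coexists, a fragmentation threshold is configured without SUPPORTS_TX_FRAG, or a TKIP key is installed, since all of those require mac80211 to see full 802.11 frames.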
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index efc1acc6543c..8f403c1bb908 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -177,6 +177,13 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
}
}
+ /* TKIP countermeasures don't work in encap offload mode */
+ if (key->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
+ sdata->hw_80211_encap) {
+ sdata_dbg(sdata, "TKIP is not allowed in hw 80211 encap mode\n");
+ return -EINVAL;
+ }
+
ret = drv_set_key(key->local, SET_KEY, sdata,
sta ? &sta->sta : NULL, &key->conf);
@@ -210,12 +217,20 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ /* We cannot do software crypto of data frames with
+ * encapsulation offload enabled. However, for 802.11w to
+ * function properly we need CMAC/GMAC keys.
+ */
+ if (sdata->hw_80211_encap)
+ return -EINVAL;
+ fallthrough;
+
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
/* all of these we can do in software - if driver can */
if (ret == 1)
return 0;
@@ -399,6 +414,31 @@ void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&sdata->local->key_mtx);
}
+static void
+__ieee80211_set_default_beacon_key(struct ieee80211_sub_if_data *sdata, int idx)
+{
+ struct ieee80211_key *key = NULL;
+
+ assert_key_lock(sdata->local);
+
+ if (idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS &&
+ idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
+ NUM_DEFAULT_BEACON_KEYS)
+ key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
+
+ rcu_assign_pointer(sdata->default_beacon_key, key);
+
+ ieee80211_debugfs_key_update_default(sdata);
+}
+
+void ieee80211_set_default_beacon_key(struct ieee80211_sub_if_data *sdata,
+ int idx)
+{
+ mutex_lock(&sdata->local->key_mtx);
+ __ieee80211_set_default_beacon_key(sdata, idx);
+ mutex_unlock(&sdata->local->key_mtx);
+}
+
static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
bool pairwise,
@@ -407,7 +447,7 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
{
int idx;
int ret = 0;
- bool defunikey, defmultikey, defmgmtkey;
+ bool defunikey, defmultikey, defmgmtkey, defbeaconkey;
/* caller must provide at least one old/new */
if (WARN_ON(!new && !old))
@@ -469,6 +509,9 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
defmgmtkey = old &&
old == key_mtx_dereference(sdata->local,
sdata->default_mgmt_key);
+ defbeaconkey = old &&
+ old == key_mtx_dereference(sdata->local,
+ sdata->default_beacon_key);
if (defunikey && !new)
__ieee80211_set_default_key(sdata, -1, true, false);
@@ -476,6 +519,8 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
__ieee80211_set_default_key(sdata, -1, false, true);
if (defmgmtkey && !new)
__ieee80211_set_default_mgmt_key(sdata, -1);
+ if (defbeaconkey && !new)
+ __ieee80211_set_default_beacon_key(sdata, -1);
rcu_assign_pointer(sdata->keys[idx], new);
if (defunikey && new)
@@ -487,6 +532,9 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
if (defmgmtkey && new)
__ieee80211_set_default_mgmt_key(sdata,
new->conf.keyidx);
+ if (defbeaconkey && new)
+ __ieee80211_set_default_beacon_key(sdata,
+ new->conf.keyidx);
}
if (old)
@@ -504,7 +552,9 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
struct ieee80211_key *key;
int i, j, err;
- if (WARN_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS))
+ if (WARN_ON(idx < 0 ||
+ idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
+ NUM_DEFAULT_BEACON_KEYS))
return ERR_PTR(-EINVAL);
key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL);
@@ -967,6 +1017,7 @@ static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata,
sdata->crypto_tx_tailroom_pending_dec = 0;
ieee80211_debugfs_key_remove_mgmt_default(sdata);
+ ieee80211_debugfs_key_remove_beacon_default(sdata);
list_for_each_entry_safe(key, tmp, &sdata->key_list, list) {
ieee80211_key_replace(key->sdata, key->sta,
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index d6d6e89cf7dd..7ad72e9b4991 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -17,6 +17,7 @@
#define NUM_DEFAULT_KEYS 4
#define NUM_DEFAULT_MGMT_KEYS 2
+#define NUM_DEFAULT_BEACON_KEYS 2
#define INVALID_PTK_KEYIDX 2 /* Keyidx always pointing to a NULL key for PTK */
struct ieee80211_local;
@@ -153,6 +154,8 @@ void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx,
bool uni, bool multi);
void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
int idx);
+void ieee80211_set_default_beacon_key(struct ieee80211_sub_if_data *sdata,
+ int idx);
void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
bool force_synchronize);
void ieee80211_free_sta_keys(struct ieee80211_local *local,
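With NUM_DEFAULT_BEACON_KEYS added, each interface's key array has a fixed layout. A sketch of the resulting index ranges, with a hypothetical classification helper built on the macros above:

/* per-interface key indices after this change:
 *   0..3 : data keys (pairwise/GTK),     NUM_DEFAULT_KEYS
 *   4..5 : IGTK (management BIP),        NUM_DEFAULT_MGMT_KEYS
 *   6..7 : BIGTK (beacon protection),    NUM_DEFAULT_BEACON_KEYS
 */
static inline bool is_beacon_keyidx(int idx)
{
        return idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS &&
               idx <  NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
                      NUM_DEFAULT_BEACON_KEYS;
}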
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 4c2b5ba3ac09..8345926193de 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -146,6 +146,8 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
continue;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
continue;
+ if (sdata->vif.bss_conf.txpower == INT_MIN)
+ continue;
power = min(power, sdata->vif.bss_conf.txpower);
}
rcu_read_unlock();
@@ -416,7 +418,20 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
},
[NL80211_IFTYPE_STATION] = {
.tx = 0xffff,
+ /*
+ * To support Pre Association Security Negotiation (PASN) while
+ * already associated to one AP, allow user space to register for
+ * Rx authentication frames, so that the user space logic would
+ * be able to receive/handle authentication frames from a
+ * different AP as part of PASN.
+ * It is expected that user space would intelligently register
+ * for Rx authentication frames, i.e., only when PASN is used,
+ * and configure a match filter only for the PASN authentication
+ * algorithm, as otherwise the MLME functionality of mac80211
+ * would be broken.
+ */
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
},
[NL80211_IFTYPE_AP] = {
@@ -561,7 +576,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
WIPHY_FLAG_REPORTS_OBSS |
WIPHY_FLAG_OFFCHAN_TX;
- if (ops->remain_on_channel)
+ if (!use_chanctx || ops->remain_on_channel)
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS |
@@ -574,6 +589,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_STA);
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211);
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH);
if (!ops->hw_scan) {
wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -872,7 +889,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
enum nl80211_band band;
int channels, max_bitrates;
bool supp_ht, supp_vht, supp_he;
- netdev_features_t feature_whitelist;
struct cfg80211_chan_def dflt_chandef = {};
if (ieee80211_hw_check(hw, QUEUE_CONTROL) &&
@@ -931,10 +947,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
}
/* Only HW csum features are currently compatible with mac80211 */
- feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA |
- NETIF_F_GSO_SOFTWARE | NETIF_F_RXCSUM;
- if (WARN_ON(hw->netdev_features & ~feature_whitelist))
+ if (WARN_ON(hw->netdev_features & ~MAC80211_SUPPORTED_FEATURES))
return -EINVAL;
if (hw->max_report_rates == 0)
@@ -981,6 +994,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (!supp_he)
supp_he = !!ieee80211_get_he_sta_cap(sband);
+ /* HT, VHT, HE require QoS, thus >= 4 queues */
+ if (WARN_ON(local->hw.queues < IEEE80211_NUM_ACS &&
+ (supp_ht || supp_vht || supp_he)))
+ return -EINVAL;
+
if (!sband->ht_cap.ht_supported)
continue;
@@ -1065,6 +1083,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
NL80211_EXT_FEATURE_EXT_KEY_ID);
}
+ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_ADHOC))
+ wiphy_ext_feature_set(local->hw.wiphy,
+ NL80211_EXT_FEATURE_DEL_IBSS_STA);
+
/*
* Calculate scan IE length -- we need this to alloc
* memory and to subtract from the driver limit. It
@@ -1184,10 +1206,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (!local->hw.weight_multiplier)
local->hw.weight_multiplier = 1;
- result = ieee80211_wep_init(local);
- if (result < 0)
- wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
- result);
+ ieee80211_wep_init(local);
local->hw.conf.flags = IEEE80211_CONF_IDLE;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 88d7a692a965..16d75da0996a 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -164,7 +164,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
chandef->center_freq1 = channel->center_freq;
if (!ht_oper || !sta_ht_cap.ht_supported) {
- ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+ ret = IEEE80211_STA_DISABLE_HT |
+ IEEE80211_STA_DISABLE_VHT |
+ IEEE80211_STA_DISABLE_HE;
goto out;
}
@@ -185,7 +187,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
"Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
channel->center_freq, ht_cfreq,
ht_oper->primary_chan, channel->band);
- ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+ ret = IEEE80211_STA_DISABLE_HT |
+ IEEE80211_STA_DISABLE_VHT |
+ IEEE80211_STA_DISABLE_HE;
goto out;
}
@@ -301,13 +305,18 @@ out:
IEEE80211_CHAN_DISABLED)) {
if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
ret = IEEE80211_STA_DISABLE_HT |
- IEEE80211_STA_DISABLE_VHT;
+ IEEE80211_STA_DISABLE_VHT |
+ IEEE80211_STA_DISABLE_HE;
break;
}
ret |= ieee80211_chandef_downgrade(chandef);
}
+ if (!he_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef,
+ IEEE80211_CHAN_NO_HE))
+ ret |= IEEE80211_STA_DISABLE_HE;
+
if (chandef->width != vht_chandef.width && !tracking)
sdata_info(sdata,
"capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
@@ -393,6 +402,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
if (flags != (ifmgd->flags & (IEEE80211_STA_DISABLE_HT |
IEEE80211_STA_DISABLE_VHT |
+ IEEE80211_STA_DISABLE_HE |
IEEE80211_STA_DISABLE_40MHZ |
IEEE80211_STA_DISABLE_80P80MHZ |
IEEE80211_STA_DISABLE_160MHZ)) ||
@@ -616,10 +626,21 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
{
u8 *pos;
const struct ieee80211_sta_he_cap *he_cap = NULL;
+ struct ieee80211_chanctx_conf *chanctx_conf;
u8 he_cap_size;
+ bool reg_cap = false;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ if (!WARN_ON_ONCE(!chanctx_conf))
+ reg_cap = cfg80211_chandef_usable(sdata->wdev.wiphy,
+ &chanctx_conf->def,
+ IEEE80211_CHAN_NO_HE);
+
+ rcu_read_unlock();
he_cap = ieee80211_get_he_sta_cap(sband);
- if (!he_cap)
+ if (!he_cap || !reg_cap)
return;
/*
@@ -650,6 +671,13 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *chan;
u32 rates = 0;
+ struct element *ext_capa = NULL;
+
+ /* we know it's writable, cast away the const */
+ if (assoc_data->ie_len)
+ ext_capa = (void *)cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY,
+ assoc_data->ie,
+ assoc_data->ie_len);
sdata_assert_lock(sdata);
@@ -800,7 +828,15 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
*pos++ = ieee80211_chandef_max_power(&chanctx_conf->def);
}
- if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
+ /*
+ * Per spec, we shouldn't include the list of channels if we advertise
+ * support for extended channel switching, but we've always done that;
+ * (for now?) apply this restriction only on the (new) 6 GHz band.
+ */
+ if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT &&
+ (sband->band != NL80211_BAND_6GHZ ||
+ !ext_capa || ext_capa->datalen < 1 ||
+ !(ext_capa->data[0] & WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING))) {
/* TODO: get this in reg domain format */
pos = skb_put(skb, 2 * sband->n_channels + 2);
*pos++ = WLAN_EID_SUPPORTED_CHANNELS;
@@ -814,18 +850,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
/* Set MBSSID support for HE AP if needed */
if (ieee80211_hw_check(&local->hw, SUPPORTS_ONLY_HE_MULTI_BSSID) &&
- !(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && assoc_data->ie_len) {
- struct element *elem;
-
- /* we know it's writable, cast away the const */
- elem = (void *)cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY,
- assoc_data->ie,
- assoc_data->ie_len);
-
- /* We can probably assume both always true */
- if (elem && elem->datalen >= 3)
- elem->data[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT;
- }
+ !(ifmgd->flags & IEEE80211_STA_DISABLE_HE) && assoc_data->ie_len &&
+ ext_capa && ext_capa->datalen >= 3)
+ ext_capa->data[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT;
/* if present, add any custom IEs that go before HT */
if (assoc_data->ie_len) {
@@ -2460,7 +2487,7 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_is_data(hdr->frame_control))
return;
- if (ieee80211_is_nullfunc(hdr->frame_control) &&
+ if (ieee80211_is_any_nullfunc(hdr->frame_control) &&
sdata->u.mgd.probe_send_count > 0) {
if (ack)
ieee80211_sta_reset_conn_monitor(sdata);
@@ -3364,9 +3391,16 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
if (bss_conf->he_support) {
- bss_conf->bss_color =
+ bss_conf->he_bss_color.color =
le32_get_bits(elems->he_operation->he_oper_params,
IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
+ bss_conf->he_bss_color.partial =
+ le32_get_bits(elems->he_operation->he_oper_params,
+ IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR);
+ bss_conf->he_bss_color.disabled =
+ le32_get_bits(elems->he_operation->he_oper_params,
+ IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED);
+ changed |= BSS_CHANGED_HE_BSS_COLOR;
bss_conf->htc_trig_based_pkt_ext =
le32_get_bits(elems->he_operation->he_oper_params,
@@ -3645,13 +3679,28 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_if_managed *ifmgd;
struct ieee80211_rx_status *rx_status = (void *) skb->cb;
+ struct ieee80211_channel *channel;
size_t baselen, len = skb->len;
ifmgd = &sdata->u.mgd;
sdata_assert_lock(sdata);
- if (!ether_addr_equal(mgmt->da, sdata->vif.addr))
+ /*
+ * According to Draft P802.11ax D6.0 clause 26.17.2.3.2:
+ * "If a 6 GHz AP receives a Probe Request frame and responds with
+ * a Probe Response frame [..], the Address 1 field of the Probe
+ * Response frame shall be set to the broadcast address [..]"
+ * So, on the 6 GHz band we should also accept broadcast responses.
+ */
+ channel = ieee80211_get_channel(sdata->local->hw.wiphy,
+ rx_status->freq);
+ if (!channel)
+ return;
+
+ if (!ether_addr_equal(mgmt->da, sdata->vif.addr) &&
+ (channel->band != NL80211_BAND_6GHZ ||
+ !is_broadcast_ether_addr(mgmt->da)))
return; /* ignore ProbeResp to foreign address */
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -4749,10 +4798,22 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
IEEE80211_STA_DISABLE_80P80MHZ |
IEEE80211_STA_DISABLE_160MHZ);
+ /* disable HT/VHT/HE if we don't support them */
+ if (!sband->ht_cap.ht_supported) {
+ ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
+ ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+ ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+ }
+
+ if (!sband->vht_cap.vht_supported)
+ ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+
+ if (!ieee80211_get_he_sta_cap(sband))
+ ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+
rcu_read_lock();
- if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
- sband->ht_cap.ht_supported) {
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
const u8 *ht_oper_ie, *ht_cap_ie;
ht_oper_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_OPERATION);
@@ -4769,8 +4830,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
}
}
- if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
- sband->vht_cap.vht_supported) {
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
const u8 *vht_oper_ie, *vht_cap;
vht_oper_ie = ieee80211_bss_get_ie(cbss,
@@ -4780,9 +4840,10 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
if (vht_oper && !ht_oper) {
vht_oper = NULL;
sdata_info(sdata,
- "AP advertised VHT without HT, disabling both\n");
+ "AP advertised VHT without HT, disabling HT/VHT/HE\n");
ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+ ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
}
vht_cap = ieee80211_bss_get_ie(cbss, WLAN_EID_VHT_CAPABILITY);
@@ -4792,9 +4853,6 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
}
}
- if (!ieee80211_get_he_sta_cap(sband))
- ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
-
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) {
const struct cfg80211_bss_ies *ies;
const u8 *he_oper_ie;
@@ -5293,27 +5351,15 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
}
}
- /* Also disable HT if we don't support it or the AP doesn't use WMM */
sband = local->hw.wiphy->bands[req->bss->channel->band];
- if (!sband->ht_cap.ht_supported ||
- local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used ||
- ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
- ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
- if (!bss->wmm_used &&
- !(ifmgd->flags & IEEE80211_STA_DISABLE_WMM))
- netdev_info(sdata->dev,
- "disabling HT as WMM/QoS is not supported by the AP\n");
- }
- /* disable VHT if we don't support it or the AP doesn't use WMM */
- if (!sband->vht_cap.vht_supported ||
- local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used ||
- ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
+ /* also disable HT/VHT/HE if the AP doesn't use WMM */
+ if (!bss->wmm_used) {
+ ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
- if (!bss->wmm_used &&
- !(ifmgd->flags & IEEE80211_STA_DISABLE_WMM))
- netdev_info(sdata->dev,
- "disabling VHT as WMM/QoS is not supported by the AP\n");
+ ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+ netdev_info(sdata->dev,
+ "disabling HT/VHT/HE as WMM/QoS is not supported by the AP\n");
}
memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
@@ -5412,6 +5458,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt;
sdata->control_port_over_nl80211 =
req->crypto.control_port_over_nl80211;
+ sdata->control_port_no_preauth = req->crypto.control_port_no_preauth;
sdata->encrypt_headroom = ieee80211_cs_headroom(local, &req->crypto,
sdata->vif.type);
@@ -5445,6 +5492,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
if (req->flags & ASSOC_REQ_DISABLE_HT) {
ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+ ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
}
if (req->flags & ASSOC_REQ_DISABLE_VHT)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0ba98ad9bc85..91a13aee4378 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -6,7 +6,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#include <linux/jiffies.h>
@@ -983,7 +983,8 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
return -1;
- if (!ieee80211_is_robust_mgmt_frame(skb))
+ if (!ieee80211_is_robust_mgmt_frame(skb) &&
+ !ieee80211_is_beacon(hdr->frame_control))
return -1; /* not a robust management frame */
mmie = (struct ieee80211_mmie *)
@@ -1450,8 +1451,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
if (ieee80211_is_ctl(hdr->frame_control) ||
- ieee80211_is_nullfunc(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control) ||
+ ieee80211_is_any_nullfunc(hdr->frame_control) ||
is_multicast_ether_addr(hdr->addr1))
return RX_CONTINUE;
@@ -1838,8 +1838,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
* Drop (qos-)data::nullfunc frames silently, since they
* are used only to control station power saving mode.
*/
- if (ieee80211_is_nullfunc(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+ if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
/*
@@ -1870,6 +1869,41 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
} /* ieee80211_rx_h_sta_process */
+static struct ieee80211_key *
+ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx)
+{
+ struct ieee80211_key *key = NULL;
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ int idx2;
+
+ /* Make sure key gets set if either BIGTK key index is set so that
+ * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected
+ * Beacon frames and Beacon frames that claim to use another BIGTK key
+ * index (i.e., a key that we do not have).
+ */
+
+ if (idx < 0) {
+ idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS;
+ idx2 = idx + 1;
+ } else {
+ if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
+ idx2 = idx + 1;
+ else
+ idx2 = idx - 1;
+ }
+
+ if (rx->sta)
+ key = rcu_dereference(rx->sta->gtk[idx]);
+ if (!key)
+ key = rcu_dereference(sdata->keys[idx]);
+ if (!key && rx->sta)
+ key = rcu_dereference(rx->sta->gtk[idx2]);
+ if (!key)
+ key = rcu_dereference(sdata->keys[idx2]);
+
+ return key;
+}
+
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
{
@@ -1887,17 +1921,18 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
/*
* Key selection 101
*
- * There are four types of keys:
+ * There are five types of keys:
* - GTK (group keys)
* - IGTK (group keys for management frames)
+ * - BIGTK (group keys for Beacon frames)
* - PTK (pairwise keys)
* - STK (station-to-station pairwise keys)
*
* When selecting a key, we have to distinguish between multicast
* (including broadcast) and unicast frames, the latter can only
- * use PTKs and STKs while the former always use GTKs and IGTKs.
- * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
- * unicast frames can also use key indices like GTKs. Hence, if we
+ * use PTKs and STKs while the former always use GTKs, IGTKs, and
+ * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used,
+ * then unicast frames can also use key indices like GTKs. Hence, if we
* don't have a PTK/STK we check the key index for a WEP key.
*
* Note that in a regular BSS, multicast frames are sent by the
@@ -1941,6 +1976,20 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
/* Skip decryption if the frame is not protected. */
if (!ieee80211_has_protected(fc))
return RX_CONTINUE;
+ } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) {
+ /* Broadcast/multicast robust management frame / BIP */
+ if ((status->flag & RX_FLAG_DECRYPTED) &&
+ (status->flag & RX_FLAG_IV_STRIPPED))
+ return RX_CONTINUE;
+
+ if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS ||
+ mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
+ NUM_DEFAULT_BEACON_KEYS)
+ return RX_DROP_MONITOR; /* unexpected BIP keyidx */
+
+ rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx);
+ if (!rx->key)
+ return RX_CONTINUE; /* Beacon protection not in use */
} else if (mmie_keyidx >= 0) {
/* Broadcast/multicast robust management frame / BIP */
if ((status->flag & RX_FLAG_DECRYPTED) &&
@@ -1970,11 +2019,12 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
struct ieee80211_sub_if_data *sdata = rx->sdata;
int i;
- if (ieee80211_is_mgmt(fc) &&
- is_multicast_ether_addr(hdr->addr1) &&
- (key = rcu_dereference(rx->sdata->default_mgmt_key)))
- rx->key = key;
- else {
+ if (ieee80211_is_beacon(fc)) {
+ key = ieee80211_rx_get_bigtk(rx, -1);
+ } else if (ieee80211_is_mgmt(fc) &&
+ is_multicast_ether_addr(hdr->addr1)) {
+ key = rcu_dereference(rx->sdata->default_mgmt_key);
+ } else {
if (rx->sta) {
for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
key = rcu_dereference(rx->sta->gtk[i]);
@@ -1989,9 +2039,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
break;
}
}
- if (key)
- rx->key = key;
}
+ if (key)
+ rx->key = key;
return RX_CONTINUE;
} else {
/*
@@ -2319,7 +2369,7 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
/* Drop unencrypted frames if key is set. */
if (unlikely(!ieee80211_has_protected(fc) &&
- !ieee80211_is_nullfunc(fc) &&
+ !ieee80211_is_any_nullfunc(fc) &&
ieee80211_is_data(fc) && rx->key))
return -EACCES;
@@ -2360,6 +2410,9 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
rx->skb->len);
return -EACCES;
}
+ if (unlikely(ieee80211_is_beacon(fc) && rx->key &&
+ ieee80211_get_mmie_keyidx(rx->skb) < 0))
+ return -EACCES;
/*
* When using MFP, Action frames are not allowed prior to
* having configured keys.
@@ -2444,7 +2497,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
struct net_device *dev = sdata->dev;
if (unlikely((skb->protocol == sdata->control_port_protocol ||
- skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
+ (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) &&
+ !sdata->control_port_no_preauth)) &&
sdata->control_port_over_nl80211)) {
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
@@ -3084,6 +3138,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
enum ieee80211_smps_mode smps_mode;
struct sta_opmode_info sta_opmode = {};
+ if (sdata->vif.type != NL80211_IFTYPE_AP &&
+ sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
+ goto handled;
+
/* convert to HT capability */
switch (mgmt->u.action.u.ht_smps.smps_control) {
case WLAN_HT_SMPS_CONTROL_DISABLED:
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 4d31d9688dc2..fdac8192a519 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -201,8 +201,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
mgmt->bssid, cbss->bssid);
/* In case the signal is invalid update the status */
- signal_valid = abs(channel->center_freq - cbss->channel->center_freq)
- <= local->hw.wiphy->max_adj_channel_rssi_comp;
+ signal_valid = channel == cbss->channel;
if (!signal_valid)
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index e3572be307d6..f8d5c2515829 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1356,20 +1356,6 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
atomic_dec(&ps->num_sta_ps);
- /* This station just woke up and isn't aware of our SMPS state */
- if (!ieee80211_vif_is_mesh(&sdata->vif) &&
- !ieee80211_smps_is_restrictive(sta->known_smps_mode,
- sdata->smps_mode) &&
- sta->known_smps_mode != sdata->bss->req_smps &&
- sta_info_tx_streams(sta) != 1) {
- ht_dbg(sdata,
- "%pM just woke up and MIMO capable - update SMPS\n",
- sta->sta.addr);
- ieee80211_send_smps_action(sdata, sdata->bss->req_smps,
- sta->sta.addr,
- sdata->vif.bss_conf.bssid);
- }
-
local->total_ps_buffered -= buffered;
sta_info_recalc_tim(sta);
@@ -2169,19 +2155,41 @@ static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
return 0;
}
+static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
+ int tid)
+{
+ unsigned int start;
+ u64 value;
+
+ do {
+ start = u64_stats_fetch_begin(&rxstats->syncp);
+ value = rxstats->msdu[tid];
+ } while (u64_stats_fetch_retry(&rxstats->syncp, start));
+
+ return value;
+}
+
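sta_get_tidstats_msdu() above is the reader side of the lockless u64_stats protocol: copy the value, then retry if a writer bumped the sequence counter in between. A simplified userspace sketch of that retry loop, with C11 atomics standing in for the kernel's u64_stats_sync (memory ordering simplified):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct stat64 {
	atomic_uint seq;	/* even: stable, odd: write in progress */
	uint64_t value;
};

static void writer_add(struct stat64 *s, uint64_t delta)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
	s->value += delta;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

static uint64_t reader_fetch(struct stat64 *s)
{
	unsigned int start;
	uint64_t v;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		v = s->value;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
	return v;
}

int main(void)
{
	struct stat64 s = { 0 };

	writer_add(&s, 42);
	printf("%llu\n", (unsigned long long)reader_fetch(&s));
	return 0;
}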
static void sta_set_tidstats(struct sta_info *sta,
struct cfg80211_tid_stats *tidstats,
int tid)
{
struct ieee80211_local *local = sta->local;
+ int cpu;
if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
- unsigned int start;
+ if (!ieee80211_hw_check(&local->hw, USES_RSS))
+ tidstats->rx_msdu +=
+ sta_get_tidstats_msdu(&sta->rx_stats, tid);
- do {
- start = u64_stats_fetch_begin(&sta->rx_stats.syncp);
- tidstats->rx_msdu = sta->rx_stats.msdu[tid];
- } while (u64_stats_fetch_retry(&sta->rx_stats.syncp, start));
+ if (sta->pcpu_rx_stats) {
+ for_each_possible_cpu(cpu) {
+ struct ieee80211_sta_rx_stats *cpurxs;
+
+ cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
+ tidstats->rx_msdu +=
+ sta_get_tidstats_msdu(cpurxs, tid);
+ }
+ }
tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU);
}
@@ -2285,7 +2293,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
- sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
+ if (!ieee80211_hw_check(&local->hw, USES_RSS))
+ sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
if (sta->pcpu_rx_stats) {
for_each_possible_cpu(cpu) {
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 552eed36faca..36f1abaab9ff 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -534,7 +534,9 @@ struct sta_info {
u8 addr[ETH_ALEN];
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
- struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
+ struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS +
+ NUM_DEFAULT_MGMT_KEYS +
+ NUM_DEFAULT_BEACON_KEYS];
struct ieee80211_key __rcu *ptk[NUM_DEFAULT_KEYS];
u8 ptk_idx;
struct rate_control_ref *rate_ctrl;
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index b720feaf9a74..22512805eafb 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -643,8 +643,7 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
rcu_read_lock();
sdata = ieee80211_sdata_from_skb(local, skb);
if (sdata) {
- if (ieee80211_is_nullfunc(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control))
+ if (ieee80211_is_any_nullfunc(hdr->frame_control))
cfg80211_probe_status(sdata->dev, hdr->addr1,
cookie, acked,
info->status.ack_signal,
@@ -888,6 +887,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
int rates_idx;
bool send_to_cooked;
bool acked;
+ bool noack_success;
struct ieee80211_bar *bar;
int shift = 0;
int tid = IEEE80211_NUM_TIDS;
@@ -906,6 +906,8 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
clear_sta_flag(sta, WLAN_STA_SP);
acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ noack_success = !!(info->flags &
+ IEEE80211_TX_STAT_NOACK_TRANSMITTED);
/* mesh Peer Service Period support */
if (ieee80211_vif_is_mesh(&sta->sdata->vif) &&
@@ -970,12 +972,12 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
ieee80211_handle_filtered_frame(local, sta, skb);
return;
} else {
- if (!acked)
+ if (!acked && !noack_success)
sta->status_stats.retry_failed++;
sta->status_stats.retry_count += retry_count;
if (ieee80211_is_data_present(fc)) {
- if (!acked)
+ if (!acked && !noack_success)
sta->status_stats.msdu_failed[tid]++;
sta->status_stats.msdu_retries[tid] +=
@@ -1013,7 +1015,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
}
if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
- if (info->flags & IEEE80211_TX_STAT_ACK) {
+ if (acked) {
if (sta->status_stats.lost_packets)
sta->status_stats.lost_packets = 0;
@@ -1021,6 +1023,8 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
sta->status_stats.last_tdls_pkt_time =
jiffies;
+ } else if (noack_success) {
+ /* nothing to do here, do not account as lost */
} else {
ieee80211_lost_packet(sta, info);
}
@@ -1056,7 +1060,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
I802_DEBUG_INC(local->dot11FailedCount);
}
- if ((ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
+ if (ieee80211_is_any_nullfunc(fc) &&
ieee80211_has_pm(fc) &&
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
@@ -1141,7 +1145,7 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
sta = container_of(pubsta, struct sta_info, sta);
- if (!acked)
+ if (!acked && !noack_success)
sta->status_stats.retry_failed++;
sta->status_stats.retry_count += retry_count;
@@ -1156,6 +1160,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
sta->status_stats.last_tdls_pkt_time = jiffies;
} else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
return;
+ } else if (noack_success) {
+ /* nothing to do here, do not account as lost */
} else {
ieee80211_lost_packet(sta, info);
}
@@ -1198,6 +1204,77 @@ void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_tx_rate_update);
+void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct sta_info *sta;
+ int retry_count;
+ int rates_idx;
+ bool acked;
+
+ sdata = vif_to_sdata(vif);
+
+ acked = info->flags & IEEE80211_TX_STAT_ACK;
+ rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
+
+ rcu_read_lock();
+
+ if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
+ goto counters_update;
+
+ if (IS_ERR(sta))
+ goto counters_update;
+
+ if (!acked)
+ sta->status_stats.retry_failed++;
+
+ if (rates_idx != -1)
+ sta->tx_stats.last_rate = info->status.rates[rates_idx];
+
+ sta->status_stats.retry_count += retry_count;
+
+ if (ieee80211_hw_check(hw, REPORTS_TX_ACK_STATUS)) {
+ if (acked && vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_sta_reset_conn_monitor(sdata);
+
+ sta->status_stats.last_ack = jiffies;
+ if (info->flags & IEEE80211_TX_STAT_ACK) {
+ if (sta->status_stats.lost_packets)
+ sta->status_stats.lost_packets = 0;
+
+ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
+ sta->status_stats.last_tdls_pkt_time = jiffies;
+ } else {
+ ieee80211_lost_packet(sta, info);
+ }
+ }
+
+counters_update:
+ rcu_read_unlock();
+ ieee80211_led_tx(local);
+
+ if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
+ !(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED))
+ goto skip_stats_update;
+
+ I802_DEBUG_INC(local->dot11TransmittedFrameCount);
+ if (is_multicast_ether_addr(skb->data))
+ I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount);
+ if (retry_count > 0)
+ I802_DEBUG_INC(local->dot11RetryCount);
+ if (retry_count > 1)
+ I802_DEBUG_INC(local->dot11MultipleRetryCount);
+
+skip_stats_update:
+ ieee80211_report_used_skb(local, skb, false);
+ dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(ieee80211_tx_status_8023);
+
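With the new noack_success flag a TX status now lands in one of three buckets: acked, transmitted with no ACK expected, or lost. A small sketch of that classification; the flag bits are local stand-ins for the IEEE80211_TX_STAT_* values:

#include <stdio.h>

#define TX_STAT_ACK			0x1	/* stand-in flag bits */
#define TX_STAT_NOACK_TRANSMITTED	0x2

enum tx_outcome { TX_ACKED, TX_NOACK_OK, TX_LOST };

static enum tx_outcome classify(unsigned int flags)
{
	if (flags & TX_STAT_ACK)
		return TX_ACKED;	/* resets the lost-packet counter */
	if (flags & TX_STAT_NOACK_TRANSMITTED)
		return TX_NOACK_OK;	/* sent fine, never account as lost */
	return TX_LOST;			/* feeds ieee80211_lost_packet() */
}

int main(void)
{
	printf("%d %d %d\n", classify(TX_STAT_ACK),
	       classify(TX_STAT_NOACK_TRANSMITTED), classify(0));
	return 0;
}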
void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index d9cca6dbd870..82846aca86d9 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018, 2020 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*
* Transmit and frame generation functions.
*/
@@ -297,7 +297,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
!ieee80211_is_probe_req(hdr->frame_control) &&
- !ieee80211_is_nullfunc(hdr->frame_control))
+ !ieee80211_is_any_nullfunc(hdr->frame_control))
/*
* When software scanning only nullfunc frames (to notify
* the sleep state to the AP) and probe requests (for the
@@ -1256,7 +1256,8 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
return NULL;
- if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
+ if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) &&
+ unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
if ((!ieee80211_is_mgmt(hdr->frame_control) ||
ieee80211_is_bufferable_mmpdu(hdr->frame_control) ||
vif->type == NL80211_IFTYPE_STATION) &&
@@ -2366,9 +2367,9 @@ static inline bool ieee80211_is_tdls_setup(struct sk_buff *skb)
skb->data[14] == WLAN_TDLS_SNAP_RFTYPE;
}
-static int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb,
- struct sta_info **sta_out)
+int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb,
+ struct sta_info **sta_out)
{
struct sta_info *sta;
@@ -3610,7 +3611,8 @@ begin:
* Drop unicast frames to unauthorised stations unless they are
* EAPOL frames from the local station.
*/
- if (unlikely(!ieee80211_vif_is_mesh(&tx.sdata->vif) &&
+ if (unlikely(ieee80211_is_data(hdr->frame_control) &&
+ !ieee80211_vif_is_mesh(&tx.sdata->vif) &&
tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
!is_multicast_ether_addr(hdr->addr1) &&
!test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) &&
@@ -3639,6 +3641,9 @@ begin:
else
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)
+ goto encap_out;
+
if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
struct sta_info *sta = container_of(txq->sta, struct sta_info,
sta);
@@ -3698,9 +3703,11 @@ begin:
break;
}
+encap_out:
IEEE80211_SKB_CB(skb)->control.vif = vif;
- if (wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
+ if (vif &&
+ wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
u32 airtime;
airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta,
@@ -4126,6 +4133,153 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, int led_len,
+ struct sta_info *sta,
+ bool txpending)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_tx_control control = {};
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *pubsta = NULL;
+ unsigned long flags;
+ int q = info->hw_queue;
+
+ if (ieee80211_queue_skb(local, sdata, sta, skb))
+ return true;
+
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+
+ if (local->queue_stop_reasons[q] ||
+ (!txpending && !skb_queue_empty(&local->pending[q]))) {
+ if (txpending)
+ skb_queue_head(&local->pending[q], skb);
+ else
+ skb_queue_tail(&local->pending[q], skb);
+
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+ return false;
+ }
+
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+ if (sta && sta->uploaded)
+ pubsta = &sta->sta;
+
+ control.sta = pubsta;
+
+ drv_tx(local, &control, skb);
+
+ return true;
+}
+
+static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
+ struct net_device *dev, struct sta_info *sta,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ethhdr *ehdr = (struct ethhdr *)skb->data;
+ struct ieee80211_local *local = sdata->local;
+ bool authorized = false;
+ bool multicast;
+ unsigned char *ra = ehdr->h_dest;
+
+ if (IS_ERR(sta) || (sta && !sta->uploaded))
+ sta = NULL;
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER)))
+ ra = sdata->u.mgd.bssid;
+
+ if (!is_valid_ether_addr(ra))
+ goto out_free;
+
+ multicast = is_multicast_ether_addr(ra);
+
+ if (sta)
+ authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
+
+ if (!multicast && !authorized &&
+ (ehdr->h_proto != sdata->control_port_protocol ||
+ !ether_addr_equal(sdata->vif.addr, ehdr->h_source)))
+ goto out_free;
+
+ if (multicast && sdata->vif.type == NL80211_IFTYPE_AP &&
+ !atomic_read(&sdata->u.ap.num_mcast_sta))
+ goto out_free;
+
+ if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)) &&
+ test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
+ goto out_free;
+
+ if (unlikely(!multicast && skb->sk &&
+ skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
+ ieee80211_store_ack_skb(local, skb, &info->flags);
+
+ memset(info, 0, sizeof(*info));
+
+ if (unlikely(sdata->control_port_protocol == ehdr->h_proto)) {
+ if (sdata->control_port_no_encrypt)
+ info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+ }
+
+ if (multicast)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+
+ info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
+
+ ieee80211_tx_stats(dev, skb->len);
+
+ if (sta) {
+ sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
+ sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
+ }
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data, u.ap);
+
+ info->control.flags |= IEEE80211_TX_CTRL_HW_80211_ENCAP;
+ info->control.vif = &sdata->vif;
+
+ ieee80211_tx_8023(sdata, skb, skb->len, sta, false);
+
+ return;
+
+out_free:
+ kfree_skb(skb);
+}
+
+netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct sta_info *sta;
+
+ if (WARN_ON(!sdata->hw_80211_encap)) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (unlikely(skb->len < ETH_HLEN)) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ rcu_read_lock();
+
+ if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
+ kfree_skb(skb);
+ else
+ ieee80211_8023_xmit(sdata, dev, sta, skb);
+
+ rcu_read_unlock();
+
+ return NETDEV_TX_OK;
+}
+
struct sk_buff *
ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 info_flags)
@@ -4204,6 +4358,16 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
}
info->band = chanctx_conf->def.chan->band;
result = ieee80211_tx(sdata, NULL, skb, true, 0);
+ } else if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) {
+ if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
+ dev_kfree_skb(skb);
+ return true;
+ }
+
+ if (IS_ERR(sta) || (sta && !sta->uploaded))
+ sta = NULL;
+
+ result = ieee80211_tx_8023(sdata, skb, skb->len, sta, true);
} else {
struct sk_buff_head skbs;
@@ -4524,6 +4688,32 @@ bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
}
EXPORT_SYMBOL(ieee80211_csa_is_complete);
+static int ieee80211_beacon_protect(struct sk_buff *skb,
+ struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ ieee80211_tx_result res;
+ struct ieee80211_tx_data tx;
+ struct sk_buff *check_skb;
+
+ memset(&tx, 0, sizeof(tx));
+ tx.key = rcu_dereference(sdata->default_beacon_key);
+ if (!tx.key)
+ return 0;
+ tx.local = local;
+ tx.sdata = sdata;
+ __skb_queue_head_init(&tx.skbs);
+ __skb_queue_tail(&tx.skbs, skb);
+ res = ieee80211_tx_h_encrypt(&tx);
+ check_skb = __skb_dequeue(&tx.skbs);
+ /* we may crash after this, but it'd be a bug in crypto */
+ WARN_ON(check_skb != skb);
+ if (WARN_ON_ONCE(res != TX_CONTINUE))
+ return -EINVAL;
+
+ return 0;
+}
+
static struct sk_buff *
__ieee80211_beacon_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -4591,6 +4781,9 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
if (beacon->tail)
skb_put_data(skb, beacon->tail,
beacon->tail_len);
+
+ if (ieee80211_beacon_protect(skb, local, sdata) < 0)
+ goto out;
} else
goto out;
} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index decd46b38393..20436c86b9bf 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -6,7 +6,7 @@
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*
* utilities for mac80211
*/
@@ -39,7 +39,6 @@ const void *const mac80211_wiphy_privid = &mac80211_wiphy_privid;
struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
{
struct ieee80211_local *local;
- BUG_ON(!wiphy);
local = wiphy_priv(wiphy);
return &local->hw;
@@ -891,6 +890,55 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_queue_delayed_work);
+static void ieee80211_parse_extension_element(u32 *crc,
+ const struct element *elem,
+ struct ieee802_11_elems *elems)
+{
+ const void *data = elem->data + 1;
+ u8 len = elem->datalen - 1;
+
+ switch (elem->data[0]) {
+ case WLAN_EID_EXT_HE_MU_EDCA:
+ if (len == sizeof(*elems->mu_edca_param_set)) {
+ elems->mu_edca_param_set = data;
+ if (crc)
+ *crc = crc32_be(*crc, (void *)elem,
+ elem->datalen + 2);
+ }
+ break;
+ case WLAN_EID_EXT_HE_CAPABILITY:
+ elems->he_cap = data;
+ elems->he_cap_len = len;
+ break;
+ case WLAN_EID_EXT_HE_OPERATION:
+ if (len >= sizeof(*elems->he_operation) &&
+ len == ieee80211_he_oper_size(data) - 1) {
+ if (crc)
+ *crc = crc32_be(*crc, (void *)elem,
+ elem->datalen + 2);
+ elems->he_operation = data;
+ }
+ break;
+ case WLAN_EID_EXT_UORA:
+ if (len == 1)
+ elems->uora_element = data;
+ break;
+ case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
+ if (len == 3)
+ elems->max_channel_switch_time = data;
+ break;
+ case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
+ if (len == sizeof(*elems->mbssid_config_ie))
+ elems->mbssid_config_ie = data;
+ break;
+ case WLAN_EID_EXT_HE_SPR:
+ if (len >= sizeof(*elems->he_spr) &&
+ len >= ieee80211_he_spr_size(data))
+ elems->he_spr = data;
+ break;
+ }
+}
+
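In the refactored parser, elem->data[0] is the extension ID and elem->datalen still counts that byte, so every case validates len = datalen - 1 before publishing a pointer. A standalone sketch of the pattern with one hypothetical extension ID:

#include <stdint.h>
#include <stdio.h>

#define EID_EXT_EXAMPLE 0x24	/* hypothetical extension ID */

struct parsed {
	const uint8_t *example;
	uint8_t example_len;
};

/* data points past the element header; datalen includes the ext ID byte */
static void parse_ext_element(const uint8_t *data, uint8_t datalen,
			      struct parsed *out)
{
	const uint8_t *body;
	uint8_t len;

	if (!datalen)
		return;
	body = data + 1;
	len = datalen - 1;

	switch (data[0]) {
	case EID_EXT_EXAMPLE:
		if (len >= 2) {	/* only accept a well-sized body */
			out->example = body;
			out->example_len = len;
		}
		break;
	}
}

int main(void)
{
	uint8_t elem[] = { EID_EXT_EXAMPLE, 0xaa, 0xbb };
	struct parsed p = { 0 };

	parse_ext_element(elem, sizeof(elem), &p);
	printf("len=%u\n", p.example ? p.example_len : 0);
	return 0;
}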
static u32
_ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
struct ieee802_11_elems *elems,
@@ -950,6 +998,7 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
case WLAN_EID_CHAN_SWITCH_TIMING:
case WLAN_EID_LINK_ID:
case WLAN_EID_BSS_MAX_IDLE_PERIOD:
+ case WLAN_EID_RSNX:
/*
* not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible
* that if the content gets bigger it might be needed more than once
@@ -1226,34 +1275,14 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
if (elen >= sizeof(*elems->max_idle_period_ie))
elems->max_idle_period_ie = (void *)pos;
break;
+ case WLAN_EID_RSNX:
+ elems->rsnx = pos;
+ elems->rsnx_len = elen;
+ break;
case WLAN_EID_EXTENSION:
- if (pos[0] == WLAN_EID_EXT_HE_MU_EDCA &&
- elen >= (sizeof(*elems->mu_edca_param_set) + 1)) {
- elems->mu_edca_param_set = (void *)&pos[1];
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
- } else if (pos[0] == WLAN_EID_EXT_HE_CAPABILITY) {
- elems->he_cap = (void *)&pos[1];
- elems->he_cap_len = elen - 1;
- } else if (pos[0] == WLAN_EID_EXT_HE_OPERATION &&
- elen >= sizeof(*elems->he_operation) &&
- elen >= ieee80211_he_oper_size(&pos[1])) {
- elems->he_operation = (void *)&pos[1];
- } else if (pos[0] == WLAN_EID_EXT_UORA && elen >= 1) {
- elems->uora_element = (void *)&pos[1];
- } else if (pos[0] ==
- WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME &&
- elen == 4) {
- elems->max_channel_switch_time = pos + 1;
- } else if (pos[0] ==
- WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION &&
- elen == 3) {
- elems->mbssid_config_ie = (void *)&pos[1];
- } else if (pos[0] == WLAN_EID_EXT_HE_SPR &&
- elen >= sizeof(*elems->he_spr) &&
- elen >= ieee80211_he_spr_size(&pos[1])) {
- elems->he_spr = (void *)&pos[1];
- }
+ ieee80211_parse_extension_element(calc_crc ?
+ &crc : NULL,
+ elem, elems);
break;
default:
break;
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index ccdcb9ad9ac7..632f07401850 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -333,11 +333,33 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
}
}
+/* FIXME: move this to some better location - parses HE now */
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
{
struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap;
u32 cap_width;
+ if (he_cap->has_he) {
+ u8 info = he_cap->he_cap_elem.phy_cap_info[0];
+
+ if (sta->sdata->vif.bss_conf.chandef.chan->band ==
+ NL80211_BAND_2GHZ) {
+ if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
+ return IEEE80211_STA_RX_BW_40;
+ else
+ return IEEE80211_STA_RX_BW_20;
+ }
+
+ if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G ||
+ info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ return IEEE80211_STA_RX_BW_160;
+ else if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
+ return IEEE80211_STA_RX_BW_80;
+
+ return IEEE80211_STA_RX_BW_20;
+ }
+
if (!vht_cap->vht_supported)
return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
IEEE80211_STA_RX_BW_40 :
@@ -433,6 +455,7 @@ ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
}
}
+/* FIXME: rename/move - this deals with everything not just VHT */
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -458,12 +481,40 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
void ieee80211_sta_set_rx_nss(struct sta_info *sta)
{
- u8 ht_rx_nss = 0, vht_rx_nss = 0;
+ u8 ht_rx_nss = 0, vht_rx_nss = 0, he_rx_nss = 0, rx_nss;
/* if we received a notification already don't overwrite it */
if (sta->sta.rx_nss)
return;
+ if (sta->sta.he_cap.has_he) {
+ int i;
+ u8 rx_mcs_80 = 0, rx_mcs_160 = 0;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap;
+ u16 mcs_160_map =
+ le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ u16 mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
+
+ if (mcs_160 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
+ rx_mcs_160 = i + 1;
+ break;
+ }
+ }
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
+
+ if (mcs_80 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
+ rx_mcs_80 = i + 1;
+ break;
+ }
+ }
+
+ he_rx_nss = min(rx_mcs_80, rx_mcs_160);
+ }
+
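An HE MCS map packs eight 2-bit fields, one per spatial stream, and scanning from stream 8 downward finds the highest stream with any supported MCS; the code above then takes the minimum of the 80 MHz and 160 MHz results. A sketch of the decode (3 is the 2-bit "not supported" encoding, as with VHT):

#include <stdint.h>
#include <stdio.h>

#define MCS_NOT_SUPPORTED 3	/* 2-bit "not supported" encoding */

static uint8_t mcs_map_to_nss(uint16_t map)
{
	for (int i = 7; i >= 0; i--)
		if (((map >> (2 * i)) & 3) != MCS_NOT_SUPPORTED)
			return i + 1;
	return 0;
}

int main(void)
{
	/* streams 1-2 support some MCS, streams 3-8 do not: 0xfffa */
	printf("nss=%u\n", mcs_map_to_nss(0xfffa));	/* prints nss=2 */
	/* then, as in the patch: rx_nss = min(nss_80, nss_160) */
	return 0;
}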
if (sta->sta.ht_cap.ht_supported) {
if (sta->sta.ht_cap.mcs.rx_mask[0])
ht_rx_nss++;
@@ -493,8 +544,9 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
/* FIXME: consider rx_highest? */
}
- ht_rx_nss = max(ht_rx_nss, vht_rx_nss);
- sta->sta.rx_nss = max_t(u8, 1, ht_rx_nss);
+ rx_nss = max(vht_rx_nss, ht_rx_nss);
+ rx_nss = max(he_rx_nss, rx_nss);
+ sta->sta.rx_nss = max_t(u8, 1, rx_nss);
}
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index b75c2c54e665..9a6e11d7b4db 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -22,12 +22,10 @@
#include "wep.h"
-int ieee80211_wep_init(struct ieee80211_local *local)
+void ieee80211_wep_init(struct ieee80211_local *local)
{
/* start WEP IV from a random value */
get_random_bytes(&local->wep_iv, IEEE80211_WEP_IV_LEN);
-
- return 0;
}
static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen)
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h
index 997a034233c2..4ffe83554c67 100644
--- a/net/mac80211/wep.h
+++ b/net/mac80211/wep.h
@@ -13,7 +13,7 @@
#include "ieee80211_i.h"
#include "key.h"
-int ieee80211_wep_init(struct ieee80211_local *local);
+void ieee80211_wep_init(struct ieee80211_local *local);
int ieee80211_wep_encrypt_data(struct arc4_ctx *ctx, u8 *rc4key,
size_t klen, u8 *data, size_t data_len);
int ieee80211_wep_encrypt(struct ieee80211_local *local,
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 768a302879b4..0e9aa94adc07 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -98,7 +98,7 @@ struct mpls_nh { /* next hop label forwarding entry */
u8 nh_via_table;
u8 nh_reserved1;
- u32 nh_label[0];
+ u32 nh_label[];
};
/* offset of via from beginning of mpls_nh */
@@ -154,7 +154,7 @@ struct mpls_route { /* next hop label forwarding entry */
u8 rt_nh_size;
u8 rt_via_offset;
u8 rt_reserved1;
- struct mpls_nh rt_nh[0];
+ struct mpls_nh rt_nh[];
};
#define for_nexthops(rt) { \
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 44b675016393..2def85718d94 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -162,7 +162,7 @@ drop:
return -EINVAL;
}
-static int mpls_build_state(struct nlattr *nla,
+static int mpls_build_state(struct net *net, struct nlattr *nla,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack)
diff --git a/net/mptcp/Makefile b/net/mptcp/Makefile
index 4e98d9edfd0a..baa0640527c7 100644
--- a/net/mptcp/Makefile
+++ b/net/mptcp/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MPTCP) += mptcp.o
-mptcp-y := protocol.o subflow.o options.o token.o crypto.o ctrl.o
+mptcp-y := protocol.o subflow.o options.o token.o crypto.o ctrl.o pm.o diag.o \
+ mib.o pm_netlink.o
diff --git a/net/mptcp/crypto.c b/net/mptcp/crypto.c
index 40d1bb18fd60..c151628bd416 100644
--- a/net/mptcp/crypto.c
+++ b/net/mptcp/crypto.c
@@ -44,8 +44,7 @@ void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn)
*idsn = be64_to_cpu(*((__be64 *)&mptcp_hashed_key[6]));
}
-void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
- void *hmac)
+void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac)
{
u8 input[SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE];
__be32 mptcp_hashed_key[SHA256_DIGEST_WORDS];
@@ -55,6 +54,9 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
u8 key2be[8];
int i;
+ if (WARN_ON_ONCE(len > SHA256_DIGEST_SIZE))
+ len = SHA256_DIGEST_SIZE;
+
put_unaligned_be64(key1, key1be);
put_unaligned_be64(key2, key2be);
@@ -65,11 +67,10 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
for (i = 0; i < 8; i++)
input[i + 8] ^= key2be[i];
- put_unaligned_be32(nonce1, &input[SHA256_BLOCK_SIZE]);
- put_unaligned_be32(nonce2, &input[SHA256_BLOCK_SIZE + 4]);
+ memcpy(&input[SHA256_BLOCK_SIZE], msg, len);
sha256_init(&state);
- sha256_update(&state, input, SHA256_BLOCK_SIZE + 8);
+ sha256_update(&state, input, SHA256_BLOCK_SIZE + len);
/* emit sha256(K1 || msg) on the second input block, so we can
* reuse 'input' for the last hashing
@@ -125,6 +126,7 @@ static int __init test_mptcp_crypto(void)
char hmac[20], hmac_hex[41];
u32 nonce1, nonce2;
u64 key1, key2;
+ u8 msg[8];
int i, j;
for (i = 0; i < ARRAY_SIZE(tests); ++i) {
@@ -134,7 +136,10 @@ static int __init test_mptcp_crypto(void)
nonce1 = be32_to_cpu(*((__be32 *)&tests[i].msg[0]));
nonce2 = be32_to_cpu(*((__be32 *)&tests[i].msg[4]));
- mptcp_crypto_hmac_sha(key1, key2, nonce1, nonce2, hmac);
+ put_unaligned_be32(nonce1, &msg[0]);
+ put_unaligned_be32(nonce2, &msg[4]);
+
+ mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
for (j = 0; j < 20; ++j)
sprintf(&hmac_hex[j << 1], "%02x", hmac[j] & 0xff);
hmac_hex[40] = 0;
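The HMAC helper now takes a caller-built message rather than a fixed nonce pair, so the self-test packs its two 32-bit nonces big-endian into an 8-byte buffer first. A sketch of that packing with a local put_be32 standing in for put_unaligned_be32:

#include <stdint.h>
#include <stdio.h>

static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

int main(void)
{
	uint8_t msg[8];

	put_be32(&msg[0], 0x11223344);	/* nonce1 */
	put_be32(&msg[4], 0xaabbccdd);	/* nonce2 */
	for (int i = 0; i < 8; i++)
		printf("%02x", msg[i]);
	printf("\n");	/* prints 11223344aabbccdd */
	return 0;
}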
diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c
new file mode 100644
index 000000000000..a536586742f2
--- /dev/null
+++ b/net/mptcp/diag.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/* MPTCP socket monitoring support
+ *
+ * Copyright (c) 2019 Red Hat
+ *
+ * Author: Davide Caratti <dcaratti@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/net.h>
+#include <linux/inet_diag.h>
+#include <net/netlink.h>
+#include <uapi/linux/mptcp.h>
+#include "protocol.h"
+
+static int subflow_get_info(const struct sock *sk, struct sk_buff *skb)
+{
+ struct mptcp_subflow_context *sf;
+ struct nlattr *start;
+ u32 flags = 0;
+ int err;
+
+ start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP);
+ if (!start)
+ return -EMSGSIZE;
+
+ rcu_read_lock();
+ sf = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
+ if (!sf) {
+ err = 0;
+ goto nla_failure;
+ }
+
+ if (sf->mp_capable)
+ flags |= MPTCP_SUBFLOW_FLAG_MCAP_REM;
+ if (sf->request_mptcp)
+ flags |= MPTCP_SUBFLOW_FLAG_MCAP_LOC;
+ if (sf->mp_join)
+ flags |= MPTCP_SUBFLOW_FLAG_JOIN_REM;
+ if (sf->request_join)
+ flags |= MPTCP_SUBFLOW_FLAG_JOIN_LOC;
+ if (sf->backup)
+ flags |= MPTCP_SUBFLOW_FLAG_BKUP_REM;
+ if (sf->request_bkup)
+ flags |= MPTCP_SUBFLOW_FLAG_BKUP_LOC;
+ if (sf->fully_established)
+ flags |= MPTCP_SUBFLOW_FLAG_FULLY_ESTABLISHED;
+ if (sf->conn_finished)
+ flags |= MPTCP_SUBFLOW_FLAG_CONNECTED;
+ if (sf->map_valid)
+ flags |= MPTCP_SUBFLOW_FLAG_MAPVALID;
+
+ if (nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_TOKEN_REM, sf->remote_token) ||
+ nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_TOKEN_LOC, sf->token) ||
+ nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ,
+ sf->rel_write_seq) ||
+ nla_put_u64_64bit(skb, MPTCP_SUBFLOW_ATTR_MAP_SEQ, sf->map_seq,
+ MPTCP_SUBFLOW_ATTR_PAD) ||
+ nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_MAP_SFSEQ,
+ sf->map_subflow_seq) ||
+ nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_SSN_OFFSET, sf->ssn_offset) ||
+ nla_put_u16(skb, MPTCP_SUBFLOW_ATTR_MAP_DATALEN,
+ sf->map_data_len) ||
+ nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_FLAGS, flags) ||
+ nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_REM, sf->remote_id) ||
+ nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, sf->local_id)) {
+ err = -EMSGSIZE;
+ goto nla_failure;
+ }
+
+ rcu_read_unlock();
+ nla_nest_end(skb, start);
+ return 0;
+
+nla_failure:
+ rcu_read_unlock();
+ nla_nest_cancel(skb, start);
+ return err;
+}
+
+static size_t subflow_get_info_size(const struct sock *sk)
+{
+ size_t size = 0;
+
+ size += nla_total_size(0) + /* INET_ULP_INFO_MPTCP */
+ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_TOKEN_REM */
+ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_TOKEN_LOC */
+ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ */
+ nla_total_size_64bit(8) + /* MPTCP_SUBFLOW_ATTR_MAP_SEQ */
+ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_MAP_SFSEQ */
+ nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */
+ nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_MAP_DATALEN */
+ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_FLAGS */
+ nla_total_size(1) + /* MPTCP_SUBFLOW_ATTR_ID_REM */
+ nla_total_size(1) + /* MPTCP_SUBFLOW_ATTR_ID_LOC */
+ 0;
+ return size;
+}
+
+void mptcp_diag_subflow_init(struct tcp_ulp_ops *ops)
+{
+ ops->get_info = subflow_get_info;
+ ops->get_info_size = subflow_get_info_size;
+}
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
new file mode 100644
index 000000000000..0a6a15f3456d
--- /dev/null
+++ b/net/mptcp/mib.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/seq_file.h>
+#include <net/ip.h>
+#include <net/mptcp.h>
+#include <net/snmp.h>
+#include <net/net_namespace.h>
+
+#include "mib.h"
+
+static const struct snmp_mib mptcp_snmp_list[] = {
+ SNMP_MIB_ITEM("MPCapableSYNRX", MPTCP_MIB_MPCAPABLEPASSIVE),
+ SNMP_MIB_ITEM("MPCapableACKRX", MPTCP_MIB_MPCAPABLEPASSIVEACK),
+ SNMP_MIB_ITEM("MPCapableFallbackACK", MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK),
+ SNMP_MIB_ITEM("MPCapableFallbackSYNACK", MPTCP_MIB_MPCAPABLEACTIVEFALLBACK),
+ SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
+ SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
+ SNMP_MIB_ITEM("MPJoinSynRx", MPTCP_MIB_JOINSYNRX),
+ SNMP_MIB_ITEM("MPJoinSynAckRx", MPTCP_MIB_JOINSYNACKRX),
+ SNMP_MIB_ITEM("MPJoinSynAckHMacFailure", MPTCP_MIB_JOINSYNACKMAC),
+ SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX),
+ SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
+ SNMP_MIB_ITEM("DSSNotMatching", MPTCP_MIB_DSSNOMATCH),
+ SNMP_MIB_ITEM("InfiniteMapRx", MPTCP_MIB_INFINITEMAPRX),
+ SNMP_MIB_SENTINEL
+};
+
+/* mptcp_mib_alloc - allocate percpu mib counters
+ *
+ * These are allocated when the first mptcp socket is created so
+ * we do not waste percpu memory if mptcp isn't in use.
+ */
+bool mptcp_mib_alloc(struct net *net)
+{
+ struct mptcp_mib __percpu *mib = alloc_percpu(struct mptcp_mib);
+
+ if (!mib)
+ return false;
+
+ if (cmpxchg(&net->mib.mptcp_statistics, NULL, mib))
+ free_percpu(mib);
+
+ return true;
+}
+
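mptcp_mib_alloc() publishes the percpu counters with cmpxchg() so racing first sockets stay safe: exactly one allocation is installed and any loser frees its copy. A userspace sketch of this publish-once idiom using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(long *) stats;	/* NULL until first use */

static int stats_alloc(void)
{
	long *fresh = calloc(64, sizeof(*fresh));
	long *expected = NULL;

	if (!fresh)
		return 0;
	/* try to install; if someone beat us, drop our copy */
	if (!atomic_compare_exchange_strong(&stats, &expected, fresh))
		free(fresh);
	return 1;
}

int main(void)
{
	printf("%d installed=%p\n", stats_alloc(),
	       (void *)atomic_load(&stats));
	return 0;
}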
+void mptcp_seq_show(struct seq_file *seq)
+{
+ struct net *net = seq->private;
+ int i;
+
+ seq_puts(seq, "MPTcpExt:");
+ for (i = 0; mptcp_snmp_list[i].name; i++)
+ seq_printf(seq, " %s", mptcp_snmp_list[i].name);
+
+ seq_puts(seq, "\nMPTcpExt:");
+
+ if (!net->mib.mptcp_statistics) {
+ for (i = 0; mptcp_snmp_list[i].name; i++)
+ seq_puts(seq, " 0");
+
+ return;
+ }
+
+ for (i = 0; mptcp_snmp_list[i].name; i++)
+ seq_printf(seq, " %lu",
+ snmp_fold_field(net->mib.mptcp_statistics,
+ mptcp_snmp_list[i].entry));
+ seq_putc(seq, '\n');
+}
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
new file mode 100644
index 000000000000..d7de340fc997
--- /dev/null
+++ b/net/mptcp/mib.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+enum linux_mptcp_mib_field {
+ MPTCP_MIB_NUM = 0,
+ MPTCP_MIB_MPCAPABLEPASSIVE, /* Received SYN with MP_CAPABLE */
+ MPTCP_MIB_MPCAPABLEPASSIVEACK, /* Received third ACK with MP_CAPABLE */
+ MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK,/* Server-side fallback during 3-way handshake */
+ MPTCP_MIB_MPCAPABLEACTIVEFALLBACK, /* Client-side fallback during 3-way handshake */
+ MPTCP_MIB_RETRANSSEGS, /* Segments retransmitted at the MPTCP-level */
+ MPTCP_MIB_JOINNOTOKEN, /* Received MP_JOIN but the token was not found */
+ MPTCP_MIB_JOINSYNRX, /* Received a SYN + MP_JOIN */
+ MPTCP_MIB_JOINSYNACKRX, /* Received a SYN/ACK + MP_JOIN */
+ MPTCP_MIB_JOINSYNACKMAC, /* HMAC was wrong on SYN/ACK + MP_JOIN */
+ MPTCP_MIB_JOINACKRX, /* Received an ACK + MP_JOIN */
+ MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */
+ MPTCP_MIB_DSSNOMATCH, /* Received a new mapping that did not match the previous one */
+ MPTCP_MIB_INFINITEMAPRX, /* Received an infinite mapping */
+ __MPTCP_MIB_MAX
+};
+
+#define LINUX_MIB_MPTCP_MAX __MPTCP_MIB_MAX
+struct mptcp_mib {
+ unsigned long mibs[LINUX_MIB_MPTCP_MAX];
+};
+
+static inline void MPTCP_INC_STATS(struct net *net,
+ enum linux_mptcp_mib_field field)
+{
+ if (likely(net->mib.mptcp_statistics))
+ SNMP_INC_STATS(net->mib.mptcp_statistics, field);
+}
+
+static inline void __MPTCP_INC_STATS(struct net *net,
+ enum linux_mptcp_mib_field field)
+{
+ if (likely(net->mib.mptcp_statistics))
+ __SNMP_INC_STATS(net->mib.mptcp_statistics, field);
+}
+
+bool mptcp_mib_alloc(struct net *net);
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index fd2c3150e591..bd220ee4aac9 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -96,6 +96,38 @@ void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
mp_opt->rcvr_key, mp_opt->data_len);
break;
+ case MPTCPOPT_MP_JOIN:
+ mp_opt->mp_join = 1;
+ if (opsize == TCPOLEN_MPTCP_MPJ_SYN) {
+ mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
+ mp_opt->join_id = *ptr++;
+ mp_opt->token = get_unaligned_be32(ptr);
+ ptr += 4;
+ mp_opt->nonce = get_unaligned_be32(ptr);
+ ptr += 4;
+ pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
+ mp_opt->backup, mp_opt->join_id,
+ mp_opt->token, mp_opt->nonce);
+ } else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
+ mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP;
+ mp_opt->join_id = *ptr++;
+ mp_opt->thmac = get_unaligned_be64(ptr);
+ ptr += 8;
+ mp_opt->nonce = get_unaligned_be32(ptr);
+ ptr += 4;
+ pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
+ mp_opt->backup, mp_opt->join_id,
+ mp_opt->thmac, mp_opt->nonce);
+ } else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
+ ptr += 2;
+ memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
+ pr_debug("MP_JOIN hmac");
+ } else {
+ pr_warn("MP_JOIN bad option size");
+ mp_opt->mp_join = 0;
+ }
+ break;
+
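Per RFC 6824 the MP_JOIN SYN form is 12 bytes: option header, then a flags byte carrying the backup bit, an address ID, a 32-bit receiver token and a 32-bit sender nonce. A standalone sketch of the decode, with ptr positioned just past kind/len as in mptcp_parse_option() (constants local to the sketch):

#include <stdint.h>
#include <stdio.h>

#define MPJ_SYN_LEN 12	/* kind+len+subtype/flags+id+token+nonce */
#define FLAG_BACKUP 0x01

struct mpj_syn { uint8_t backup, join_id; uint32_t token, nonce; };

static uint32_t get_be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

/* ptr points just past kind/len; opsize is the full option length */
static int parse_mpj_syn(const uint8_t *ptr, int opsize, struct mpj_syn *o)
{
	if (opsize != MPJ_SYN_LEN)
		return -1;
	o->backup = ptr[0] & FLAG_BACKUP;
	o->join_id = ptr[1];
	o->token = get_be32(ptr + 2);
	o->nonce = get_be32(ptr + 6);
	return 0;
}

int main(void)
{
	uint8_t opt[] = { 0x01, 7, 0, 0, 0, 1, 0, 0, 0, 2 };
	struct mpj_syn o;

	if (!parse_mpj_syn(opt, MPJ_SYN_LEN, &o))
		printf("bkup=%u id=%u token=%u nonce=%u\n",
		       o.backup, o.join_id, o.token, o.nonce);
	return 0;
}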
case MPTCPOPT_DSS:
pr_debug("DSS");
ptr++;
@@ -178,6 +210,71 @@ void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
break;
+ case MPTCPOPT_ADD_ADDR:
+ mp_opt->echo = (*ptr++) & MPTCP_ADDR_ECHO;
+ if (!mp_opt->echo) {
+ if (opsize == TCPOLEN_MPTCP_ADD_ADDR ||
+ opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT)
+ mp_opt->family = MPTCP_ADDR_IPVERSION_4;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6 ||
+ opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT)
+ mp_opt->family = MPTCP_ADDR_IPVERSION_6;
+#endif
+ else
+ break;
+ } else {
+ if (opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE ||
+ opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT)
+ mp_opt->family = MPTCP_ADDR_IPVERSION_4;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ else if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE ||
+ opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT)
+ mp_opt->family = MPTCP_ADDR_IPVERSION_6;
+#endif
+ else
+ break;
+ }
+
+ mp_opt->add_addr = 1;
+ mp_opt->port = 0;
+ mp_opt->addr_id = *ptr++;
+ pr_debug("ADD_ADDR: id=%d", mp_opt->addr_id);
+ if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) {
+ memcpy((u8 *)&mp_opt->addr.s_addr, (u8 *)ptr, 4);
+ ptr += 4;
+ if (opsize == TCPOLEN_MPTCP_ADD_ADDR_PORT ||
+ opsize == TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT) {
+ mp_opt->port = get_unaligned_be16(ptr);
+ ptr += 2;
+ }
+ }
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ else {
+ memcpy(mp_opt->addr6.s6_addr, (u8 *)ptr, 16);
+ ptr += 16;
+ if (opsize == TCPOLEN_MPTCP_ADD_ADDR6_PORT ||
+ opsize == TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT) {
+ mp_opt->port = get_unaligned_be16(ptr);
+ ptr += 2;
+ }
+ }
+#endif
+ if (!mp_opt->echo) {
+ mp_opt->ahmac = get_unaligned_be64(ptr);
+ ptr += 8;
+ }
+ break;
+
+ case MPTCPOPT_RM_ADDR:
+ if (opsize != TCPOLEN_MPTCP_RM_ADDR_BASE)
+ break;
+
+ mp_opt->rm_addr = 1;
+ mp_opt->rm_id = *ptr++;
+ pr_debug("RM_ADDR: id=%d", mp_opt->rm_id);
+ break;
+
default:
break;
}
@@ -231,6 +328,16 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
opts->sndr_key = subflow->local_key;
*size = TCPOLEN_MPTCP_MPC_SYN;
return true;
+ } else if (subflow->request_join) {
+ pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
+ subflow->local_nonce);
+ opts->suboptions = OPTION_MPTCP_MPJ_SYN;
+ opts->join_id = subflow->local_id;
+ opts->token = subflow->remote_token;
+ opts->nonce = subflow->local_nonce;
+ opts->backup = subflow->request_bkup;
+ *size = TCPOLEN_MPTCP_MPJ_SYN;
+ return true;
}
return false;
}
@@ -240,16 +347,55 @@ void mptcp_rcv_synsent(struct sock *sk)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct tcp_sock *tp = tcp_sk(sk);
- pr_debug("subflow=%p", subflow);
if (subflow->request_mptcp && tp->rx_opt.mptcp.mp_capable) {
subflow->mp_capable = 1;
subflow->can_ack = 1;
subflow->remote_key = tp->rx_opt.mptcp.sndr_key;
- } else {
+ pr_debug("subflow=%p, remote_key=%llu", subflow,
+ subflow->remote_key);
+ } else if (subflow->request_join && tp->rx_opt.mptcp.mp_join) {
+ subflow->mp_join = 1;
+ subflow->thmac = tp->rx_opt.mptcp.thmac;
+ subflow->remote_nonce = tp->rx_opt.mptcp.nonce;
+ pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
+ subflow->thmac, subflow->remote_nonce);
+ } else if (subflow->request_mptcp) {
tcp_sk(sk)->is_mptcp = 0;
}
}
+/* MP_JOIN client subflow must wait for 4th ack before sending any data:
+ * TCP can't schedule delack timer before the subflow is fully established.
+ * MPTCP uses the delack timer to do 3rd ack retransmissions
+ */
+static void schedule_3rdack_retransmission(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ unsigned long timeout;
+
+ /* reschedule with a timeout above RTT, as we must look only for drop */
+ if (tp->srtt_us)
+ timeout = tp->srtt_us << 1;
+ else
+ timeout = TCP_TIMEOUT_INIT;
+
+ WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
+ icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
+ icsk->icsk_ack.timeout = timeout;
+ sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
+}
+
+static void clear_3rdack_retransmission(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ sk_stop_timer(sk, &icsk->icsk_delack_timer);
+ icsk->icsk_ack.timeout = 0;
+ icsk->icsk_ack.ato = 0;
+ icsk->icsk_ack.pending &= ~(ICSK_ACK_SCHED | ICSK_ACK_TIMER);
+}
+
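The retransmission timer is armed a bit above the measured RTT, or at the initial TCP timeout before any RTT sample exists, since it should only fire for a genuinely dropped third ACK. A one-function sketch of that choice (the timeout unit and TCP_TIMEOUT_INIT value are stand-ins):

#include <stdio.h>

#define TCP_TIMEOUT_INIT 1000	/* stand-in for the initial RTO */

static unsigned long ack3_timeout(unsigned long srtt)
{
	/* above RTT: a retransmit should mean the ack was really lost */
	return srtt ? srtt << 1 : TCP_TIMEOUT_INIT;
}

int main(void)
{
	printf("%lu %lu\n", ack3_timeout(0), ack3_timeout(250));
	return 0;
}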
static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
unsigned int *size,
unsigned int remaining,
@@ -259,17 +405,21 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
struct mptcp_ext *mpext;
unsigned int data_len;
- pr_debug("subflow=%p fourth_ack=%d seq=%x:%x remaining=%d", subflow,
- subflow->fourth_ack, subflow->snd_isn,
- skb ? TCP_SKB_CB(skb)->seq : 0, remaining);
+ /* When skb is not available, we better over-estimate the emitted
+ * options len. A full DSS option (28 bytes) is longer than
+ * TCPOLEN_MPTCP_MPC_ACK_DATA(22) or TCPOLEN_MPTCP_MPJ_ACK(24), so
+ * tell the caller to defer the estimate to
+ * mptcp_established_options_dss(), which will reserve enough space.
+ */
+ if (!skb)
+ return false;
- if (subflow->mp_capable && !subflow->fourth_ack && skb &&
- subflow->snd_isn == TCP_SKB_CB(skb)->seq) {
- /* When skb is not available, we better over-estimate the
- * emitted options len. A full DSS option is longer than
- * TCPOLEN_MPTCP_MPC_ACK_DATA, so let's the caller try to fit
- * that.
- */
+ /* MPC/MPJ needed only on 3rd ack packet */
+ if (subflow->fully_established ||
+ subflow->snd_isn != TCP_SKB_CB(skb)->seq)
+ return false;
+
+ if (subflow->mp_capable) {
mpext = mptcp_get_ext(skb);
data_len = mpext ? mpext->data_len : 0;
@@ -297,6 +447,14 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
data_len);
return true;
+ } else if (subflow->mp_join) {
+ opts->suboptions = OPTION_MPTCP_MPJ_ACK;
+ memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
+ *size = TCPOLEN_MPTCP_MPJ_ACK;
+ pr_debug("subflow=%p", subflow);
+
+ schedule_3rdack_retransmission(sk);
+ return true;
}
return false;
}
@@ -304,21 +462,22 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
struct mptcp_ext *ext)
{
- ext->data_fin = 1;
-
if (!ext->use_map) {
/* RFC6824 requires a DSS mapping with specific values
* if DATA_FIN is set but no data payload is mapped
*/
+ ext->data_fin = 1;
ext->use_map = 1;
ext->dsn64 = 1;
- ext->data_seq = mptcp_sk(subflow->conn)->write_seq;
+ ext->data_seq = subflow->data_fin_tx_seq;
ext->subflow_seq = 0;
ext->data_len = 1;
- } else {
- /* If there's an existing DSS mapping, DATA_FIN consumes
- * 1 additional byte of mapping space.
+ } else if (ext->data_seq + ext->data_len == subflow->data_fin_tx_seq) {
+ /* If there's an existing DSS mapping and it is the
+ * final mapping, DATA_FIN consumes 1 additional byte of
+ * mapping space.
*/
+ ext->data_fin = 1;
ext->data_len++;
}
}
@@ -334,8 +493,6 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
struct mptcp_sock *msk;
unsigned int ack_size;
bool ret = false;
- bool can_ack;
- u64 ack_seq;
u8 tcp_fin;
if (skb) {
@@ -356,8 +513,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
if (mpext)
opts->ext_copy = *mpext;
- if (skb && tcp_fin &&
- subflow->conn->sk_state != TCP_ESTABLISHED)
+ if (skb && tcp_fin && subflow->data_fin_tx_enable)
mptcp_write_data_fin(subflow, &opts->ext_copy);
ret = true;
}
@@ -365,19 +521,9 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
/* passive sockets msk will set the 'can_ack' after accept(), even
* if the first subflow may already have the remote key handy
*/
- can_ack = true;
opts->ext_copy.use_ack = 0;
msk = mptcp_sk(subflow->conn);
- if (likely(msk && READ_ONCE(msk->can_ack))) {
- ack_seq = msk->ack_seq;
- } else if (subflow->can_ack) {
- mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
- ack_seq++;
- } else {
- can_ack = false;
- }
-
- if (unlikely(!can_ack)) {
+ if (!READ_ONCE(msk->can_ack)) {
*size = ALIGN(dss_size, 4);
return ret;
}
@@ -390,7 +536,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
dss_size += ack_size;
- opts->ext_copy.data_ack = ack_seq;
+ opts->ext_copy.data_ack = msk->ack_seq;
opts->ext_copy.ack64 = 1;
opts->ext_copy.use_ack = 1;
@@ -398,6 +544,83 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
return true;
}
+static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id,
+ struct in_addr *addr)
+{
+ u8 hmac[MPTCP_ADDR_HMAC_LEN];
+ u8 msg[7];
+
+ msg[0] = addr_id;
+ memcpy(&msg[1], &addr->s_addr, 4);
+ msg[5] = 0;
+ msg[6] = 0;
+
+ mptcp_crypto_hmac_sha(key1, key2, msg, 7, hmac);
+
+ return get_unaligned_be64(hmac);
+}
+
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id,
+ struct in6_addr *addr)
+{
+ u8 hmac[MPTCP_ADDR_HMAC_LEN];
+ u8 msg[19];
+
+ msg[0] = addr_id;
+ memcpy(&msg[1], &addr->s6_addr, 16);
+ msg[17] = 0;
+ msg[18] = 0;
+
+ mptcp_crypto_hmac_sha(key1, key2, msg, 19, hmac);
+
+ return get_unaligned_be64(hmac);
+}
+#endif
+
+static bool mptcp_established_options_addr(struct sock *sk,
+ unsigned int *size,
+ unsigned int remaining,
+ struct mptcp_out_options *opts)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ struct mptcp_addr_info saddr;
+ int len;
+
+ if (!mptcp_pm_should_signal(msk) ||
+ !(mptcp_pm_addr_signal(msk, remaining, &saddr)))
+ return false;
+
+ len = mptcp_add_addr_len(saddr.family);
+ if (remaining < len)
+ return false;
+
+ *size = len;
+ opts->addr_id = saddr.id;
+ if (saddr.family == AF_INET) {
+ opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
+ opts->addr = saddr.addr;
+ opts->ahmac = add_addr_generate_hmac(msk->local_key,
+ msk->remote_key,
+ opts->addr_id,
+ &opts->addr);
+ }
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ else if (saddr.family == AF_INET6) {
+ opts->suboptions |= OPTION_MPTCP_ADD_ADDR6;
+ opts->addr6 = saddr.addr6;
+ opts->ahmac = add_addr6_generate_hmac(msk->local_key,
+ msk->remote_key,
+ opts->addr_id,
+ &opts->addr6);
+ }
+#endif
+ pr_debug("addr_id=%d, ahmac=%llu", opts->addr_id, opts->ahmac);
+
+ return true;
+}
+
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
unsigned int *size, unsigned int remaining,
struct mptcp_out_options *opts)
@@ -405,6 +628,8 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
unsigned int opt_size = 0;
bool ret = false;
+ opts->suboptions = 0;
+
if (mptcp_established_options_mp(sk, skb, &opt_size, remaining, opts))
ret = true;
else if (mptcp_established_options_dss(sk, skb, &opt_size, remaining,
@@ -419,6 +644,11 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
*size += opt_size;
remaining -= opt_size;
+ if (mptcp_established_options_addr(sk, &opt_size, remaining, opts)) {
+ *size += opt_size;
+ remaining -= opt_size;
+ ret = true;
+ }
return ret;
}
@@ -435,54 +665,194 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
pr_debug("subflow_req=%p, local_key=%llu",
subflow_req, subflow_req->local_key);
return true;
+ } else if (subflow_req->mp_join) {
+ opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
+ opts->backup = subflow_req->backup;
+ opts->join_id = subflow_req->local_id;
+ opts->thmac = subflow_req->thmac;
+ opts->nonce = subflow_req->local_nonce;
+ pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
+ subflow_req, opts->backup, opts->join_id,
+ opts->thmac, opts->nonce);
+ *size = TCPOLEN_MPTCP_MPJ_SYNACK;
+ return true;
}
return false;
}
-static bool check_fourth_ack(struct mptcp_subflow_context *subflow,
- struct sk_buff *skb,
- struct mptcp_options_received *mp_opt)
+static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
+ struct mptcp_subflow_context *subflow,
+ struct sk_buff *skb,
+ struct mptcp_options_received *mp_opt)
{
/* here we can process OoO, in-window pkts, only in-sequence 4th ack
- * are relevant
+ * will make the subflow fully established
*/
- if (likely(subflow->fourth_ack ||
- TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1))
- return true;
+ if (likely(subflow->fully_established)) {
+ /* on passive sockets, check for 3rd ack retransmission
+ * note that msk is always set by subflow_syn_recv_sock()
+ * for mp_join subflows
+ */
+ if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 &&
+ TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
+ subflow->mp_join && mp_opt->mp_join &&
+ READ_ONCE(msk->pm.server_side))
+ tcp_send_ack(sk);
+ goto fully_established;
+ }
- if (mp_opt->use_ack)
- subflow->fourth_ack = 1;
+ /* we should process OoO packets before the first subflow is fully
+	 * established, but that is not expected for MP_JOIN subflows
+ */
+ if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1)
+ return subflow->mp_capable;
- if (subflow->can_ack)
- return true;
+ if (mp_opt->use_ack) {
+ /* subflows are fully established as soon as we get any
+ * additional ack.
+ */
+ subflow->fully_established = 1;
+ goto fully_established;
+ }
+
+ WARN_ON_ONCE(subflow->can_ack);
/* If the first established packet does not contain MP_CAPABLE + data
* then fallback to TCP
*/
if (!mp_opt->mp_capable) {
subflow->mp_capable = 0;
- tcp_sk(mptcp_subflow_tcp_sock(subflow))->is_mptcp = 0;
+ tcp_sk(sk)->is_mptcp = 0;
return false;
}
+
+ subflow->fully_established = 1;
subflow->remote_key = mp_opt->sndr_key;
subflow->can_ack = 1;
+
+fully_established:
+ if (likely(subflow->pm_notified))
+ return true;
+
+ subflow->pm_notified = 1;
+ if (subflow->mp_join) {
+ clear_3rdack_retransmission(sk);
+ mptcp_pm_subflow_established(msk, subflow);
+ } else {
+ mptcp_pm_fully_established(msk);
+ }
return true;
}
+static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
+{
+ u32 old_ack32, cur_ack32;
+
+ if (use_64bit)
+ return cur_ack;
+
+ old_ack32 = (u32)old_ack;
+ cur_ack32 = (u32)cur_ack;
+ cur_ack = (old_ack & GENMASK_ULL(63, 32)) + cur_ack32;
+ if (unlikely(before(cur_ack32, old_ack32)))
+ return cur_ack + (1LL << 32);
+ return cur_ack;
+}
+
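expand_ack() widens a 32-bit DATA_ACK using the upper word of the last known 64-bit ack; the subtle part is detecting a 32-bit wrap. A sketch of the general idiom; the wrap test below uses the numerically-smaller-but-serially-newer check, as an illustration of the technique rather than a verbatim copy of the patch's condition:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* serial-number "a earlier than b", as TCP's before() */
static bool before32(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

static uint64_t expand_seq(uint64_t old_seq, uint32_t cur32)
{
	uint32_t old32 = (uint32_t)old_seq;
	uint64_t cur = (old_seq & 0xffffffff00000000ULL) + cur32;

	/* numerically smaller but serially newer: the 32-bit space wrapped */
	if (cur32 < old32 && before32(old32, cur32))
		cur += 1ULL << 32;
	return cur;
}

int main(void)
{
	uint64_t old = 0x1fffffff0ULL;	/* lower word close to the wrap */

	printf("%llx\n", (unsigned long long)expand_seq(old, 0x10));
	/* prints 200000010: the upper word advanced across the wrap */
	return 0;
}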
+static void update_una(struct mptcp_sock *msk,
+ struct mptcp_options_received *mp_opt)
+{
+ u64 new_snd_una, snd_una, old_snd_una = atomic64_read(&msk->snd_una);
+ u64 write_seq = READ_ONCE(msk->write_seq);
+
+ /* avoid ack expansion on update conflict, to reduce the risk of
+ * wrongly expanding to a future ack sequence number, which is way
+ * more dangerous than missing an ack
+ */
+ new_snd_una = expand_ack(old_snd_una, mp_opt->data_ack, mp_opt->ack64);
+
+ /* ACK for data not even sent yet? Ignore. */
+ if (after64(new_snd_una, write_seq))
+ new_snd_una = old_snd_una;
+
+ while (after64(new_snd_una, old_snd_una)) {
+ snd_una = old_snd_una;
+ old_snd_una = atomic64_cmpxchg(&msk->snd_una, snd_una,
+ new_snd_una);
+ if (old_snd_una == snd_una) {
+ mptcp_data_acked((struct sock *)msk);
+ break;
+ }
+ }
+}
+
+static bool add_addr_hmac_valid(struct mptcp_sock *msk,
+ struct mptcp_options_received *mp_opt)
+{
+ u64 hmac = 0;
+
+ if (mp_opt->echo)
+ return true;
+
+ if (mp_opt->family == MPTCP_ADDR_IPVERSION_4)
+ hmac = add_addr_generate_hmac(msk->remote_key,
+ msk->local_key,
+ mp_opt->addr_id, &mp_opt->addr);
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ else
+ hmac = add_addr6_generate_hmac(msk->remote_key,
+ msk->local_key,
+ mp_opt->addr_id, &mp_opt->addr6);
+#endif
+
+ pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
+ msk, (unsigned long long)hmac,
+ (unsigned long long)mp_opt->ahmac);
+
+ return hmac == mp_opt->ahmac;
+}
+
void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
struct tcp_options_received *opt_rx)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
struct mptcp_options_received *mp_opt;
struct mptcp_ext *mpext;
mp_opt = &opt_rx->mptcp;
- if (!check_fourth_ack(subflow, skb, mp_opt))
+ if (!check_fully_established(msk, sk, subflow, skb, mp_opt))
return;
+ if (mp_opt->add_addr && add_addr_hmac_valid(msk, mp_opt)) {
+ struct mptcp_addr_info addr;
+
+ addr.port = htons(mp_opt->port);
+ addr.id = mp_opt->addr_id;
+ if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) {
+ addr.family = AF_INET;
+ addr.addr = mp_opt->addr;
+ }
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ else if (mp_opt->family == MPTCP_ADDR_IPVERSION_6) {
+ addr.family = AF_INET6;
+ addr.addr6 = mp_opt->addr6;
+ }
+#endif
+ if (!mp_opt->echo)
+ mptcp_pm_add_addr_received(msk, &addr);
+ mp_opt->add_addr = 0;
+ }
+
if (!mp_opt->dss)
return;
+ /* we can't wait for recvmsg() to update the ack_seq, otherwise
+	 * monodirectional flows will get stuck
+ */
+ if (mp_opt->use_ack)
+ update_una(msk, mp_opt);
+
mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
if (!mpext)
return;
@@ -509,12 +879,6 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
mpext->use_map = 1;
}
- if (mp_opt->use_ack) {
- mpext->data_ack = mp_opt->data_ack;
- mpext->use_ack = 1;
- mpext->ack64 = mp_opt->ack64;
- }
-
mpext->data_fin = mp_opt->data_fin;
}
@@ -533,10 +897,9 @@ void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts)
else
len = TCPOLEN_MPTCP_MPC_ACK;
- *ptr++ = htonl((TCPOPT_MPTCP << 24) | (len << 16) |
- (MPTCPOPT_MP_CAPABLE << 12) |
- (MPTCP_SUPPORTED_VERSION << 8) |
- MPTCP_CAP_HMAC_SHA256);
+	*ptr++ = mptcp_option(MPTCPOPT_MP_CAPABLE, len,
+			      MPTCP_SUPPORTED_VERSION,
+			      MPTCP_CAP_HMAC_SHA256);
if (!((OPTION_MPTCP_MPC_SYNACK | OPTION_MPTCP_MPC_ACK) &
opts->suboptions))
@@ -558,6 +921,77 @@ void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts)
}
mp_capable_done:
+ if (OPTION_MPTCP_ADD_ADDR & opts->suboptions) {
+ if (opts->ahmac)
+ *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
+ TCPOLEN_MPTCP_ADD_ADDR, 0,
+ opts->addr_id);
+ else
+ *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
+ TCPOLEN_MPTCP_ADD_ADDR_BASE,
+ MPTCP_ADDR_ECHO,
+ opts->addr_id);
+ memcpy((u8 *)ptr, (u8 *)&opts->addr.s_addr, 4);
+ ptr += 1;
+ if (opts->ahmac) {
+ put_unaligned_be64(opts->ahmac, ptr);
+ ptr += 2;
+ }
+ }
+
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ if (OPTION_MPTCP_ADD_ADDR6 & opts->suboptions) {
+ if (opts->ahmac)
+ *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
+ TCPOLEN_MPTCP_ADD_ADDR6, 0,
+ opts->addr_id);
+ else
+ *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR,
+ TCPOLEN_MPTCP_ADD_ADDR6_BASE,
+ MPTCP_ADDR_ECHO,
+ opts->addr_id);
+ memcpy((u8 *)ptr, opts->addr6.s6_addr, 16);
+ ptr += 4;
+ if (opts->ahmac) {
+ put_unaligned_be64(opts->ahmac, ptr);
+ ptr += 2;
+ }
+ }
+#endif
+
+ if (OPTION_MPTCP_RM_ADDR & opts->suboptions) {
+ *ptr++ = mptcp_option(MPTCPOPT_RM_ADDR,
+ TCPOLEN_MPTCP_RM_ADDR_BASE,
+ 0, opts->rm_id);
+ }
+
+ if (OPTION_MPTCP_MPJ_SYN & opts->suboptions) {
+ *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
+ TCPOLEN_MPTCP_MPJ_SYN,
+ opts->backup, opts->join_id);
+ put_unaligned_be32(opts->token, ptr);
+ ptr += 1;
+ put_unaligned_be32(opts->nonce, ptr);
+ ptr += 1;
+ }
+
+ if (OPTION_MPTCP_MPJ_SYNACK & opts->suboptions) {
+ *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
+ TCPOLEN_MPTCP_MPJ_SYNACK,
+ opts->backup, opts->join_id);
+ put_unaligned_be64(opts->thmac, ptr);
+ ptr += 2;
+ put_unaligned_be32(opts->nonce, ptr);
+ ptr += 1;
+ }
+
+ if (OPTION_MPTCP_MPJ_ACK & opts->suboptions) {
+ *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
+ TCPOLEN_MPTCP_MPJ_ACK, 0, 0);
+ memcpy(ptr, opts->hmac, MPTCPOPT_HMAC_LEN);
+ ptr += 5;
+ }
+
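
All writers in this function funnel through mptcp_option(), the protocol.h helper introduced by this series to pack kind/length/subtype/flags into a single big-endian word. A standalone sketch of that packing (the option kind and the RM_ADDR subtype/length values follow RFC 8684):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TCPOPT_MPTCP 30     /* IANA TCP option kind for MPTCP */

    static uint32_t mptcp_option(uint8_t subopt, uint8_t len,
                                 uint8_t nib, uint8_t field)
    {
        return htonl((TCPOPT_MPTCP << 24) | (len << 16) |
                     ((subopt & 0xF) << 12) | ((nib & 0xF) << 8) | field);
    }

    int main(void)
    {
        /* RM_ADDR: subtype 4, length 4, flags 0, address id 7 */
        printf("0x%08x\n", ntohl(mptcp_option(4, 4, 0, 7))); /* 0x1e044007 */
        return 0;
    }
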
if (opts->ext_copy.use_ack || opts->ext_copy.use_map) {
struct mptcp_ext *mpext = &opts->ext_copy;
u8 len = TCPOLEN_MPTCP_DSS_BASE;
@@ -579,10 +1013,7 @@ mp_capable_done:
flags |= MPTCP_DSS_DATA_FIN;
}
- *ptr++ = htonl((TCPOPT_MPTCP << 24) |
- (len << 16) |
- (MPTCPOPT_DSS << 12) |
- (flags));
+ *ptr++ = mptcp_option(MPTCPOPT_DSS, len, 0, flags);
if (mpext->use_ack) {
put_unaligned_be64(mpext->data_ack, ptr);
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
new file mode 100644
index 000000000000..064639f72487
--- /dev/null
+++ b/net/mptcp/pm.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Multipath TCP
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+#include <linux/kernel.h>
+#include <net/tcp.h>
+#include <net/mptcp.h>
+#include "protocol.h"
+
+static struct workqueue_struct *pm_wq;
+
+/* path manager command handlers */
+
+int mptcp_pm_announce_addr(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr)
+{
+ pr_debug("msk=%p, local_id=%d", msk, addr->id);
+
+ msk->pm.local = *addr;
+ WRITE_ONCE(msk->pm.addr_signal, true);
+ return 0;
+}
+
+int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
+{
+ return -ENOTSUPP;
+}
+
+int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id)
+{
+ return -ENOTSUPP;
+}
+
+/* path manager event handlers */
+
+void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
+{
+ struct mptcp_pm_data *pm = &msk->pm;
+
+ pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
+
+ WRITE_ONCE(pm->server_side, server_side);
+}
+
+bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
+{
+ struct mptcp_pm_data *pm = &msk->pm;
+ int ret;
+
+ pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
+ pm->subflows_max, READ_ONCE(pm->accept_subflow));
+
+ /* try to avoid acquiring the lock below */
+ if (!READ_ONCE(pm->accept_subflow))
+ return false;
+
+ spin_lock_bh(&pm->lock);
+ ret = pm->subflows < pm->subflows_max;
+ if (ret && ++pm->subflows == pm->subflows_max)
+ WRITE_ONCE(pm->accept_subflow, false);
+ spin_unlock_bh(&pm->lock);
+
+ return ret;
+}
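
mptcp_pm_allow_new_subflow() is a double-checked limit: the lockless READ_ONCE() is just a fast-path hint, the budget is consumed only under pm->lock, and the hint flips off once the last slot is taken. The same shape in miniature, with a pthread mutex for the spinlock and a volatile flag loosely standing in for READ_ONCE/WRITE_ONCE:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER;
    static volatile bool accept_subflow = true; /* racy fast-path hint */
    static int subflows, subflows_max = 2;

    static bool allow_new_subflow(void)
    {
        bool ret;

        if (!accept_subflow)        /* cheap refusal, no lock taken */
            return false;

        pthread_mutex_lock(&pm_lock);
        ret = subflows < subflows_max;
        if (ret && ++subflows == subflows_max)
            accept_subflow = false; /* future callers short-circuit */
        pthread_mutex_unlock(&pm_lock);
        return ret;
    }

    int main(void)
    {
        return (allow_new_subflow() && allow_new_subflow() &&
                !allow_new_subflow()) ? 0 : 1;
    }
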
+
+/* return true if the new status bit is currently cleared, that is, this event
+ * can be served, possibly by an already scheduled work
+ */
+static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
+ enum mptcp_pm_status new_status)
+{
+ pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
+ BIT(new_status));
+ if (msk->pm.status & BIT(new_status))
+ return false;
+
+ msk->pm.status |= BIT(new_status);
+ if (queue_work(pm_wq, &msk->pm.work))
+ sock_hold((struct sock *)msk);
+ return true;
+}
+
+void mptcp_pm_fully_established(struct mptcp_sock *msk)
+{
+ struct mptcp_pm_data *pm = &msk->pm;
+
+ pr_debug("msk=%p", msk);
+
+ /* try to avoid acquiring the lock below */
+ if (!READ_ONCE(pm->work_pending))
+ return;
+
+ spin_lock_bh(&pm->lock);
+
+ if (READ_ONCE(pm->work_pending))
+ mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);
+
+ spin_unlock_bh(&pm->lock);
+}
+
+void mptcp_pm_connection_closed(struct mptcp_sock *msk)
+{
+ pr_debug("msk=%p", msk);
+}
+
+void mptcp_pm_subflow_established(struct mptcp_sock *msk,
+ struct mptcp_subflow_context *subflow)
+{
+ struct mptcp_pm_data *pm = &msk->pm;
+
+ pr_debug("msk=%p", msk);
+
+ if (!READ_ONCE(pm->work_pending))
+ return;
+
+ spin_lock_bh(&pm->lock);
+
+ if (READ_ONCE(pm->work_pending))
+ mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);
+
+ spin_unlock_bh(&pm->lock);
+}
+
+void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
+{
+ pr_debug("msk=%p", msk);
+}
+
+void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr)
+{
+ struct mptcp_pm_data *pm = &msk->pm;
+
+ pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
+ READ_ONCE(pm->accept_addr));
+
+	/* avoid acquiring the lock if there is no room for further addresses */
+ if (!READ_ONCE(pm->accept_addr))
+ return;
+
+ spin_lock_bh(&pm->lock);
+
+	/* be sure there is something to signal, re-checking under the PM lock */
+ if (READ_ONCE(pm->accept_addr) &&
+ mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
+ pm->remote = *addr;
+
+ spin_unlock_bh(&pm->lock);
+}
+
+/* path manager helpers */
+
+bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ struct mptcp_addr_info *saddr)
+{
+ int ret = false;
+
+ spin_lock_bh(&msk->pm.lock);
+
+ /* double check after the lock is acquired */
+ if (!mptcp_pm_should_signal(msk))
+ goto out_unlock;
+
+ if (remaining < mptcp_add_addr_len(msk->pm.local.family))
+ goto out_unlock;
+
+ *saddr = msk->pm.local;
+ WRITE_ONCE(msk->pm.addr_signal, false);
+ ret = true;
+
+out_unlock:
+ spin_unlock_bh(&msk->pm.lock);
+ return ret;
+}
+
+int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
+{
+ return mptcp_pm_nl_get_local_id(msk, skc);
+}
+
+static void pm_worker(struct work_struct *work)
+{
+ struct mptcp_pm_data *pm = container_of(work, struct mptcp_pm_data,
+ work);
+ struct mptcp_sock *msk = container_of(pm, struct mptcp_sock, pm);
+ struct sock *sk = (struct sock *)msk;
+
+ lock_sock(sk);
+ spin_lock_bh(&msk->pm.lock);
+
+ pr_debug("msk=%p status=%x", msk, pm->status);
+ if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
+ pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
+ mptcp_pm_nl_add_addr_received(msk);
+ }
+ if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
+ pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
+ mptcp_pm_nl_fully_established(msk);
+ }
+ if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
+ pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
+ mptcp_pm_nl_subflow_established(msk);
+ }
+
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+ sock_put(sk);
+}
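
mptcp_pm_schedule_work() and pm_worker() form a coalescing event dispatch: setting a status bit queues the work (and takes a socket reference) only on the 0->1 transition, and the worker clears each bit before acting so a later event re-arms cleanly. A single-threaded sketch of the bitmask protocol; the kernel manipulates the mask under pm->lock:

    #include <stdio.h>

    #define BIT(n) (1U << (n))

    enum pm_status {
        PM_ADD_ADDR_RECEIVED,
        PM_ESTABLISHED,
        PM_SUBFLOW_ESTABLISHED,
    };

    static unsigned int status;

    /* returns nonzero iff the caller should queue the worker (0 -> 1 edge) */
    static int schedule_event(enum pm_status ev)
    {
        if (status & BIT(ev))
            return 0;   /* already pending, the worker will see it */
        status |= BIT(ev);
        return 1;
    }

    static void worker(void)
    {
        if (status & BIT(PM_ESTABLISHED)) {
            status &= ~BIT(PM_ESTABLISHED);     /* clear, then act */
            printf("handle ESTABLISHED\n");
        }
        if (status & BIT(PM_ADD_ADDR_RECEIVED)) {
            status &= ~BIT(PM_ADD_ADDR_RECEIVED);
            printf("handle ADD_ADDR\n");
        }
    }

    int main(void)
    {
        if (schedule_event(PM_ESTABLISHED))
            worker();
        return 0;
    }
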
+
+void mptcp_pm_data_init(struct mptcp_sock *msk)
+{
+ msk->pm.add_addr_signaled = 0;
+ msk->pm.add_addr_accepted = 0;
+ msk->pm.local_addr_used = 0;
+ msk->pm.subflows = 0;
+ WRITE_ONCE(msk->pm.work_pending, false);
+ WRITE_ONCE(msk->pm.addr_signal, false);
+ WRITE_ONCE(msk->pm.accept_addr, false);
+ WRITE_ONCE(msk->pm.accept_subflow, false);
+ msk->pm.status = 0;
+
+ spin_lock_init(&msk->pm.lock);
+ INIT_WORK(&msk->pm.work, pm_worker);
+
+ mptcp_pm_nl_data_init(msk);
+}
+
+void mptcp_pm_close(struct mptcp_sock *msk)
+{
+ if (cancel_work_sync(&msk->pm.work))
+ sock_put((struct sock *)msk);
+}
+
+void mptcp_pm_init(void)
+{
+ pm_wq = alloc_workqueue("pm_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 8);
+ if (!pm_wq)
+ panic("Failed to allocate workqueue");
+
+ mptcp_pm_nl_init();
+}
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
new file mode 100644
index 000000000000..a0ce7f324499
--- /dev/null
+++ b/net/mptcp/pm_netlink.c
@@ -0,0 +1,857 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Multipath TCP
+ *
+ * Copyright (c) 2020, Red Hat, Inc.
+ */
+
+#include <linux/inet.h>
+#include <linux/kernel.h>
+#include <net/tcp.h>
+#include <net/netns/generic.h>
+#include <net/mptcp.h>
+#include <net/genetlink.h>
+#include <uapi/linux/mptcp.h>
+
+#include "protocol.h"
+
+/* forward declaration */
+static struct genl_family mptcp_genl_family;
+
+static int pm_nl_pernet_id;
+
+struct mptcp_pm_addr_entry {
+ struct list_head list;
+ unsigned int flags;
+ int ifindex;
+ struct mptcp_addr_info addr;
+ struct rcu_head rcu;
+};
+
+struct pm_nl_pernet {
+ /* protects pernet updates */
+ spinlock_t lock;
+ struct list_head local_addr_list;
+ unsigned int addrs;
+ unsigned int add_addr_signal_max;
+ unsigned int add_addr_accept_max;
+ unsigned int local_addr_max;
+ unsigned int subflows_max;
+ unsigned int next_id;
+};
+
+#define MPTCP_PM_ADDR_MAX 8
+
+static bool addresses_equal(const struct mptcp_addr_info *a,
+ struct mptcp_addr_info *b, bool use_port)
+{
+ bool addr_equals = false;
+
+ if (a->family != b->family)
+ return false;
+
+ if (a->family == AF_INET)
+ addr_equals = a->addr.s_addr == b->addr.s_addr;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ else
+ addr_equals = !ipv6_addr_cmp(&a->addr6, &b->addr6);
+#endif
+
+ if (!addr_equals)
+ return false;
+ if (!use_port)
+ return true;
+
+ return a->port == b->port;
+}
+
+static void local_address(const struct sock_common *skc,
+ struct mptcp_addr_info *addr)
+{
+ addr->port = 0;
+ addr->family = skc->skc_family;
+ if (addr->family == AF_INET)
+ addr->addr.s_addr = skc->skc_rcv_saddr;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ else if (addr->family == AF_INET6)
+ addr->addr6 = skc->skc_v6_rcv_saddr;
+#endif
+}
+
+static void remote_address(const struct sock_common *skc,
+ struct mptcp_addr_info *addr)
+{
+ addr->family = skc->skc_family;
+ addr->port = skc->skc_dport;
+ if (addr->family == AF_INET)
+ addr->addr.s_addr = skc->skc_daddr;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ else if (addr->family == AF_INET6)
+ addr->addr6 = skc->skc_v6_daddr;
+#endif
+}
+
+static bool lookup_subflow_by_saddr(const struct list_head *list,
+ struct mptcp_addr_info *saddr)
+{
+ struct mptcp_subflow_context *subflow;
+ struct mptcp_addr_info cur;
+ struct sock_common *skc;
+
+ list_for_each_entry(subflow, list, node) {
+ skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
+
+ local_address(skc, &cur);
+ if (addresses_equal(&cur, saddr, false))
+ return true;
+ }
+
+ return false;
+}
+
+static struct mptcp_pm_addr_entry *
+select_local_address(const struct pm_nl_pernet *pernet,
+ struct mptcp_sock *msk)
+{
+ struct mptcp_pm_addr_entry *entry, *ret = NULL;
+
+ rcu_read_lock();
+ spin_lock_bh(&msk->join_list_lock);
+ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+ if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))
+ continue;
+
+ /* avoid any address already in use by subflows and
+ * pending join
+ */
+ if (entry->addr.family == ((struct sock *)msk)->sk_family &&
+ !lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
+ !lookup_subflow_by_saddr(&msk->join_list, &entry->addr)) {
+ ret = entry;
+ break;
+ }
+ }
+ spin_unlock_bh(&msk->join_list_lock);
+ rcu_read_unlock();
+ return ret;
+}
+
+static struct mptcp_pm_addr_entry *
+select_signal_address(struct pm_nl_pernet *pernet, unsigned int pos)
+{
+ struct mptcp_pm_addr_entry *entry, *ret = NULL;
+ int i = 0;
+
+ rcu_read_lock();
+ /* do not keep any additional per socket state, just signal
+ * the address list in order.
+ * Note: removal from the local address list during the msk life-cycle
+ * can lead to additional addresses not being announced.
+ */
+ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+ if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
+ continue;
+ if (i++ == pos) {
+ ret = entry;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+static void check_work_pending(struct mptcp_sock *msk)
+{
+ if (msk->pm.add_addr_signaled == msk->pm.add_addr_signal_max &&
+ (msk->pm.local_addr_used == msk->pm.local_addr_max ||
+ msk->pm.subflows == msk->pm.subflows_max))
+ WRITE_ONCE(msk->pm.work_pending, false);
+}
+
+static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+{
+ struct sock *sk = (struct sock *)msk;
+ struct mptcp_pm_addr_entry *local;
+ struct mptcp_addr_info remote;
+ struct pm_nl_pernet *pernet;
+
+ pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
+
+ pr_debug("local %d:%d signal %d:%d subflows %d:%d\n",
+ msk->pm.local_addr_used, msk->pm.local_addr_max,
+ msk->pm.add_addr_signaled, msk->pm.add_addr_signal_max,
+ msk->pm.subflows, msk->pm.subflows_max);
+
+ /* check first for announce */
+ if (msk->pm.add_addr_signaled < msk->pm.add_addr_signal_max) {
+ local = select_signal_address(pernet,
+ msk->pm.add_addr_signaled);
+
+ if (local) {
+ msk->pm.add_addr_signaled++;
+ mptcp_pm_announce_addr(msk, &local->addr);
+ } else {
+			/* pick failed, avoid further attempts later */
+ msk->pm.local_addr_used = msk->pm.add_addr_signal_max;
+ }
+
+ check_work_pending(msk);
+ }
+
+ /* check if should create a new subflow */
+ if (msk->pm.local_addr_used < msk->pm.local_addr_max &&
+ msk->pm.subflows < msk->pm.subflows_max) {
+ remote_address((struct sock_common *)sk, &remote);
+
+ local = select_local_address(pernet, msk);
+ if (local) {
+ msk->pm.local_addr_used++;
+ msk->pm.subflows++;
+ check_work_pending(msk);
+ spin_unlock_bh(&msk->pm.lock);
+ __mptcp_subflow_connect(sk, local->ifindex,
+ &local->addr, &remote);
+ spin_lock_bh(&msk->pm.lock);
+ return;
+ }
+
+		/* lookup failed, avoid further attempts later */
+ msk->pm.local_addr_used = msk->pm.local_addr_max;
+ check_work_pending(msk);
+ }
+}
+
+void mptcp_pm_nl_fully_established(struct mptcp_sock *msk)
+{
+ mptcp_pm_create_subflow_or_signal_addr(msk);
+}
+
+void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
+{
+ mptcp_pm_create_subflow_or_signal_addr(msk);
+}
+
+void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
+{
+ struct sock *sk = (struct sock *)msk;
+ struct mptcp_addr_info remote;
+ struct mptcp_addr_info local;
+
+ pr_debug("accepted %d:%d remote family %d",
+ msk->pm.add_addr_accepted, msk->pm.add_addr_accept_max,
+ msk->pm.remote.family);
+ msk->pm.add_addr_accepted++;
+ msk->pm.subflows++;
+ if (msk->pm.add_addr_accepted >= msk->pm.add_addr_accept_max ||
+ msk->pm.subflows >= msk->pm.subflows_max)
+ WRITE_ONCE(msk->pm.accept_addr, false);
+
+ /* connect to the specified remote address, using whatever
+ * local address the routing configuration will pick.
+ */
+ remote = msk->pm.remote;
+ if (!remote.port)
+ remote.port = sk->sk_dport;
+ memset(&local, 0, sizeof(local));
+ local.family = remote.family;
+
+ spin_unlock_bh(&msk->pm.lock);
+ __mptcp_subflow_connect((struct sock *)msk, 0, &local, &remote);
+ spin_lock_bh(&msk->pm.lock);
+}
+
+static bool address_use_port(struct mptcp_pm_addr_entry *entry)
+{
+ return (entry->flags &
+ (MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) ==
+ MPTCP_PM_ADDR_FLAG_SIGNAL;
+}
+
+static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
+ struct mptcp_pm_addr_entry *entry)
+{
+ struct mptcp_pm_addr_entry *cur;
+ int ret = -EINVAL;
+
+ spin_lock_bh(&pernet->lock);
+ /* to keep the code simple, don't do IDR-like allocation for address ID,
+ * just bail when we exceed limits
+ */
+ if (pernet->next_id > 255)
+ goto out;
+ if (pernet->addrs >= MPTCP_PM_ADDR_MAX)
+ goto out;
+
+	/* do not insert duplicate addresses, differentiate on port only
+	 * for signaled addresses
+ */
+ list_for_each_entry(cur, &pernet->local_addr_list, list) {
+ if (addresses_equal(&cur->addr, &entry->addr,
+ address_use_port(entry) &&
+ address_use_port(cur)))
+ goto out;
+ }
+
+ if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)
+ pernet->add_addr_signal_max++;
+ if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
+ pernet->local_addr_max++;
+
+ entry->addr.id = pernet->next_id++;
+ pernet->addrs++;
+ list_add_tail_rcu(&entry->list, &pernet->local_addr_list);
+ ret = entry->addr.id;
+
+out:
+ spin_unlock_bh(&pernet->lock);
+ return ret;
+}
+
+int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
+{
+ struct mptcp_pm_addr_entry *entry;
+ struct mptcp_addr_info skc_local;
+ struct mptcp_addr_info msk_local;
+ struct pm_nl_pernet *pernet;
+ int ret = -1;
+
+ if (WARN_ON_ONCE(!msk))
+ return -1;
+
+ /* The 0 ID mapping is defined by the first subflow, copied into the msk
+ * addr
+ */
+ local_address((struct sock_common *)msk, &msk_local);
+	local_address(skc, &skc_local);
+ if (addresses_equal(&msk_local, &skc_local, false))
+ return 0;
+
+ pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+ if (addresses_equal(&entry->addr, &skc_local, false)) {
+ ret = entry->addr.id;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (ret >= 0)
+ return ret;
+
+ /* address not found, add to local list */
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->flags = 0;
+ entry->addr = skc_local;
+ ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
+ if (ret < 0)
+ kfree(entry);
+
+ return ret;
+}
+
+void mptcp_pm_nl_data_init(struct mptcp_sock *msk)
+{
+ struct mptcp_pm_data *pm = &msk->pm;
+ struct pm_nl_pernet *pernet;
+ bool subflows;
+
+ pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
+
+ pm->add_addr_signal_max = READ_ONCE(pernet->add_addr_signal_max);
+ pm->add_addr_accept_max = READ_ONCE(pernet->add_addr_accept_max);
+ pm->local_addr_max = READ_ONCE(pernet->local_addr_max);
+ pm->subflows_max = READ_ONCE(pernet->subflows_max);
+ subflows = !!pm->subflows_max;
+ WRITE_ONCE(pm->work_pending, (!!pm->local_addr_max && subflows) ||
+ !!pm->add_addr_signal_max);
+ WRITE_ONCE(pm->accept_addr, !!pm->add_addr_accept_max && subflows);
+ WRITE_ONCE(pm->accept_subflow, subflows);
+}
+
+#define MPTCP_PM_CMD_GRP_OFFSET 0
+
+static const struct genl_multicast_group mptcp_pm_mcgrps[] = {
+ [MPTCP_PM_CMD_GRP_OFFSET] = { .name = MPTCP_PM_CMD_GRP_NAME, },
+};
+
+static const struct nla_policy
+mptcp_pm_addr_policy[MPTCP_PM_ADDR_ATTR_MAX + 1] = {
+ [MPTCP_PM_ADDR_ATTR_FAMILY] = { .type = NLA_U16, },
+ [MPTCP_PM_ADDR_ATTR_ID] = { .type = NLA_U8, },
+ [MPTCP_PM_ADDR_ATTR_ADDR4] = { .type = NLA_U32, },
+ [MPTCP_PM_ADDR_ATTR_ADDR6] = { .type = NLA_EXACT_LEN,
+ .len = sizeof(struct in6_addr), },
+ [MPTCP_PM_ADDR_ATTR_PORT] = { .type = NLA_U16 },
+ [MPTCP_PM_ADDR_ATTR_FLAGS] = { .type = NLA_U32 },
+ [MPTCP_PM_ADDR_ATTR_IF_IDX] = { .type = NLA_S32 },
+};
+
+static const struct nla_policy mptcp_pm_policy[MPTCP_PM_ATTR_MAX + 1] = {
+ [MPTCP_PM_ATTR_ADDR] =
+ NLA_POLICY_NESTED(mptcp_pm_addr_policy),
+ [MPTCP_PM_ATTR_RCV_ADD_ADDRS] = { .type = NLA_U32, },
+ [MPTCP_PM_ATTR_SUBFLOWS] = { .type = NLA_U32, },
+};
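
Userspace drives this family by nesting address attributes under MPTCP_PM_ATTR_ADDR, exactly as the two policies describe. Below is a hedged sketch of the raw framing for the nested part only; the attribute numbers and the SIGNAL flag value mirror the uapi enums this series adds but are assumptions here, and a real client would embed this inside a complete genetlink request:

    #include <arpa/inet.h>
    #include <linux/netlink.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    /* assumed values mirroring the uapi enums added by this series */
    #define PM_ATTR_ADDR     1          /* MPTCP_PM_ATTR_ADDR */
    #define ADDR_ATTR_FAMILY 1          /* MPTCP_PM_ADDR_ATTR_FAMILY */
    #define ADDR_ATTR_ADDR4  3          /* MPTCP_PM_ADDR_ATTR_ADDR4 */
    #define ADDR_ATTR_FLAGS  6          /* MPTCP_PM_ADDR_ATTR_FLAGS */
    #define ADDR_FLAG_SIGNAL (1 << 0)

    static int put_attr(char *buf, int off, uint16_t type,
                        const void *data, uint16_t len)
    {
        struct nlattr *nla = (struct nlattr *)(buf + off);

        nla->nla_type = type;
        nla->nla_len = NLA_HDRLEN + len;
        memcpy(buf + off + NLA_HDRLEN, data, len);
        return off + NLA_ALIGN(nla->nla_len);
    }

    int main(void)
    {
        char buf[128] = { 0 };
        struct nlattr *nest = (struct nlattr *)buf;
        uint16_t family = AF_INET;
        uint32_t flags = ADDR_FLAG_SIGNAL;
        struct in_addr a;
        int off = NLA_HDRLEN;           /* leave room for the nest header */

        inet_pton(AF_INET, "10.0.0.2", &a);
        off = put_attr(buf, off, ADDR_ATTR_FAMILY, &family, sizeof(family));
        off = put_attr(buf, off, ADDR_ATTR_ADDR4, &a, sizeof(a));
        off = put_attr(buf, off, ADDR_ATTR_FLAGS, &flags, sizeof(flags));

        nest->nla_type = NLA_F_NESTED | PM_ATTR_ADDR;
        nest->nla_len = off;

        for (int i = 0; i < off; i++)
            printf("%02x ", (unsigned char)buf[i]);
        printf("\n");
        return 0;
    }
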
+
+static int mptcp_pm_family_to_addr(int family)
+{
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ if (family == AF_INET6)
+ return MPTCP_PM_ADDR_ATTR_ADDR6;
+#endif
+ return MPTCP_PM_ADDR_ATTR_ADDR4;
+}
+
+static int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
+ bool require_family,
+ struct mptcp_pm_addr_entry *entry)
+{
+ struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
+ int err, addr_addr;
+
+ if (!attr) {
+ GENL_SET_ERR_MSG(info, "missing address info");
+ return -EINVAL;
+ }
+
+ /* no validation needed - was already done via nested policy */
+ err = nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
+ mptcp_pm_addr_policy, info->extack);
+ if (err)
+ return err;
+
+ memset(entry, 0, sizeof(*entry));
+ if (!tb[MPTCP_PM_ADDR_ATTR_FAMILY]) {
+ if (!require_family)
+ goto skip_family;
+
+ NL_SET_ERR_MSG_ATTR(info->extack, attr,
+ "missing family");
+ return -EINVAL;
+ }
+
+ entry->addr.family = nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_FAMILY]);
+ if (entry->addr.family != AF_INET
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ && entry->addr.family != AF_INET6
+#endif
+ ) {
+ NL_SET_ERR_MSG_ATTR(info->extack, attr,
+ "unknown address family");
+ return -EINVAL;
+ }
+ addr_addr = mptcp_pm_family_to_addr(entry->addr.family);
+ if (!tb[addr_addr]) {
+ NL_SET_ERR_MSG_ATTR(info->extack, attr,
+ "missing address data");
+ return -EINVAL;
+ }
+
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ if (entry->addr.family == AF_INET6)
+ entry->addr.addr6 = nla_get_in6_addr(tb[addr_addr]);
+ else
+#endif
+ entry->addr.addr.s_addr = nla_get_in_addr(tb[addr_addr]);
+
+skip_family:
+ if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX])
+ entry->ifindex = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]);
+
+ if (tb[MPTCP_PM_ADDR_ATTR_ID])
+ entry->addr.id = nla_get_u8(tb[MPTCP_PM_ADDR_ATTR_ID]);
+
+ if (tb[MPTCP_PM_ADDR_ATTR_FLAGS])
+ entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);
+
+ return 0;
+}
+
+static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info)
+{
+ return net_generic(genl_info_net(info), pm_nl_pernet_id);
+}
+
+static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
+ struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
+ struct mptcp_pm_addr_entry addr, *entry;
+ int ret;
+
+ ret = mptcp_pm_parse_addr(attr, info, true, &addr);
+ if (ret < 0)
+ return ret;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ GENL_SET_ERR_MSG(info, "can't allocate addr");
+ return -ENOMEM;
+ }
+
+ *entry = addr;
+ ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
+ if (ret < 0) {
+ GENL_SET_ERR_MSG(info, "too many addresses or duplicate one");
+ kfree(entry);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct mptcp_pm_addr_entry *
+__lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
+{
+ struct mptcp_pm_addr_entry *entry;
+
+ list_for_each_entry(entry, &pernet->local_addr_list, list) {
+ if (entry->addr.id == id)
+ return entry;
+ }
+ return NULL;
+}
+
+static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
+ struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
+ struct mptcp_pm_addr_entry addr, *entry;
+ int ret;
+
+ ret = mptcp_pm_parse_addr(attr, info, false, &addr);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_bh(&pernet->lock);
+ entry = __lookup_addr_by_id(pernet, addr.addr.id);
+ if (!entry) {
+ GENL_SET_ERR_MSG(info, "address not found");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)
+ pernet->add_addr_signal_max--;
+ if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
+ pernet->local_addr_max--;
+
+ pernet->addrs--;
+ list_del_rcu(&entry->list);
+ kfree_rcu(entry, rcu);
+out:
+ spin_unlock_bh(&pernet->lock);
+ return ret;
+}
+
+static void __flush_addrs(struct pm_nl_pernet *pernet)
+{
+ while (!list_empty(&pernet->local_addr_list)) {
+ struct mptcp_pm_addr_entry *cur;
+
+ cur = list_entry(pernet->local_addr_list.next,
+ struct mptcp_pm_addr_entry, list);
+ list_del_rcu(&cur->list);
+ kfree_rcu(cur, rcu);
+ }
+}
+
+static void __reset_counters(struct pm_nl_pernet *pernet)
+{
+ pernet->add_addr_signal_max = 0;
+ pernet->add_addr_accept_max = 0;
+ pernet->local_addr_max = 0;
+ pernet->addrs = 0;
+}
+
+static int mptcp_nl_cmd_flush_addrs(struct sk_buff *skb, struct genl_info *info)
+{
+ struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
+
+ spin_lock_bh(&pernet->lock);
+ __flush_addrs(pernet);
+ __reset_counters(pernet);
+ spin_unlock_bh(&pernet->lock);
+ return 0;
+}
+
+static int mptcp_nl_fill_addr(struct sk_buff *skb,
+ struct mptcp_pm_addr_entry *entry)
+{
+ struct mptcp_addr_info *addr = &entry->addr;
+ struct nlattr *attr;
+
+ attr = nla_nest_start(skb, MPTCP_PM_ATTR_ADDR);
+ if (!attr)
+ return -EMSGSIZE;
+
+ if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_FAMILY, addr->family))
+ goto nla_put_failure;
+ if (nla_put_u8(skb, MPTCP_PM_ADDR_ATTR_ID, addr->id))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, MPTCP_PM_ADDR_ATTR_FLAGS, entry->flags))
+ goto nla_put_failure;
+ if (entry->ifindex &&
+ nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->ifindex))
+ goto nla_put_failure;
+
+ if (addr->family == AF_INET)
+ nla_put_in_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR4,
+ addr->addr.s_addr);
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ else if (addr->family == AF_INET6)
+ nla_put_in6_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR6, &addr->addr6);
+#endif
+ nla_nest_end(skb, attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, attr);
+ return -EMSGSIZE;
+}
+
+static int mptcp_nl_cmd_get_addr(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
+ struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
+ struct mptcp_pm_addr_entry addr, *entry;
+ struct sk_buff *msg;
+ void *reply;
+ int ret;
+
+ ret = mptcp_pm_parse_addr(attr, info, false, &addr);
+ if (ret < 0)
+ return ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
+ info->genlhdr->cmd);
+ if (!reply) {
+ GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
+ ret = -EMSGSIZE;
+ goto fail;
+ }
+
+ spin_lock_bh(&pernet->lock);
+ entry = __lookup_addr_by_id(pernet, addr.addr.id);
+ if (!entry) {
+ GENL_SET_ERR_MSG(info, "address not found");
+ ret = -EINVAL;
+ goto unlock_fail;
+ }
+
+ ret = mptcp_nl_fill_addr(msg, entry);
+ if (ret)
+ goto unlock_fail;
+
+ genlmsg_end(msg, reply);
+ ret = genlmsg_reply(msg, info);
+ spin_unlock_bh(&pernet->lock);
+ return ret;
+
+unlock_fail:
+ spin_unlock_bh(&pernet->lock);
+
+fail:
+ nlmsg_free(msg);
+ return ret;
+}
+
+static int mptcp_nl_cmd_dump_addrs(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct net *net = sock_net(msg->sk);
+ struct mptcp_pm_addr_entry *entry;
+ struct pm_nl_pernet *pernet;
+ int id = cb->args[0];
+ void *hdr;
+
+ pernet = net_generic(net, pm_nl_pernet_id);
+
+ spin_lock_bh(&pernet->lock);
+ list_for_each_entry(entry, &pernet->local_addr_list, list) {
+ if (entry->addr.id <= id)
+ continue;
+
+ hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, &mptcp_genl_family,
+ NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR);
+ if (!hdr)
+ break;
+
+ if (mptcp_nl_fill_addr(msg, entry) < 0) {
+ genlmsg_cancel(msg, hdr);
+ break;
+ }
+
+ id = entry->addr.id;
+ genlmsg_end(msg, hdr);
+ }
+ spin_unlock_bh(&pernet->lock);
+
+ cb->args[0] = id;
+ return msg->len;
+}
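
The dump path uses the standard netlink cursor idiom: the id of the last entry that fit is parked in cb->args[0], and the next invocation skips everything at or below it. The resume logic in isolation:

    #include <stdio.h>

    static const int ids[] = { 1, 2, 5, 9 };
    #define N_IDS ((int)(sizeof(ids) / sizeof(ids[0])))

    /* returns the new cursor, i.e. what the kernel parks in cb->args[0] */
    static int dump_batch(int cursor, int room)
    {
        for (int i = 0; i < N_IDS && room > 0; i++) {
            if (ids[i] <= cursor)
                continue;       /* sent in an earlier batch */
            printf("id %d\n", ids[i]);
            cursor = ids[i];
            room--;
        }
        return cursor;
    }

    int main(void)
    {
        int cursor = 0;

        cursor = dump_batch(cursor, 2); /* emits 1, 2 */
        dump_batch(cursor, 2);          /* emits 5, 9 */
        return 0;
    }
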
+
+static int parse_limit(struct genl_info *info, int id, unsigned int *limit)
+{
+ struct nlattr *attr = info->attrs[id];
+
+ if (!attr)
+ return 0;
+
+ *limit = nla_get_u32(attr);
+ if (*limit > MPTCP_PM_ADDR_MAX) {
+ GENL_SET_ERR_MSG(info, "limit greater than maximum");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+mptcp_nl_cmd_set_limits(struct sk_buff *skb, struct genl_info *info)
+{
+ struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
+ unsigned int rcv_addrs, subflows;
+ int ret;
+
+ spin_lock_bh(&pernet->lock);
+ rcv_addrs = pernet->add_addr_accept_max;
+ ret = parse_limit(info, MPTCP_PM_ATTR_RCV_ADD_ADDRS, &rcv_addrs);
+ if (ret)
+ goto unlock;
+
+ subflows = pernet->subflows_max;
+ ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows);
+ if (ret)
+ goto unlock;
+
+ WRITE_ONCE(pernet->add_addr_accept_max, rcv_addrs);
+ WRITE_ONCE(pernet->subflows_max, subflows);
+
+unlock:
+ spin_unlock_bh(&pernet->lock);
+ return ret;
+}
+
+static int
+mptcp_nl_cmd_get_limits(struct sk_buff *skb, struct genl_info *info)
+{
+ struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
+ struct sk_buff *msg;
+ void *reply;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
+ MPTCP_PM_CMD_GET_LIMITS);
+ if (!reply)
+ goto fail;
+
+ if (nla_put_u32(msg, MPTCP_PM_ATTR_RCV_ADD_ADDRS,
+ READ_ONCE(pernet->add_addr_accept_max)))
+ goto fail;
+
+ if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS,
+ READ_ONCE(pernet->subflows_max)))
+ goto fail;
+
+ genlmsg_end(msg, reply);
+ return genlmsg_reply(msg, info);
+
+fail:
+ GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+}
+
+static struct genl_ops mptcp_pm_ops[] = {
+ {
+ .cmd = MPTCP_PM_CMD_ADD_ADDR,
+ .doit = mptcp_nl_cmd_add_addr,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_DEL_ADDR,
+ .doit = mptcp_nl_cmd_del_addr,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_FLUSH_ADDRS,
+ .doit = mptcp_nl_cmd_flush_addrs,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_GET_ADDR,
+ .doit = mptcp_nl_cmd_get_addr,
+ .dumpit = mptcp_nl_cmd_dump_addrs,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_SET_LIMITS,
+ .doit = mptcp_nl_cmd_set_limits,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_GET_LIMITS,
+ .doit = mptcp_nl_cmd_get_limits,
+ },
+};
+
+static struct genl_family mptcp_genl_family __ro_after_init = {
+ .name = MPTCP_PM_NAME,
+ .version = MPTCP_PM_VER,
+ .maxattr = MPTCP_PM_ATTR_MAX,
+ .policy = mptcp_pm_policy,
+ .netnsok = true,
+ .module = THIS_MODULE,
+ .ops = mptcp_pm_ops,
+ .n_ops = ARRAY_SIZE(mptcp_pm_ops),
+ .mcgrps = mptcp_pm_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(mptcp_pm_mcgrps),
+};
+
+static int __net_init pm_nl_init_net(struct net *net)
+{
+ struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id);
+
+ INIT_LIST_HEAD_RCU(&pernet->local_addr_list);
+ __reset_counters(pernet);
+ pernet->next_id = 1;
+ spin_lock_init(&pernet->lock);
+ return 0;
+}
+
+static void __net_exit pm_nl_exit_net(struct list_head *net_list)
+{
+ struct net *net;
+
+ list_for_each_entry(net, net_list, exit_list) {
+ /* net is removed from namespace list, can't race with
+ * other modifiers
+ */
+ __flush_addrs(net_generic(net, pm_nl_pernet_id));
+ }
+}
+
+static struct pernet_operations mptcp_pm_pernet_ops = {
+ .init = pm_nl_init_net,
+ .exit_batch = pm_nl_exit_net,
+ .id = &pm_nl_pernet_id,
+ .size = sizeof(struct pm_nl_pernet),
+};
+
+void mptcp_pm_nl_init(void)
+{
+ if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0)
+ panic("Failed to register MPTCP PM pernet subsystem.\n");
+
+ if (genl_register_family(&mptcp_genl_family))
+ panic("Failed to register MPTCP PM netlink family\n");
+}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 3c19a8efdcea..1833bc1f4a43 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -21,6 +21,7 @@
#endif
#include <net/mptcp.h>
#include "protocol.h"
+#include "mib.h"
#define MPTCP_SAME_STATE TCP_MAX_STATES
@@ -31,6 +32,14 @@ struct mptcp6_sock {
};
#endif
+struct mptcp_skb_cb {
+ u32 offset;
+};
+
+#define MPTCP_SKB_CB(__skb) ((struct mptcp_skb_cb *)&((__skb)->cb[0]))
+
+static struct percpu_counter mptcp_sockets_allocated;
+
/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
* completed yet or has failed, return the subflow socket.
* Otherwise return NULL.
@@ -98,17 +107,195 @@ set_state:
return ssock;
}
-static struct sock *mptcp_subflow_get(const struct mptcp_sock *msk)
+static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
+ struct sk_buff *skb,
+ unsigned int offset, size_t copy_len)
{
- struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
- sock_owned_by_me((const struct sock *)msk);
+ __skb_unlink(skb, &ssk->sk_receive_queue);
+ skb_set_owner_r(skb, sk);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
- mptcp_for_each_subflow(msk, subflow) {
- return mptcp_subflow_tcp_sock(subflow);
+ msk->ack_seq += copy_len;
+ MPTCP_SKB_CB(skb)->offset = offset;
+}
+
+static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
+ struct sock *ssk,
+ unsigned int *bytes)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ struct sock *sk = (struct sock *)msk;
+ unsigned int moved = 0;
+ bool more_data_avail;
+ struct tcp_sock *tp;
+ bool done = false;
+
+ if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+ int rcvbuf = max(ssk->sk_rcvbuf, sk->sk_rcvbuf);
+
+ if (rcvbuf > sk->sk_rcvbuf)
+ sk->sk_rcvbuf = rcvbuf;
}
- return NULL;
+ tp = tcp_sk(ssk);
+ do {
+ u32 map_remaining, offset;
+ u32 seq = tp->copied_seq;
+ struct sk_buff *skb;
+ bool fin;
+
+ /* try to move as much data as available */
+ map_remaining = subflow->map_data_len -
+ mptcp_subflow_get_map_offset(subflow);
+
+ skb = skb_peek(&ssk->sk_receive_queue);
+ if (!skb)
+ break;
+
+ offset = seq - TCP_SKB_CB(skb)->seq;
+ fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
+ if (fin) {
+ done = true;
+ seq++;
+ }
+
+ if (offset < skb->len) {
+ size_t len = skb->len - offset;
+
+ if (tp->urg_data)
+ done = true;
+
+ __mptcp_move_skb(msk, ssk, skb, offset, len);
+ seq += len;
+ moved += len;
+
+ if (WARN_ON_ONCE(map_remaining < len))
+ break;
+ } else {
+ WARN_ON_ONCE(!fin);
+ sk_eat_skb(ssk, skb);
+ done = true;
+ }
+
+ WRITE_ONCE(tp->copied_seq, seq);
+ more_data_avail = mptcp_subflow_data_available(ssk);
+
+ if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf)) {
+ done = true;
+ break;
+ }
+ } while (more_data_avail);
+
+ *bytes = moved;
+
+ return done;
+}
+
+/* In most cases we will be able to lock the mptcp socket. If it's already
+ * owned, we need to defer to the work queue to avoid ABBA deadlock.
+ */
+static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
+{
+ struct sock *sk = (struct sock *)msk;
+ unsigned int moved = 0;
+
+ if (READ_ONCE(sk->sk_lock.owned))
+ return false;
+
+ if (unlikely(!spin_trylock_bh(&sk->sk_lock.slock)))
+ return false;
+
+ /* must re-check after taking the lock */
+ if (!READ_ONCE(sk->sk_lock.owned))
+ __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
+
+ spin_unlock_bh(&sk->sk_lock.slock);
+
+ return moved > 0;
+}
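
This is a trylock-or-defer: if the msk is owned, the data stays queued on the subflow and whoever holds the lock (or the work queue) moves it later, sidestepping an ABBA deadlock between the msk and ssk locks. The shape of the pattern with a pthread mutex in place of the socket spinlock and owner check:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t msk_lock = PTHREAD_MUTEX_INITIALIZER;

    static void do_move(void)
    {
        /* move data from the subflow to the msk receive queue */
    }

    static bool try_move_or_defer(void)
    {
        if (pthread_mutex_trylock(&msk_lock) != 0)
            return false;   /* contended: the owner/worker will do it */
        do_move();
        pthread_mutex_unlock(&msk_lock);
        return true;
    }

    int main(void)
    {
        return try_move_or_defer() ? 0 : 1;
    }
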
+
+void mptcp_data_ready(struct sock *sk, struct sock *ssk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ set_bit(MPTCP_DATA_READY, &msk->flags);
+
+ if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
+ move_skbs_to_msk(msk, ssk))
+ goto wake;
+
+ /* don't schedule if mptcp sk is (still) over limit */
+ if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf))
+ goto wake;
+
+ /* mptcp socket is owned, release_cb should retry */
+ if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
+ &sk->sk_tsq_flags)) {
+ sock_hold(sk);
+
+		/* need to try again, it's possible release_cb() has already
+ * been called after the test_and_set_bit() above.
+ */
+ move_skbs_to_msk(msk, ssk);
+ }
+wake:
+ sk->sk_data_ready(sk);
+}
+
+static void __mptcp_flush_join_list(struct mptcp_sock *msk)
+{
+ if (likely(list_empty(&msk->join_list)))
+ return;
+
+ spin_lock_bh(&msk->join_list_lock);
+ list_splice_tail_init(&msk->join_list, &msk->conn_list);
+ spin_unlock_bh(&msk->join_list_lock);
+}
+
+static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
+{
+ long tout = ssk && inet_csk(ssk)->icsk_pending ?
+ inet_csk(ssk)->icsk_timeout - jiffies : 0;
+
+ if (tout <= 0)
+ tout = mptcp_sk(sk)->timer_ival;
+ mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
+}
+
+static bool mptcp_timer_pending(struct sock *sk)
+{
+ return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
+}
+
+static void mptcp_reset_timer(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ unsigned long tout;
+
+ /* should never be called with mptcp level timer cleared */
+ tout = READ_ONCE(mptcp_sk(sk)->timer_ival);
+ if (WARN_ON_ONCE(!tout))
+ tout = TCP_RTO_MIN;
+ sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
+}
+
+void mptcp_data_acked(struct sock *sk)
+{
+ mptcp_reset_timer(sk);
+
+ if (!sk_stream_is_writeable(sk) &&
+ schedule_work(&mptcp_sk(sk)->work))
+ sock_hold(sk);
+}
+
+static void mptcp_stop_timer(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
+ mptcp_sk(sk)->timer_ival = 0;
}
static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
@@ -134,41 +321,149 @@ static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
return NULL;
}
-static inline bool mptcp_skb_can_collapse_to(const struct mptcp_sock *msk,
- const struct sk_buff *skb,
- const struct mptcp_ext *mpext)
+static bool mptcp_skb_can_collapse_to(u64 write_seq,
+ const struct sk_buff *skb,
+ const struct mptcp_ext *mpext)
{
if (!tcp_skb_can_collapse_to(skb))
return false;
/* can collapse only if MPTCP level sequence is in order */
- return mpext && mpext->data_seq + mpext->data_len == msk->write_seq;
+ return mpext && mpext->data_seq + mpext->data_len == write_seq;
+}
+
+static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
+ const struct page_frag *pfrag,
+ const struct mptcp_data_frag *df)
+{
+ return df && pfrag->page == df->page &&
+ df->data_seq + df->data_len == msk->write_seq;
+}
+
+static void dfrag_uncharge(struct sock *sk, int len)
+{
+ sk_mem_uncharge(sk, len);
+ sk_wmem_queued_add(sk, -len);
+}
+
+static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
+{
+ int len = dfrag->data_len + dfrag->overhead;
+
+ list_del(&dfrag->list);
+ dfrag_uncharge(sk, len);
+ put_page(dfrag->page);
+}
+
+static void mptcp_clean_una(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct mptcp_data_frag *dtmp, *dfrag;
+ u64 snd_una = atomic64_read(&msk->snd_una);
+ bool cleaned = false;
+
+ list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
+ if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
+ break;
+
+ dfrag_clear(sk, dfrag);
+ cleaned = true;
+ }
+
+ dfrag = mptcp_rtx_head(sk);
+ if (dfrag && after64(snd_una, dfrag->data_seq)) {
+		u64 delta = snd_una - dfrag->data_seq;
+
+		dfrag->data_seq += delta;
+		dfrag->offset += delta;
+		dfrag->data_len -= delta;
+
+ dfrag_uncharge(sk, delta);
+ cleaned = true;
+ }
+
+ if (cleaned) {
+ sk_mem_reclaim_partial(sk);
+
+ /* Only wake up writers if a subflow is ready */
+ if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
+ sk_stream_write_space(sk);
+ }
+}
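
When snd_una lands inside the head fragment, only the acked prefix is trimmed: the sequence and page offset advance by the acked delta while the length shrinks by the same amount. The trim arithmetic in isolation:

    #include <assert.h>
    #include <stdint.h>

    struct frag {
        uint64_t data_seq;  /* MPTCP-level start of the fragment */
        uint32_t data_len;
        uint32_t offset;    /* offset of the payload in its page */
    };

    static void trim_acked(struct frag *f, uint64_t snd_una)
    {
        uint64_t delta;

        if (snd_una <= f->data_seq || snd_una >= f->data_seq + f->data_len)
            return;             /* nothing (or everything) acked here */

        delta = snd_una - f->data_seq;  /* acked prefix only */
        f->data_seq += delta;
        f->offset += delta;
        f->data_len -= delta;
    }

    int main(void)
    {
        struct frag f = { .data_seq = 1000, .data_len = 500, .offset = 64 };

        trim_acked(&f, 1200);
        assert(f.data_seq == 1200 && f.data_len == 300 && f.offset == 264);
        return 0;
    }
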
+
+/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
+ * data
+ */
+static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+{
+ if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
+ pfrag, sk->sk_allocation)))
+ return true;
+
+ sk->sk_prot->enter_memory_pressure(sk);
+ sk_stream_moderate_sndbuf(sk);
+ return false;
+}
+
+static struct mptcp_data_frag *
+mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
+ int orig_offset)
+{
+ int offset = ALIGN(orig_offset, sizeof(long));
+ struct mptcp_data_frag *dfrag;
+
+ dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
+ dfrag->data_len = 0;
+ dfrag->data_seq = msk->write_seq;
+ dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
+ dfrag->offset = offset + sizeof(struct mptcp_data_frag);
+ dfrag->page = pfrag->page;
+
+ return dfrag;
}
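
mptcp_carve_data_frag() stores the fragment descriptor inside the data page itself: the struct sits at the next long-aligned offset, the payload starts right behind it, and 'overhead' accounts for both the alignment gap and the descriptor. The layout arithmetic:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    struct frag_hdr {
        uint64_t data_seq;
        uint32_t data_len;
    };

    int main(void)
    {
        int orig_offset = 13;   /* wherever the page frag currently ends */
        int offset = ALIGN_UP(orig_offset, (int)sizeof(long));
        int overhead = offset - orig_offset + (int)sizeof(struct frag_hdr);
        int payload = offset + (int)sizeof(struct frag_hdr);

        printf("hdr at %d, payload at %d, overhead %d\n",
               offset, payload, overhead);
        /* overhead covers the alignment gap plus the descriptor */
        assert(payload - orig_offset == overhead);
        return 0;
    }
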
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
- struct msghdr *msg, long *timeo, int *pmss_now,
+ struct msghdr *msg, struct mptcp_data_frag *dfrag,
+ long *timeo, int *pmss_now,
int *ps_goal)
{
- int mss_now, avail_size, size_goal, ret;
+ int mss_now, avail_size, size_goal, offset, ret, frag_truesize = 0;
+ bool dfrag_collapsed, can_collapse = false;
struct mptcp_sock *msk = mptcp_sk(sk);
struct mptcp_ext *mpext = NULL;
+ bool retransmission = !!dfrag;
struct sk_buff *skb, *tail;
- bool can_collapse = false;
struct page_frag *pfrag;
+ struct page *page;
+ u64 *write_seq;
size_t psize;
/* use the mptcp page cache so that we can easily move the data
* from one substream to another, but do per subflow memory accounting
+	 * Note: pfrag is used only when !retransmission, but the compiler is
+	 * fooled into a warning if we don't init it here
*/
pfrag = sk_page_frag(sk);
- while (!sk_page_frag_refill(ssk, pfrag) ||
+ while ((!retransmission && !mptcp_page_frag_refill(ssk, pfrag)) ||
!mptcp_ext_cache_refill(msk)) {
ret = sk_stream_wait_memory(ssk, timeo);
if (ret)
return ret;
+
+ /* if sk_stream_wait_memory() sleeps snd_una can change
+ * significantly, refresh the rtx queue
+ */
+ mptcp_clean_una(sk);
+
if (unlikely(__mptcp_needs_tcp_fallback(msk)))
return 0;
}
+ if (!retransmission) {
+ write_seq = &msk->write_seq;
+ page = pfrag->page;
+ } else {
+ write_seq = &dfrag->data_seq;
+ page = dfrag->page;
+ }
/* compute copy limit */
mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
@@ -186,32 +481,74 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
* SSN association set here
*/
can_collapse = (size_goal - skb->len > 0) &&
- mptcp_skb_can_collapse_to(msk, skb, mpext);
+ mptcp_skb_can_collapse_to(*write_seq, skb, mpext);
if (!can_collapse)
TCP_SKB_CB(skb)->eor = 1;
else
avail_size = size_goal - skb->len;
}
- psize = min_t(size_t, pfrag->size - pfrag->offset, avail_size);
-
- /* Copy to page */
- pr_debug("left=%zu", msg_data_left(msg));
- psize = copy_page_from_iter(pfrag->page, pfrag->offset,
- min_t(size_t, msg_data_left(msg), psize),
- &msg->msg_iter);
- pr_debug("left=%zu", msg_data_left(msg));
- if (!psize)
- return -EINVAL;
+
+ if (!retransmission) {
+ /* reuse tail pfrag, if possible, or carve a new one from the
+ * page allocator
+ */
+ dfrag = mptcp_rtx_tail(sk);
+ offset = pfrag->offset;
+ dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
+ if (!dfrag_collapsed) {
+ dfrag = mptcp_carve_data_frag(msk, pfrag, offset);
+ offset = dfrag->offset;
+ frag_truesize = dfrag->overhead;
+ }
+ psize = min_t(size_t, pfrag->size - offset, avail_size);
+
+ /* Copy to page */
+ pr_debug("left=%zu", msg_data_left(msg));
+ psize = copy_page_from_iter(pfrag->page, offset,
+ min_t(size_t, msg_data_left(msg),
+ psize),
+ &msg->msg_iter);
+ pr_debug("left=%zu", msg_data_left(msg));
+ if (!psize)
+ return -EINVAL;
+
+ if (!sk_wmem_schedule(sk, psize + dfrag->overhead))
+ return -ENOMEM;
+ } else {
+ offset = dfrag->offset;
+ psize = min_t(size_t, dfrag->data_len, avail_size);
+ }
/* tell the TCP stack to delay the push so that we can safely
* access the skb after the sendpages call
*/
- ret = do_tcp_sendpages(ssk, pfrag->page, pfrag->offset, psize,
+ ret = do_tcp_sendpages(ssk, page, offset, psize,
msg->msg_flags | MSG_SENDPAGE_NOTLAST);
if (ret <= 0)
return ret;
- if (unlikely(ret < psize))
- iov_iter_revert(&msg->msg_iter, psize - ret);
+
+ frag_truesize += ret;
+ if (!retransmission) {
+ if (unlikely(ret < psize))
+ iov_iter_revert(&msg->msg_iter, psize - ret);
+
+ /* send successful, keep track of sent data for mptcp-level
+ * retransmission
+ */
+ dfrag->data_len += ret;
+ if (!dfrag_collapsed) {
+ get_page(dfrag->page);
+ list_add_tail(&dfrag->list, &msk->rtx_queue);
+ sk_wmem_queued_add(sk, frag_truesize);
+ } else {
+ sk_wmem_queued_add(sk, ret);
+ }
+
+ /* charge data on mptcp rtx queue to the master socket
+ * Note: we charge such data both to sk and ssk
+ */
+ sk->sk_forward_alloc -= frag_truesize;
+ }
/* if the tail skb extension is still the cached one, collapsing
* really happened. Note: we can't check for 'same skb' as the sk_buff
@@ -230,7 +567,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
msk->cached_ext = NULL;
memset(mpext, 0, sizeof(*mpext));
- mpext->data_seq = msk->write_seq;
+ mpext->data_seq = *write_seq;
mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
mpext->data_len = ret;
mpext->use_map = 1;
@@ -241,13 +578,51 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
mpext->dsn64);
out:
- pfrag->offset += ret;
- msk->write_seq += ret;
+ if (!retransmission)
+ pfrag->offset += frag_truesize;
+ *write_seq += ret;
mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
return ret;
}
+static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+ struct sock *backup = NULL;
+
+ sock_owned_by_me((const struct sock *)msk);
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ if (!sk_stream_memory_free(ssk)) {
+ struct socket *sock = ssk->sk_socket;
+
+ if (sock) {
+ clear_bit(MPTCP_SEND_SPACE, &msk->flags);
+ smp_mb__after_atomic();
+
+ /* enables sk->write_space() callbacks */
+ set_bit(SOCK_NOSPACE, &sock->flags);
+ }
+
+ return NULL;
+ }
+
+ if (subflow->backup) {
+ if (!backup)
+ backup = ssk;
+
+ continue;
+ }
+
+ return ssk;
+ }
+
+ return backup;
+}
+
static void ssk_check_wmem(struct mptcp_sock *msk, struct sock *ssk)
{
struct socket *sock;
@@ -278,6 +653,15 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
return -EOPNOTSUPP;
lock_sock(sk);
+
+ timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+
+ if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
+ ret = sk_stream_wait_connect(sk, &timeo);
+ if (ret)
+ goto out;
+ }
+
ssock = __mptcp_tcp_fallback(msk);
if (unlikely(ssock)) {
fallback:
@@ -286,19 +670,29 @@ fallback:
return ret >= 0 ? ret + copied : (copied ? copied : ret);
}
- timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+ mptcp_clean_una(sk);
- ssk = mptcp_subflow_get(msk);
- if (!ssk) {
- release_sock(sk);
- return -ENOTCONN;
+ __mptcp_flush_join_list(msk);
+ ssk = mptcp_subflow_get_send(msk);
+ while (!sk_stream_memory_free(sk) || !ssk) {
+ ret = sk_stream_wait_memory(sk, &timeo);
+ if (ret)
+ goto out;
+
+ mptcp_clean_una(sk);
+
+ ssk = mptcp_subflow_get_send(msk);
+ if (list_empty(&msk->conn_list)) {
+ ret = -ENOTCONN;
+ goto out;
+ }
}
pr_debug("conn_list->subflow=%p", ssk);
lock_sock(ssk);
while (msg_data_left(msg)) {
- ret = mptcp_sendmsg_frag(sk, ssk, msg, &timeo, &mss_now,
+ ret = mptcp_sendmsg_frag(sk, ssk, msg, NULL, &timeo, &mss_now,
&size_goal);
if (ret < 0)
break;
@@ -311,73 +705,101 @@ fallback:
copied += ret;
}
+ mptcp_set_timeout(sk, ssk);
if (copied) {
ret = copied;
tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
size_goal);
+
+ /* start the timer, if it's not pending */
+ if (!mptcp_timer_pending(sk))
+ mptcp_reset_timer(sk);
}
ssk_check_wmem(msk, ssk);
release_sock(ssk);
+out:
release_sock(sk);
return ret;
}
-int mptcp_read_actor(read_descriptor_t *desc, struct sk_buff *skb,
- unsigned int offset, size_t len)
+static void mptcp_wait_data(struct sock *sk, long *timeo)
{
- struct mptcp_read_arg *arg = desc->arg.data;
- size_t copy_len;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+
+ sk_wait_event(sk, timeo,
+ test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);
+
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
+}
- copy_len = min(desc->count, len);
+static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
+ struct msghdr *msg,
+ size_t len)
+{
+ struct sock *sk = (struct sock *)msk;
+ struct sk_buff *skb;
+ int copied = 0;
- if (likely(arg->msg)) {
+ while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
+ u32 offset = MPTCP_SKB_CB(skb)->offset;
+ u32 data_len = skb->len - offset;
+ u32 count = min_t(size_t, len - copied, data_len);
int err;
- err = skb_copy_datagram_msg(skb, offset, arg->msg, copy_len);
- if (err) {
- pr_debug("error path");
- desc->error = err;
- return err;
+ err = skb_copy_datagram_msg(skb, offset, msg, count);
+ if (unlikely(err < 0)) {
+ if (!copied)
+ return err;
+ break;
+ }
+
+ copied += count;
+
+ if (count < data_len) {
+ MPTCP_SKB_CB(skb)->offset += count;
+ break;
}
- } else {
- pr_debug("Flushing skb payload");
- }
- desc->count -= copy_len;
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ __kfree_skb(skb);
- pr_debug("consumed %zu bytes, %zu left", copy_len, desc->count);
- return copy_len;
+ if (copied >= len)
+ break;
+ }
+
+ return copied;
}
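
Partial reads resume through the per-skb offset stashed in the control buffer: a half-consumed head skb stays on the queue and the next recvmsg() continues where the previous one stopped. The bookkeeping in miniature:

    #include <assert.h>
    #include <string.h>

    struct buf {
        const char *data;
        unsigned int len;
        unsigned int offset;    /* plays the role of MPTCP_SKB_CB offset */
    };

    static unsigned int read_from(struct buf *b, char *dst, unsigned int want)
    {
        unsigned int avail = b->len - b->offset;
        unsigned int count = want < avail ? want : avail;

        memcpy(dst, b->data + b->offset, count);
        b->offset += count;     /* a short read resumes here next time */
        return count;
    }

    int main(void)
    {
        struct buf b = { "hello world", 11, 0 };
        char out[16];

        assert(read_from(&b, out, 5) == 5);     /* "hello" */
        assert(read_from(&b, out, 64) == 6);    /* " world" */
        return 0;
    }
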
-static void mptcp_wait_data(struct sock *sk, long *timeo)
+static bool __mptcp_move_skbs(struct mptcp_sock *msk)
{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- struct mptcp_sock *msk = mptcp_sk(sk);
+ unsigned int moved = 0;
+ bool done;
- add_wait_queue(sk_sleep(sk), &wait);
- sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ do {
+ struct sock *ssk = mptcp_subflow_recv_lookup(msk);
- sk_wait_event(sk, timeo,
- test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);
+ if (!ssk)
+ break;
- sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- remove_wait_queue(sk_sleep(sk), &wait);
+ lock_sock(ssk);
+ done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
+ release_sock(ssk);
+ } while (!done);
+
+ return moved > 0;
}
static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- struct mptcp_subflow_context *subflow;
- bool more_data_avail = false;
- struct mptcp_read_arg arg;
- read_descriptor_t desc;
- bool wait_data = false;
struct socket *ssock;
- struct tcp_sock *tp;
- bool done = false;
- struct sock *ssk;
int copied = 0;
int target;
long timeo;
@@ -395,65 +817,27 @@ fallback:
return copied;
}
- arg.msg = msg;
- desc.arg.data = &arg;
- desc.error = 0;
-
timeo = sock_rcvtimeo(sk, nonblock);
len = min_t(size_t, len, INT_MAX);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+ __mptcp_flush_join_list(msk);
- while (!done) {
- u32 map_remaining;
+ while (len > (size_t)copied) {
int bytes_read;
- ssk = mptcp_subflow_recv_lookup(msk);
- pr_debug("msk=%p ssk=%p", msk, ssk);
- if (!ssk)
- goto wait_for_data;
-
- subflow = mptcp_subflow_ctx(ssk);
- tp = tcp_sk(ssk);
-
- lock_sock(ssk);
- do {
- /* try to read as much data as available */
- map_remaining = subflow->map_data_len -
- mptcp_subflow_get_map_offset(subflow);
- desc.count = min_t(size_t, len - copied, map_remaining);
- pr_debug("reading %zu bytes, copied %d", desc.count,
- copied);
- bytes_read = tcp_read_sock(ssk, &desc,
- mptcp_read_actor);
- if (bytes_read < 0) {
- if (!copied)
- copied = bytes_read;
- done = true;
- goto next;
- }
+ bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied);
+ if (unlikely(bytes_read < 0)) {
+ if (!copied)
+ copied = bytes_read;
+ goto out_err;
+ }
- pr_debug("msk ack_seq=%llx -> %llx", msk->ack_seq,
- msk->ack_seq + bytes_read);
- msk->ack_seq += bytes_read;
- copied += bytes_read;
- if (copied >= len) {
- done = true;
- goto next;
- }
- if (tp->urg_data && tp->urg_seq == tp->copied_seq) {
- pr_err("Urgent data present, cannot proceed");
- done = true;
- goto next;
- }
-next:
- more_data_avail = mptcp_subflow_data_available(ssk);
- } while (more_data_avail && !done);
- release_sock(ssk);
- continue;
+ copied += bytes_read;
-wait_for_data:
- more_data_avail = false;
+ if (skb_queue_empty(&sk->sk_receive_queue) &&
+ __mptcp_move_skbs(msk))
+ continue;
/* only the master socket status is relevant here. The exit
* conditions mirror closely tcp_recvmsg()
@@ -494,30 +878,92 @@ wait_for_data:
}
pr_debug("block timeout %ld", timeo);
- wait_data = true;
mptcp_wait_data(sk, &timeo);
if (unlikely(__mptcp_tcp_fallback(msk)))
goto fallback;
}
- if (more_data_avail) {
- if (!test_bit(MPTCP_DATA_READY, &msk->flags))
- set_bit(MPTCP_DATA_READY, &msk->flags);
- } else if (!wait_data) {
+ if (skb_queue_empty(&sk->sk_receive_queue)) {
+ /* entire backlog drained, clear DATA_READY. */
clear_bit(MPTCP_DATA_READY, &msk->flags);
- /* .. race-breaker: ssk might get new data after last
- * data_available() returns false.
+ /* .. race-breaker: ssk might have gotten new data
+ * after last __mptcp_move_skbs() returned false.
*/
- ssk = mptcp_subflow_recv_lookup(msk);
- if (unlikely(ssk))
+ if (unlikely(__mptcp_move_skbs(msk)))
set_bit(MPTCP_DATA_READY, &msk->flags);
+ } else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
+ /* data to read but mptcp_wait_data() cleared DATA_READY */
+ set_bit(MPTCP_DATA_READY, &msk->flags);
}
-
+out_err:
release_sock(sk);
return copied;
}
+static void mptcp_retransmit_handler(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ if (atomic64_read(&msk->snd_una) == msk->write_seq) {
+ mptcp_stop_timer(sk);
+ } else {
+ set_bit(MPTCP_WORK_RTX, &msk->flags);
+ if (schedule_work(&msk->work))
+ sock_hold(sk);
+ }
+}
+
+static void mptcp_retransmit_timer(struct timer_list *t)
+{
+ struct inet_connection_sock *icsk = from_timer(icsk, t,
+ icsk_retransmit_timer);
+ struct sock *sk = &icsk->icsk_inet.sk;
+
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk)) {
+ mptcp_retransmit_handler(sk);
+ } else {
+ /* delegate our work to tcp_release_cb() */
+ if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED,
+ &sk->sk_tsq_flags))
+ sock_hold(sk);
+ }
+ bh_unlock_sock(sk);
+ sock_put(sk);
+}
+
+/* Find an idle subflow. Return NULL if there is unacked data at tcp
+ * level.
+ *
+ * A backup subflow is returned only if that is the only kind available.
+ */
+static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+ struct sock *backup = NULL;
+
+ sock_owned_by_me((const struct sock *)msk);
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ /* still data outstanding at TCP level? Don't retransmit. */
+ if (!tcp_write_queue_empty(ssk))
+ return NULL;
+
+ if (subflow->backup) {
+ if (!backup)
+ backup = ssk;
+ continue;
+ }
+
+ return ssk;
+ }
+
+ return backup;
+}
+
/* subflow sockets can be either outgoing (connect) or incoming
* (accept).
*
@@ -548,28 +994,135 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
return 0;
}
+static void mptcp_worker(struct work_struct *work)
+{
+ struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
+ struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
+ int orig_len, orig_offset, ret, mss_now = 0, size_goal = 0;
+ struct mptcp_data_frag *dfrag;
+ u64 orig_write_seq;
+ size_t copied = 0;
+ struct msghdr msg;
+ long timeo = 0;
+
+ lock_sock(sk);
+ mptcp_clean_una(sk);
+ __mptcp_flush_join_list(msk);
+ __mptcp_move_skbs(msk);
+
+ if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
+ goto unlock;
+
+ dfrag = mptcp_rtx_head(sk);
+ if (!dfrag)
+ goto unlock;
+
+ ssk = mptcp_subflow_get_retrans(msk);
+ if (!ssk)
+ goto reset_unlock;
+
+ lock_sock(ssk);
+
+ msg.msg_flags = MSG_DONTWAIT;
+ orig_len = dfrag->data_len;
+ orig_offset = dfrag->offset;
+ orig_write_seq = dfrag->data_seq;
+ while (dfrag->data_len > 0) {
+ ret = mptcp_sendmsg_frag(sk, ssk, &msg, dfrag, &timeo, &mss_now,
+ &size_goal);
+ if (ret < 0)
+ break;
+
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
+ copied += ret;
+ dfrag->data_len -= ret;
+ dfrag->offset += ret;
+ }
+ if (copied)
+ tcp_push(ssk, msg.msg_flags, mss_now, tcp_sk(ssk)->nonagle,
+ size_goal);
+
+ dfrag->data_seq = orig_write_seq;
+ dfrag->offset = orig_offset;
+ dfrag->data_len = orig_len;
+
+ mptcp_set_timeout(sk, ssk);
+ release_sock(ssk);
+
+reset_unlock:
+ if (!mptcp_timer_pending(sk))
+ mptcp_reset_timer(sk);
+
+unlock:
+ release_sock(sk);
+ sock_put(sk);
+}
+
static int __mptcp_init_sock(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
+ spin_lock_init(&msk->join_list_lock);
+
INIT_LIST_HEAD(&msk->conn_list);
+ INIT_LIST_HEAD(&msk->join_list);
+ INIT_LIST_HEAD(&msk->rtx_queue);
__set_bit(MPTCP_SEND_SPACE, &msk->flags);
+ INIT_WORK(&msk->work, mptcp_worker);
msk->first = NULL;
inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
+ mptcp_pm_data_init(msk);
+
+ /* re-use the csk retrans timer for MPTCP-level retrans */
+ timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
+
return 0;
}
static int mptcp_init_sock(struct sock *sk)
{
- if (!mptcp_is_enabled(sock_net(sk)))
+ struct net *net = sock_net(sk);
+ int ret;
+
+ if (!mptcp_is_enabled(net))
return -ENOPROTOOPT;
- return __mptcp_init_sock(sk);
+ if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
+ return -ENOMEM;
+
+ ret = __mptcp_init_sock(sk);
+ if (ret)
+ return ret;
+
+ sk_sockets_allocated_inc(sk);
+ sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2];
+
+ return 0;
}
-static void mptcp_subflow_shutdown(struct sock *ssk, int how)
+static void __mptcp_clear_xmit(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct mptcp_data_frag *dtmp, *dfrag;
+
+ sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
+
+ list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
+ dfrag_clear(sk, dfrag);
+}
+
+static void mptcp_cancel_work(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ if (cancel_work_sync(&msk->work))
+ sock_put(sk);
+}
+
+static void mptcp_subflow_shutdown(struct sock *ssk, int how,
+ bool data_fin_tx_enable, u64 data_fin_tx_seq)
{
lock_sock(ssk);
@@ -582,6 +1135,14 @@ static void mptcp_subflow_shutdown(struct sock *ssk, int how)
tcp_disconnect(ssk, O_NONBLOCK);
break;
default:
+ if (data_fin_tx_enable) {
+ struct mptcp_subflow_context *subflow;
+
+ subflow = mptcp_subflow_ctx(ssk);
+ subflow->data_fin_tx_seq = data_fin_tx_seq;
+ subflow->data_fin_tx_enable = 1;
+ }
+
ssk->sk_shutdown |= how;
tcp_shutdown(ssk, how);
break;
@@ -598,22 +1159,36 @@ static void mptcp_close(struct sock *sk, long timeout)
struct mptcp_subflow_context *subflow, *tmp;
struct mptcp_sock *msk = mptcp_sk(sk);
LIST_HEAD(conn_list);
+ u64 data_fin_tx_seq;
lock_sock(sk);
mptcp_token_destroy(msk->token);
inet_sk_state_store(sk, TCP_CLOSE);
+ __mptcp_flush_join_list(msk);
+
list_splice_init(&msk->conn_list, &conn_list);
+ data_fin_tx_seq = msk->write_seq;
+
+ __mptcp_clear_xmit(sk);
+
release_sock(sk);
list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ subflow->data_fin_tx_seq = data_fin_tx_seq;
+ subflow->data_fin_tx_enable = 1;
__mptcp_close_ssk(sk, ssk, subflow, timeout);
}
+ mptcp_cancel_work(sk);
+ mptcp_pm_close(msk);
+
+ __skb_queue_purge(&sk->sk_receive_queue);
+
sk_common_release(sk);
}
@@ -640,6 +1215,15 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}
+static int mptcp_disconnect(struct sock *sk, int flags)
+{
+ lock_sock(sk);
+ __mptcp_clear_xmit(sk);
+ release_sock(sk);
+ mptcp_cancel_work(sk);
+ return tcp_disconnect(sk, flags);
+}
+
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
{
@@ -649,9 +1233,12 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
}
#endif
-static struct sock *mptcp_sk_clone_lock(const struct sock *sk)
+struct sock *mptcp_sk_clone(const struct sock *sk, struct request_sock *req)
{
+ struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
+ struct mptcp_sock *msk;
+ u64 ack_seq;
if (!nsk)
return NULL;
@@ -661,6 +1248,40 @@ static struct sock *mptcp_sk_clone_lock(const struct sock *sk)
inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif
+ __mptcp_init_sock(nsk);
+
+ msk = mptcp_sk(nsk);
+ msk->local_key = subflow_req->local_key;
+ msk->token = subflow_req->token;
+ msk->subflow = NULL;
+
+ if (unlikely(mptcp_token_new_accept(subflow_req->token, nsk))) {
+ bh_unlock_sock(nsk);
+
+ /* we can't call into mptcp_close() here - we may be in BH context;
+ * free the sock directly
+ */
+ nsk->sk_prot->destroy(nsk);
+ sk_free(nsk);
+ return NULL;
+ }
+
+ msk->write_seq = subflow_req->idsn + 1;
+ atomic64_set(&msk->snd_una, msk->write_seq);
+ if (subflow_req->remote_key_valid) {
+ msk->can_ack = true;
+ msk->remote_key = subflow_req->remote_key;
+ mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
+ ack_seq++;
+ msk->ack_seq = ack_seq;
+ }
+
+ /* will be fully established after successful MPC subflow creation */
+ inet_sk_state_store(nsk, TCP_SYN_RECV);
+ bh_unlock_sock(nsk);
+
+ /* keep a single reference */
+ __sock_put(nsk);
return nsk;
}
@@ -688,62 +1309,37 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
struct mptcp_subflow_context *subflow;
struct sock *new_mptcp_sock;
struct sock *ssk = newsk;
- u64 ack_seq;
subflow = mptcp_subflow_ctx(newsk);
- lock_sock(sk);
+ new_mptcp_sock = subflow->conn;
- local_bh_disable();
- new_mptcp_sock = mptcp_sk_clone_lock(sk);
- if (!new_mptcp_sock) {
- *err = -ENOBUFS;
- local_bh_enable();
- release_sock(sk);
- mptcp_subflow_shutdown(newsk, SHUT_RDWR + 1);
- tcp_close(newsk, 0);
- return NULL;
+ /* is_mptcp should be false if subflow->conn is missing, see
+ * subflow_syn_recv_sock()
+ */
+ if (WARN_ON_ONCE(!new_mptcp_sock)) {
+ tcp_sk(newsk)->is_mptcp = 0;
+ return newsk;
}
- __mptcp_init_sock(new_mptcp_sock);
+ /* acquire the 2nd reference for the owning socket */
+ sock_hold(new_mptcp_sock);
+ local_bh_disable();
+ bh_lock_sock(new_mptcp_sock);
msk = mptcp_sk(new_mptcp_sock);
- msk->local_key = subflow->local_key;
- msk->token = subflow->token;
- msk->subflow = NULL;
msk->first = newsk;
- mptcp_token_update_accept(newsk, new_mptcp_sock);
-
- msk->write_seq = subflow->idsn + 1;
- if (subflow->can_ack) {
- msk->can_ack = true;
- msk->remote_key = subflow->remote_key;
- mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
- ack_seq++;
- msk->ack_seq = ack_seq;
- }
newsk = new_mptcp_sock;
mptcp_copy_inaddrs(newsk, ssk);
list_add(&subflow->node, &msk->conn_list);
- /* will be fully established at mptcp_stream_accept()
- * completion.
- */
- inet_sk_state_store(new_mptcp_sock, TCP_SYN_RECV);
bh_unlock_sock(new_mptcp_sock);
- local_bh_enable();
- release_sock(sk);
- /* the subflow can already receive packet, avoid racing with
- * the receive path and process the pending ones
- */
- lock_sock(ssk);
- subflow->rel_write_seq = 1;
- subflow->tcp_sock = ssk;
- subflow->conn = new_mptcp_sock;
- if (unlikely(!skb_queue_empty(&ssk->sk_receive_queue)))
- mptcp_subflow_data_available(ssk);
- release_sock(ssk);
+ __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
+ local_bh_enable();
+ } else {
+ MPTCP_INC_STATS(sock_net(sk),
+ MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
}
return newsk;
@@ -755,6 +1351,8 @@ static void mptcp_destroy(struct sock *sk)
if (msk->cached_ext)
__skb_ext_put(msk->cached_ext);
+
+ sk_sockets_allocated_dec(sk);
}
static int mptcp_setsockopt(struct sock *sk, int level, int optname,
@@ -807,6 +1405,40 @@ static int mptcp_getsockopt(struct sock *sk, int level, int optname,
return -EOPNOTSUPP;
}
+#define MPTCP_DEFERRED_ALL (TCPF_DELACK_TIMER_DEFERRED | \
+ TCPF_WRITE_TIMER_DEFERRED)
+
+/* this is very similar to tcp_release_cb(), but we must handle a
+ * different set of deferred events
+ */
+static void mptcp_release_cb(struct sock *sk)
+{
+ unsigned long flags, nflags;
+
+ do {
+ flags = sk->sk_tsq_flags;
+ if (!(flags & MPTCP_DEFERRED_ALL))
+ return;
+ nflags = flags & ~MPTCP_DEFERRED_ALL;
+ } while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
+
+ sock_release_ownership(sk);
+
+ if (flags & TCPF_DELACK_TIMER_DEFERRED) {
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct sock *ssk;
+
+ ssk = mptcp_subflow_recv_lookup(msk);
+ if (!ssk || !schedule_work(&msk->work))
+ __sock_put(sk);
+ }
+
+ if (flags & TCPF_WRITE_TIMER_DEFERRED) {
+ mptcp_retransmit_handler(sk);
+ __sock_put(sk);
+ }
+}
+
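The loop above atomically claims the deferred-event bits so that each event is handled exactly once after socket ownership is released. A runnable userspace analogue, using C11 atomics in place of cmpxchg(); the event names and bit values are invented for illustration:

	#include <stdatomic.h>
	#include <stdio.h>

	#define EV_A	(1ul << 0)
	#define EV_B	(1ul << 1)
	#define EV_ALL	(EV_A | EV_B)

	static _Atomic unsigned long tsq_flags = EV_A | EV_B;

	int main(void)
	{
		unsigned long flags = atomic_load(&tsq_flags);
		unsigned long nflags;

		/* clear the deferred bits while remembering which were set */
		do {
			if (!(flags & EV_ALL))
				return 0;
			nflags = flags & ~EV_ALL;
		} while (!atomic_compare_exchange_weak(&tsq_flags, &flags, nflags));

		if (flags & EV_A)
			printf("handle deferred event A\n");
		if (flags & EV_B)
			printf("handle deferred event B\n");
		return 0;
	}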
static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -828,13 +1460,15 @@ void mptcp_finish_connect(struct sock *ssk)
u64 ack_seq;
subflow = mptcp_subflow_ctx(ssk);
-
- if (!subflow->mp_capable)
- return;
-
sk = subflow->conn;
msk = mptcp_sk(sk);
+ if (!subflow->mp_capable) {
+ MPTCP_INC_STATS(sock_net(sk),
+ MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
+ return;
+ }
+
pr_debug("msk=%p, token=%u", sk, subflow->token);
mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
@@ -852,6 +1486,9 @@ void mptcp_finish_connect(struct sock *ssk)
WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
WRITE_ONCE(msk->ack_seq, ack_seq);
WRITE_ONCE(msk->can_ack, 1);
+ atomic64_set(&msk->snd_una, msk->write_seq);
+
+ mptcp_pm_new_connection(msk, 0);
}
static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
@@ -863,6 +1500,46 @@ static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
write_unlock_bh(&sk->sk_callback_lock);
}
+bool mptcp_finish_join(struct sock *sk)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ struct sock *parent = (void *)msk;
+ struct socket *parent_sock;
+ bool ret;
+
+ pr_debug("msk=%p, subflow=%p", msk, subflow);
+
+ /* mptcp socket already closing? */
+ if (inet_sk_state_load(parent) != TCP_ESTABLISHED)
+ return false;
+
+ if (!msk->pm.server_side)
+ return true;
+
+ /* passive connection, attach to msk socket */
+ parent_sock = READ_ONCE(parent->sk_socket);
+ if (parent_sock && !sk->sk_socket)
+ mptcp_sock_graft(sk, parent_sock);
+
+ ret = mptcp_pm_allow_new_subflow(msk);
+ if (ret) {
+ /* active connections are already on conn_list */
+ spin_lock_bh(&msk->join_list_lock);
+ if (!WARN_ON_ONCE(!list_empty(&subflow->node)))
+ list_add_tail(&subflow->node, &msk->join_list);
+ spin_unlock_bh(&msk->join_list_lock);
+ }
+ return ret;
+}
+
+bool mptcp_sk_is_subflow(const struct sock *sk)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+
+ return subflow->mp_join == 1;
+}
+
static bool mptcp_memory_free(const struct sock *sk, int wake)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -874,6 +1551,7 @@ static struct proto mptcp_prot = {
.name = "MPTCP",
.owner = THIS_MODULE,
.init = mptcp_init_sock,
+ .disconnect = mptcp_disconnect,
.close = mptcp_close,
.accept = mptcp_accept,
.setsockopt = mptcp_setsockopt,
@@ -882,10 +1560,16 @@ static struct proto mptcp_prot = {
.destroy = mptcp_destroy,
.sendmsg = mptcp_sendmsg,
.recvmsg = mptcp_recvmsg,
+ .release_cb = mptcp_release_cb,
.hash = inet_hash,
.unhash = inet_unhash,
.get_port = mptcp_get_port,
+ .sockets_allocated = &mptcp_sockets_allocated,
+ .memory_allocated = &tcp_memory_allocated,
+ .memory_pressure = &tcp_memory_pressure,
.stream_memory_free = mptcp_memory_free,
+ .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
+ .sysctl_mem = sysctl_tcp_mem,
.obj_size = sizeof(struct mptcp_sock),
.no_autobind = true,
};
@@ -1041,14 +1725,13 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
/* set ssk->sk_socket of accept()ed flows to mptcp socket.
* This is needed so NOSPACE flag can be set from tcp stack.
*/
+ __mptcp_flush_join_list(msk);
list_for_each_entry(subflow, &msk->conn_list, node) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
if (!ssk->sk_socket)
mptcp_sock_graft(ssk, newsock);
}
-
- inet_sk_state_store(newsock->sk, TCP_ESTABLISHED);
}
sock_put(ssock->sk);
@@ -1124,10 +1807,11 @@ static int mptcp_shutdown(struct socket *sock, int how)
sock->state = SS_CONNECTED;
}
+ __mptcp_flush_join_list(msk);
mptcp_for_each_subflow(msk, subflow) {
struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
- mptcp_subflow_shutdown(tcp_sk, how);
+ mptcp_subflow_shutdown(tcp_sk, how, 1, msk->write_seq);
}
out_unlock:
@@ -1174,12 +1858,18 @@ void mptcp_proto_init(void)
{
mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
+ if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
+ panic("Failed to allocate MPTCP pcpu counter\n");
+
mptcp_subflow_init();
+ mptcp_pm_init();
if (proto_register(&mptcp_prot, 1) != 0)
panic("Failed to register MPTCP proto.\n");
inet_register_protosw(&mptcp_protosw);
+
+ BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 9f8663b30456..f733c5425552 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -17,6 +17,12 @@
#define OPTION_MPTCP_MPC_SYN BIT(0)
#define OPTION_MPTCP_MPC_SYNACK BIT(1)
#define OPTION_MPTCP_MPC_ACK BIT(2)
+#define OPTION_MPTCP_MPJ_SYN BIT(3)
+#define OPTION_MPTCP_MPJ_SYNACK BIT(4)
+#define OPTION_MPTCP_MPJ_ACK BIT(5)
+#define OPTION_MPTCP_ADD_ADDR BIT(6)
+#define OPTION_MPTCP_ADD_ADDR6 BIT(7)
+#define OPTION_MPTCP_RM_ADDR BIT(8)
/* MPTCP option subtypes */
#define MPTCPOPT_MP_CAPABLE 0
@@ -33,12 +39,30 @@
#define TCPOLEN_MPTCP_MPC_SYNACK 12
#define TCPOLEN_MPTCP_MPC_ACK 20
#define TCPOLEN_MPTCP_MPC_ACK_DATA 22
+#define TCPOLEN_MPTCP_MPJ_SYN 12
+#define TCPOLEN_MPTCP_MPJ_SYNACK 16
+#define TCPOLEN_MPTCP_MPJ_ACK 24
#define TCPOLEN_MPTCP_DSS_BASE 4
#define TCPOLEN_MPTCP_DSS_ACK32 4
#define TCPOLEN_MPTCP_DSS_ACK64 8
#define TCPOLEN_MPTCP_DSS_MAP32 10
#define TCPOLEN_MPTCP_DSS_MAP64 14
#define TCPOLEN_MPTCP_DSS_CHECKSUM 2
+#define TCPOLEN_MPTCP_ADD_ADDR 16
+#define TCPOLEN_MPTCP_ADD_ADDR_PORT 18
+#define TCPOLEN_MPTCP_ADD_ADDR_BASE 8
+#define TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT 10
+#define TCPOLEN_MPTCP_ADD_ADDR6 28
+#define TCPOLEN_MPTCP_ADD_ADDR6_PORT 30
+#define TCPOLEN_MPTCP_ADD_ADDR6_BASE 20
+#define TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT 22
+#define TCPOLEN_MPTCP_PORT_LEN 2
+#define TCPOLEN_MPTCP_RM_ADDR_BASE 4
+
+/* MPTCP MP_JOIN flags */
+#define MPTCPOPT_BACKUP BIT(0)
+#define MPTCPOPT_HMAC_LEN 20
+#define MPTCPOPT_THMAC_LEN 8
/* MPTCP MP_CAPABLE flags */
#define MPTCP_VERSION_MASK (0x0F)
@@ -55,9 +79,75 @@
#define MPTCP_DSS_HAS_ACK BIT(0)
#define MPTCP_DSS_FLAG_MASK (0x1F)
+/* MPTCP ADD_ADDR flags */
+#define MPTCP_ADDR_ECHO BIT(0)
+#define MPTCP_ADDR_HMAC_LEN 20
+#define MPTCP_ADDR_IPVERSION_4 4
+#define MPTCP_ADDR_IPVERSION_6 6
+
/* MPTCP socket flags */
#define MPTCP_DATA_READY 0
#define MPTCP_SEND_SPACE 1
+#define MPTCP_WORK_RTX 2
+
+static inline __be32 mptcp_option(u8 subopt, u8 len, u8 nib, u8 field)
+{
+ return htonl((TCPOPT_MPTCP << 24) | (len << 16) | (subopt << 12) |
+ ((nib & 0xF) << 8) | field);
+}
+
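mptcp_option() packs the option kind (TCPOPT_MPTCP, value 30), the length, a 4-bit subtype, a 4-bit nibble and an 8-bit field into one 32-bit word in network byte order. A standalone userspace sketch of the same packing; the sample values (subtype 0, length 4, version nibble 1, empty field) are illustrative only:

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	static uint32_t pack_mptcp_option(uint8_t subopt, uint8_t len,
					  uint8_t nib, uint8_t field)
	{
		return htonl((30u << 24) | ((uint32_t)len << 16) |
			     ((uint32_t)subopt << 12) |
			     ((nib & 0xFu) << 8) | field);
	}

	int main(void)
	{
		/* MP_CAPABLE (subtype 0), length 4, version 1, no flags */
		uint32_t w = pack_mptcp_option(0, 4, 1, 0);

		printf("on-wire word: %08x\n", (unsigned)ntohl(w)); /* 1e040100 */
		return 0;
	}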
+#define MPTCP_PM_MAX_ADDR 4
+
+struct mptcp_addr_info {
+ sa_family_t family;
+ __be16 port;
+ u8 id;
+ union {
+ struct in_addr addr;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ struct in6_addr addr6;
+#endif
+ };
+};
+
+enum mptcp_pm_status {
+ MPTCP_PM_ADD_ADDR_RECEIVED,
+ MPTCP_PM_ESTABLISHED,
+ MPTCP_PM_SUBFLOW_ESTABLISHED,
+};
+
+struct mptcp_pm_data {
+ struct mptcp_addr_info local;
+ struct mptcp_addr_info remote;
+
+ spinlock_t lock; /* protects the whole PM data */
+
+ bool addr_signal;
+ bool server_side;
+ bool work_pending;
+ bool accept_addr;
+ bool accept_subflow;
+ u8 add_addr_signaled;
+ u8 add_addr_accepted;
+ u8 local_addr_used;
+ u8 subflows;
+ u8 add_addr_signal_max;
+ u8 add_addr_accept_max;
+ u8 local_addr_max;
+ u8 subflows_max;
+ u8 status;
+
+ struct work_struct work;
+};
+
+struct mptcp_data_frag {
+ struct list_head list;
+ u64 data_seq;
+ int data_len;
+ int offset;
+ int overhead;
+ struct page *page;
+};
/* MPTCP connection sock */
struct mptcp_sock {
@@ -67,13 +157,20 @@ struct mptcp_sock {
u64 remote_key;
u64 write_seq;
u64 ack_seq;
+ atomic64_t snd_una;
+ unsigned long timer_ival;
u32 token;
unsigned long flags;
bool can_ack;
+ spinlock_t join_list_lock;
+ struct work_struct work;
struct list_head conn_list;
+ struct list_head rtx_queue;
+ struct list_head join_list;
struct skb_ext *cached_ext; /* for the next sendmsg */
struct socket *subflow; /* outgoing connect/listener/!mp_capable */
struct sock *first;
+ struct mptcp_pm_data pm;
};
#define mptcp_for_each_subflow(__msk, __subflow) \
@@ -84,17 +181,42 @@ static inline struct mptcp_sock *mptcp_sk(const struct sock *sk)
return (struct mptcp_sock *)sk;
}
+static inline struct mptcp_data_frag *mptcp_rtx_tail(const struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ if (list_empty(&msk->rtx_queue))
+ return NULL;
+
+ return list_last_entry(&msk->rtx_queue, struct mptcp_data_frag, list);
+}
+
+static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ if (list_empty(&msk->rtx_queue))
+ return NULL;
+
+ return list_first_entry(&msk->rtx_queue, struct mptcp_data_frag, list);
+}
+
struct mptcp_subflow_request_sock {
struct tcp_request_sock sk;
u16 mp_capable : 1,
mp_join : 1,
backup : 1,
remote_key_valid : 1;
+ u8 local_id;
+ u8 remote_id;
u64 local_key;
u64 remote_key;
u64 idsn;
u32 token;
u32 ssn_offset;
+ u64 thmac;
+ u32 local_nonce;
+ u32 remote_nonce;
};
static inline struct mptcp_subflow_request_sock *
@@ -117,14 +239,28 @@ struct mptcp_subflow_context {
u32 ssn_offset;
u32 map_data_len;
u32 request_mptcp : 1, /* send MP_CAPABLE */
+ request_join : 1, /* send MP_JOIN */
+ request_bkup : 1,
mp_capable : 1, /* remote is MPTCP capable */
- fourth_ack : 1, /* send initial DSS */
+ mp_join : 1, /* remote is JOINing */
+ fully_established : 1, /* path validated */
+ pm_notified : 1, /* PM hook called for established status */
conn_finished : 1,
map_valid : 1,
mpc_map : 1,
+ backup : 1,
data_avail : 1,
rx_eof : 1,
+ data_fin_tx_enable : 1,
 can_ack : 1; /* only after processing the remote key */
+ u64 data_fin_tx_seq;
+ u32 remote_nonce;
+ u64 thmac;
+ u32 local_nonce;
+ u32 remote_token;
+ u8 hmac[MPTCPOPT_HMAC_LEN];
+ u8 local_id;
+ u8 remote_id;
struct sock *tcp_sock; /* tcp sk backpointer */
struct sock *conn; /* parent mptcp_sock */
@@ -168,6 +304,11 @@ mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
int mptcp_is_enabled(struct net *net);
bool mptcp_subflow_data_available(struct sock *sk);
void mptcp_subflow_init(void);
+
+/* called with sk socket lock held */
+int __mptcp_subflow_connect(struct sock *sk, int ifindex,
+ const struct mptcp_addr_info *loc,
+ const struct mptcp_addr_info *remote);
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock);
static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
@@ -190,23 +331,20 @@ void mptcp_proto_init(void);
int mptcp_proto_v6_init(void);
#endif
-struct mptcp_read_arg {
- struct msghdr *msg;
-};
-
-int mptcp_read_actor(read_descriptor_t *desc, struct sk_buff *skb,
- unsigned int offset, size_t len);
-
+struct sock *mptcp_sk_clone(const struct sock *sk, struct request_sock *req);
void mptcp_get_options(const struct sk_buff *skb,
struct tcp_options_received *opt_rx);
void mptcp_finish_connect(struct sock *sk);
+void mptcp_data_ready(struct sock *sk, struct sock *ssk);
+bool mptcp_finish_join(struct sock *sk);
+void mptcp_data_acked(struct sock *sk);
int mptcp_token_new_request(struct request_sock *req);
void mptcp_token_destroy_request(u32 token);
int mptcp_token_new_connect(struct sock *sk);
-int mptcp_token_new_accept(u32 token);
-void mptcp_token_update_accept(struct sock *sk, struct sock *conn);
+int mptcp_token_new_accept(u32 token, struct sock *conn);
+struct mptcp_sock *mptcp_token_get_sock(u32 token);
void mptcp_token_destroy(u32 token);
void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn);
@@ -222,8 +360,48 @@ static inline void mptcp_crypto_key_gen_sha(u64 *key, u32 *token, u64 *idsn)
mptcp_crypto_key_sha(*key, token, idsn);
}
-void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
- void *hash_out);
+void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac);
+
+void mptcp_pm_init(void);
+void mptcp_pm_data_init(struct mptcp_sock *msk);
+void mptcp_pm_close(struct mptcp_sock *msk);
+void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side);
+void mptcp_pm_fully_established(struct mptcp_sock *msk);
+bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk);
+void mptcp_pm_connection_closed(struct mptcp_sock *msk);
+void mptcp_pm_subflow_established(struct mptcp_sock *msk,
+ struct mptcp_subflow_context *subflow);
+void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id);
+void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr);
+
+int mptcp_pm_announce_addr(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr);
+int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id);
+int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id);
+
+static inline bool mptcp_pm_should_signal(struct mptcp_sock *msk)
+{
+ return READ_ONCE(msk->pm.addr_signal);
+}
+
+static inline unsigned int mptcp_add_addr_len(int family)
+{
+ if (family == AF_INET)
+ return TCPOLEN_MPTCP_ADD_ADDR;
+ return TCPOLEN_MPTCP_ADD_ADDR6;
+}
+
+bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ struct mptcp_addr_info *saddr);
+int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
+
+void mptcp_pm_nl_init(void);
+void mptcp_pm_nl_data_init(struct mptcp_sock *msk);
+void mptcp_pm_nl_fully_established(struct mptcp_sock *msk);
+void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk);
+void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk);
+int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
static inline struct mptcp_ext *mptcp_get_ext(struct sk_buff *skb)
{
@@ -237,4 +415,6 @@ static inline bool before64(__u64 seq1, __u64 seq2)
#define after64(seq2, seq1) before64(seq1, seq2)
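before64()/after64() rely on the usual wrap-safe idiom: subtract modulo 2^64 and test the sign of the result. A runnable sketch, assuming the conventional before64() body consistent with the after64() definition above:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	static bool before64(uint64_t seq1, uint64_t seq2)
	{
		/* modular subtraction, reinterpreted as signed */
		return (int64_t)(seq1 - seq2) < 0;
	}

	int main(void)
	{
		assert(before64(1, 2));
		/* still true just across the 2^64 wrap point */
		assert(before64(UINT64_MAX, 0));
		return 0;
	}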
+void mptcp_diag_subflow_init(struct tcp_ulp_ops *ops);
+
#endif /* __MPTCP_PROTOCOL_H */
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 65122edf60aa..b5180c81588e 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <crypto/algapi.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
@@ -19,17 +20,42 @@
#endif
#include <net/mptcp.h>
#include "protocol.h"
+#include "mib.h"
+
+static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
+ enum linux_mptcp_mib_field field)
+{
+ MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
+}
static int subflow_rebuild_header(struct sock *sk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- int err = 0;
+ int local_id, err = 0;
if (subflow->request_mptcp && !subflow->token) {
pr_debug("subflow=%p", sk);
err = mptcp_token_new_connect(sk);
+ } else if (subflow->request_join && !subflow->local_nonce) {
+ struct mptcp_sock *msk = (struct mptcp_sock *)subflow->conn;
+
+ pr_debug("subflow=%p", sk);
+
+ do {
+ get_random_bytes(&subflow->local_nonce, sizeof(u32));
+ } while (!subflow->local_nonce);
+
+ if (subflow->local_id)
+ goto out;
+
+ local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
+ if (local_id < 0)
+ return -EINVAL;
+
+ subflow->local_id = local_id;
}
+out:
if (err)
return err;
@@ -47,6 +73,51 @@ static void subflow_req_destructor(struct request_sock *req)
tcp_request_sock_ops.destructor(req);
}
+static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
+ void *hmac)
+{
+ u8 msg[8];
+
+ put_unaligned_be32(nonce1, &msg[0]);
+ put_unaligned_be32(nonce2, &msg[4]);
+
+ mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
+}
+
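The HMAC message for MP_JOIN is simply the two 32-bit nonces serialized big-endian and concatenated into 8 bytes before being fed to the keyed hash. A userspace sketch of that layout; the nonce values are arbitrary:

	#include <stdio.h>
	#include <stdint.h>

	static void put_be32(uint32_t v, uint8_t *p)
	{
		p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
	}

	int main(void)
	{
		uint8_t msg[8];

		put_be32(0x11223344u, &msg[0]);	/* local nonce */
		put_be32(0xaabbccddu, &msg[4]);	/* remote nonce */
		for (int i = 0; i < 8; i++)
			printf("%02x", msg[i]);
		printf("\n");			/* 11223344aabbccdd */
		return 0;
	}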
+/* validate received token and create truncated hmac and nonce for SYN-ACK */
+static bool subflow_token_join_request(struct request_sock *req,
+ const struct sk_buff *skb)
+{
+ struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+ u8 hmac[MPTCPOPT_HMAC_LEN];
+ struct mptcp_sock *msk;
+ int local_id;
+
+ msk = mptcp_token_get_sock(subflow_req->token);
+ if (!msk) {
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
+ return false;
+ }
+
+ local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
+ if (local_id < 0) {
+ sock_put((struct sock *)msk);
+ return false;
+ }
+ subflow_req->local_id = local_id;
+
+ get_random_bytes(&subflow_req->local_nonce, sizeof(u32));
+
+ subflow_generate_hmac(msk->local_key, msk->remote_key,
+ subflow_req->local_nonce,
+ subflow_req->remote_nonce, hmac);
+
+ subflow_req->thmac = get_unaligned_be64(hmac);
+
+ sock_put((struct sock *)msk);
+ return true;
+}
+
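Only the first 64 bits of the digest travel in the SYN-ACK, read as a big-endian u64 (the "truncated HMAC" stored in thmac above). A minimal userspace illustration of that truncation; the digest bytes are made up:

	#include <assert.h>
	#include <stdint.h>

	static uint64_t get_be64(const uint8_t *p)
	{
		uint64_t v = 0;

		for (int i = 0; i < 8; i++)
			v = (v << 8) | p[i];
		return v;
	}

	int main(void)
	{
		/* pretend this is the leading part of a 20-byte digest */
		uint8_t hmac[20] = { 0xde, 0xad, 0xbe, 0xef,
				     0x00, 0x11, 0x22, 0x33 };

		assert(get_be64(hmac) == 0xdeadbeef00112233ull);
		return 0;
	}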
static void subflow_init_req(struct request_sock *req,
const struct sock *sk_listener,
struct sk_buff *skb)
@@ -61,6 +132,7 @@ static void subflow_init_req(struct request_sock *req,
mptcp_get_options(skb, &rx_opt);
subflow_req->mp_capable = 0;
+ subflow_req->mp_join = 0;
subflow_req->remote_key_valid = 0;
#ifdef CONFIG_TCP_MD5SIG
@@ -71,6 +143,15 @@ static void subflow_init_req(struct request_sock *req,
return;
#endif
+ if (rx_opt.mptcp.mp_capable) {
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
+
+ if (rx_opt.mptcp.mp_join)
+ return;
+ } else if (rx_opt.mptcp.mp_join) {
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
+ }
+
if (rx_opt.mptcp.mp_capable && listener->request_mptcp) {
int err;
@@ -79,6 +160,19 @@ static void subflow_init_req(struct request_sock *req,
subflow_req->mp_capable = 1;
subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
+ } else if (rx_opt.mptcp.mp_join && listener->request_mptcp) {
+ subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
+ subflow_req->mp_join = 1;
+ subflow_req->backup = rx_opt.mptcp.backup;
+ subflow_req->remote_id = rx_opt.mptcp.join_id;
+ subflow_req->token = rx_opt.mptcp.token;
+ subflow_req->remote_nonce = rx_opt.mptcp.nonce;
+ pr_debug("token=%u, remote_nonce=%u", subflow_req->token,
+ subflow_req->remote_nonce);
+ if (!subflow_token_join_request(req, skb)) {
+ subflow_req->mp_join = 0;
+ /* TODO: need to trigger RST */
+ }
}
}
@@ -106,13 +200,41 @@ static void subflow_v6_init_req(struct request_sock *req,
}
#endif
+/* validate received truncated hmac and create hmac for third ACK */
+static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
+{
+ u8 hmac[MPTCPOPT_HMAC_LEN];
+ u64 thmac;
+
+ subflow_generate_hmac(subflow->remote_key, subflow->local_key,
+ subflow->remote_nonce, subflow->local_nonce,
+ hmac);
+
+ thmac = get_unaligned_be64(hmac);
+ pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
+ subflow, subflow->token,
+ (unsigned long long)thmac,
+ (unsigned long long)subflow->thmac);
+
+ return thmac == subflow->thmac;
+}
+
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ struct sock *parent = subflow->conn;
subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
- if (subflow->conn && !subflow->conn_finished) {
+ if (inet_sk_state_load(parent) != TCP_ESTABLISHED) {
+ inet_sk_state_store(parent, TCP_ESTABLISHED);
+ parent->sk_state_change(parent);
+ }
+
+ if (subflow->conn_finished || !tcp_sk(sk)->is_mptcp)
+ return;
+
+ if (subflow->mp_capable) {
pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk),
subflow->remote_key);
mptcp_finish_connect(sk);
@@ -122,6 +244,33 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
pr_debug("synack seq=%u", TCP_SKB_CB(skb)->seq);
subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
}
+ } else if (subflow->mp_join) {
+ pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u",
+ subflow, subflow->thmac,
+ subflow->remote_nonce);
+ if (!subflow_thmac_valid(subflow)) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
+ subflow->mp_join = 0;
+ goto do_reset;
+ }
+
+ subflow_generate_hmac(subflow->local_key, subflow->remote_key,
+ subflow->local_nonce,
+ subflow->remote_nonce,
+ subflow->hmac);
+
+ if (skb)
+ subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
+
+ if (!mptcp_finish_join(sk))
+ goto do_reset;
+
+ subflow->conn_finished = 1;
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
+ } else {
+do_reset:
+ tcp_send_active_reset(sk, GFP_ATOMIC);
+ tcp_done(sk);
}
}
@@ -172,6 +321,32 @@ drop:
}
#endif
+/* validate hmac received in third ACK */
+static bool subflow_hmac_valid(const struct request_sock *req,
+ const struct tcp_options_received *rx_opt)
+{
+ const struct mptcp_subflow_request_sock *subflow_req;
+ u8 hmac[MPTCPOPT_HMAC_LEN];
+ struct mptcp_sock *msk;
+ bool ret;
+
+ subflow_req = mptcp_subflow_rsk(req);
+ msk = mptcp_token_get_sock(subflow_req->token);
+ if (!msk)
+ return false;
+
+ subflow_generate_hmac(msk->remote_key, msk->local_key,
+ subflow_req->remote_nonce,
+ subflow_req->local_nonce, hmac);
+
+ ret = true;
+ if (crypto_memneq(hmac, rx_opt->mptcp.hmac, sizeof(hmac)))
+ ret = false;
+
+ sock_put((struct sock *)msk);
+ return ret;
+}
+
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
struct sk_buff *skb,
struct request_sock *req,
@@ -182,6 +357,8 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
struct mptcp_subflow_request_sock *subflow_req;
struct tcp_options_received opt_rx;
+ bool fallback_is_fatal = false;
+ struct sock *new_msk = NULL;
struct sock *child;
pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
@@ -197,7 +374,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
* out-of-order pkt, which will not carry the MP_CAPABLE
* opt even on mptcp enabled paths
*/
- goto create_child;
+ goto create_msk;
}
opt_rx.mptcp.mp_capable = 0;
@@ -207,6 +384,21 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
subflow_req->remote_key_valid = 1;
} else {
subflow_req->mp_capable = 0;
+ goto create_child;
+ }
+
+create_msk:
+ new_msk = mptcp_sk_clone(listener->conn, req);
+ if (!new_msk)
+ subflow_req->mp_capable = 0;
+ } else if (subflow_req->mp_join) {
+ fallback_is_fatal = true;
+ opt_rx.mptcp.mp_join = 0;
+ mptcp_get_options(skb, &opt_rx);
+ if (!opt_rx.mptcp.mp_join ||
+ !subflow_hmac_valid(req, &opt_rx)) {
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
+ return NULL;
}
}
@@ -217,22 +409,45 @@ create_child:
if (child && *own_req) {
struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);
- /* we have null ctx on TCP fallback, not fatal on MPC
- * handshake
+ /* we have null ctx on TCP fallback, which is fatal on
+ * MPJ handshake
*/
- if (!ctx)
- return child;
+ if (!ctx) {
+ if (fallback_is_fatal)
+ goto close_child;
+ goto out;
+ }
if (ctx->mp_capable) {
- if (mptcp_token_new_accept(ctx->token))
+ /* new mpc subflow takes ownership of the newly
+ * created mptcp socket
+ */
+ inet_sk_state_store(new_msk, TCP_ESTABLISHED);
+ mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
+ ctx->conn = new_msk;
+ new_msk = NULL;
+ } else if (ctx->mp_join) {
+ struct mptcp_sock *owner;
+
+ owner = mptcp_token_get_sock(ctx->token);
+ if (!owner)
goto close_child;
+
+ ctx->conn = (struct sock *)owner;
+ if (!mptcp_finish_join(child))
+ goto close_child;
+
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
}
}
+out:
+ /* dispose of the left over mptcp master, if any */
+ if (unlikely(new_msk))
+ sock_put(new_msk);
return child;
close_child:
- pr_debug("closing child socket");
tcp_send_active_reset(child, GFP_ATOMIC);
inet_csk_prepare_forced_close(child);
tcp_done(child);
@@ -338,6 +553,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk)
data_len = mpext->data_len;
if (data_len == 0) {
pr_err("Infinite mapping not handled");
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
return MAPPING_INVALID;
}
@@ -381,8 +597,10 @@ static enum mapping_status get_mapping_status(struct sock *ssk)
 /* If this skb's data is fully covered by the current mapping,
* the new map would need caching, which is not supported
*/
- if (skb_is_fully_mapped(ssk, skb))
+ if (skb_is_fully_mapped(ssk, skb)) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
return MAPPING_INVALID;
+ }
/* will validate the next map after consuming the current one */
return MAPPING_OK;
@@ -408,6 +626,18 @@ validate_seq:
return MAPPING_OK;
}
+static int subflow_read_actor(read_descriptor_t *desc,
+ struct sk_buff *skb,
+ unsigned int offset, size_t len)
+{
+ size_t copy_len = min(desc->count, len);
+
+ desc->count -= copy_len;
+
+ pr_debug("flushed %zu bytes, %zu left", copy_len, desc->count);
+ return copy_len;
+}
+
static bool subflow_check_data_avail(struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
@@ -420,9 +650,6 @@ static bool subflow_check_data_avail(struct sock *ssk)
if (subflow->data_avail)
return true;
- if (!subflow->conn)
- return false;
-
msk = mptcp_sk(subflow->conn);
for (;;) {
u32 map_remaining;
@@ -482,16 +709,12 @@ static bool subflow_check_data_avail(struct sock *ssk)
pr_debug("discarding %zu bytes, current map len=%d", delta,
map_remaining);
if (delta) {
- struct mptcp_read_arg arg = {
- .msg = NULL,
- };
read_descriptor_t desc = {
.count = delta,
- .arg.data = &arg,
};
int ret;
- ret = tcp_read_sock(ssk, &desc, mptcp_read_actor);
+ ret = tcp_read_sock(ssk, &desc, subflow_read_actor);
if (ret < 0) {
ssk->sk_err = -ret;
goto fatal;
@@ -546,19 +769,15 @@ static void subflow_data_ready(struct sock *sk)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct sock *parent = subflow->conn;
- if (!parent || !subflow->mp_capable) {
+ if (!subflow->mp_capable && !subflow->mp_join) {
subflow->tcp_data_ready(sk);
- if (parent)
- parent->sk_data_ready(parent);
+ parent->sk_data_ready(parent);
return;
}
- if (mptcp_subflow_data_available(sk)) {
- set_bit(MPTCP_DATA_READY, &mptcp_sk(parent)->flags);
-
- parent->sk_data_ready(parent);
- }
+ if (mptcp_subflow_data_available(sk))
+ mptcp_data_ready(parent, sk);
}
static void subflow_write_space(struct sock *sk)
@@ -567,7 +786,7 @@ static void subflow_write_space(struct sock *sk)
struct sock *parent = subflow->conn;
sk_stream_write_space(sk);
- if (parent && sk_stream_is_writeable(sk)) {
+ if (sk_stream_is_writeable(sk)) {
set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
smp_mb__after_atomic();
/* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
@@ -605,6 +824,85 @@ void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
}
#endif
+static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
+ struct sockaddr_storage *addr)
+{
+ memset(addr, 0, sizeof(*addr));
+ addr->ss_family = info->family;
+ if (addr->ss_family == AF_INET) {
+ struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;
+
+ in_addr->sin_addr = info->addr;
+ in_addr->sin_port = info->port;
+ }
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ else if (addr->ss_family == AF_INET6) {
+ struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;
+
+ in6_addr->sin6_addr = info->addr6;
+ in6_addr->sin6_port = info->port;
+ }
+#endif
+}
+
+int __mptcp_subflow_connect(struct sock *sk, int ifindex,
+ const struct mptcp_addr_info *loc,
+ const struct mptcp_addr_info *remote)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct mptcp_subflow_context *subflow;
+ struct sockaddr_storage addr;
+ struct socket *sf;
+ u32 remote_token;
+ int addrlen;
+ int err;
+
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -ENOTCONN;
+
+ err = mptcp_subflow_create_socket(sk, &sf);
+ if (err)
+ return err;
+
+ subflow = mptcp_subflow_ctx(sf->sk);
+ subflow->remote_key = msk->remote_key;
+ subflow->local_key = msk->local_key;
+ subflow->token = msk->token;
+ mptcp_info2sockaddr(loc, &addr);
+
+ addrlen = sizeof(struct sockaddr_in);
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ if (loc->family == AF_INET6)
+ addrlen = sizeof(struct sockaddr_in6);
+#endif
+ sf->sk->sk_bound_dev_if = ifindex;
+ err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
+ if (err)
+ goto failed;
+
+ mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
+ pr_debug("msk=%p remote_token=%u", msk, remote_token);
+ subflow->remote_token = remote_token;
+ subflow->local_id = loc->id;
+ subflow->request_join = 1;
+ subflow->request_bkup = 1;
+ mptcp_info2sockaddr(remote, &addr);
+
+ err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
+ if (err && err != -EINPROGRESS)
+ goto failed;
+
+ spin_lock_bh(&msk->join_list_lock);
+ list_add_tail(&subflow->node, &msk->join_list);
+ spin_unlock_bh(&msk->join_list_lock);
+
+ return err;
+
+failed:
+ sock_release(sf);
+ return err;
+}
+
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
{
struct mptcp_subflow_context *subflow;
@@ -682,7 +980,7 @@ static bool subflow_is_done(const struct sock *sk)
static void subflow_state_change(struct sock *sk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- struct sock *parent = READ_ONCE(subflow->conn);
+ struct sock *parent = subflow->conn;
__subflow_state_change(sk);
@@ -690,13 +988,10 @@ static void subflow_state_change(struct sock *sk)
* a fin packet carrying a DSS can be unnoticed if we don't trigger
* the data available machinery here.
*/
- if (parent && subflow->mp_capable && mptcp_subflow_data_available(sk)) {
- set_bit(MPTCP_DATA_READY, &mptcp_sk(parent)->flags);
-
- parent->sk_data_ready(parent);
- }
+ if (subflow->mp_capable && mptcp_subflow_data_available(sk))
+ mptcp_data_ready(parent, sk);
- if (parent && !(parent->sk_shutdown & RCV_SHUTDOWN) &&
+ if (!(parent->sk_shutdown & RCV_SHUTDOWN) &&
!subflow->rx_eof && subflow_is_done(sk)) {
subflow->rx_eof = 1;
parent->sk_shutdown |= RCV_SHUTDOWN;
@@ -772,7 +1067,8 @@ static void subflow_ulp_clone(const struct request_sock *req,
struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
struct mptcp_subflow_context *new_ctx;
- if (!tcp_rsk(req)->is_mptcp || !subflow_req->mp_capable) {
+ if (!tcp_rsk(req)->is_mptcp ||
+ (!subflow_req->mp_capable && !subflow_req->mp_join)) {
subflow_ulp_fallback(newsk, old_ctx);
return;
}
@@ -783,22 +1079,35 @@ static void subflow_ulp_clone(const struct request_sock *req,
return;
}
- /* see comments in subflow_syn_recv_sock(), MPTCP connection is fully
- * established only after we receive the remote key
- */
new_ctx->conn_finished = 1;
new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
new_ctx->tcp_state_change = old_ctx->tcp_state_change;
new_ctx->tcp_write_space = old_ctx->tcp_write_space;
- new_ctx->mp_capable = 1;
- new_ctx->fourth_ack = subflow_req->remote_key_valid;
- new_ctx->can_ack = subflow_req->remote_key_valid;
- new_ctx->remote_key = subflow_req->remote_key;
- new_ctx->local_key = subflow_req->local_key;
- new_ctx->token = subflow_req->token;
- new_ctx->ssn_offset = subflow_req->ssn_offset;
- new_ctx->idsn = subflow_req->idsn;
+ new_ctx->rel_write_seq = 1;
+ new_ctx->tcp_sock = newsk;
+
+ if (subflow_req->mp_capable) {
+ /* see comments in subflow_syn_recv_sock(), MPTCP connection
+ * is fully established only after we receive the remote key
+ */
+ new_ctx->mp_capable = 1;
+ new_ctx->fully_established = subflow_req->remote_key_valid;
+ new_ctx->can_ack = subflow_req->remote_key_valid;
+ new_ctx->remote_key = subflow_req->remote_key;
+ new_ctx->local_key = subflow_req->local_key;
+ new_ctx->token = subflow_req->token;
+ new_ctx->ssn_offset = subflow_req->ssn_offset;
+ new_ctx->idsn = subflow_req->idsn;
+ } else if (subflow_req->mp_join) {
+ new_ctx->ssn_offset = subflow_req->ssn_offset;
+ new_ctx->mp_join = 1;
+ new_ctx->fully_established = 1;
+ new_ctx->backup = subflow_req->backup;
+ new_ctx->local_id = subflow_req->local_id;
+ new_ctx->token = subflow_req->token;
+ new_ctx->thmac = subflow_req->thmac;
+ }
}
static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
@@ -860,6 +1169,8 @@ void mptcp_subflow_init(void)
subflow_v6m_specific.net_frag_header_len = 0;
#endif
+ mptcp_diag_subflow_init(&subflow_ulp_ops);
+
if (tcp_register_ulp(&subflow_ulp_ops) != 0)
panic("MPTCP: failed to register subflows to ULP\n");
}
diff --git a/net/mptcp/token.c b/net/mptcp/token.c
index 84d887806090..129a5ad1bc35 100644
--- a/net/mptcp/token.c
+++ b/net/mptcp/token.c
@@ -128,43 +128,43 @@ int mptcp_token_new_connect(struct sock *sk)
*
* Called when a SYN packet creates a new logical connection, i.e.
* is not a join request.
- *
- * We don't have an mptcp socket yet at that point.
- * This is paired with mptcp_token_update_accept, called on accept().
*/
-int mptcp_token_new_accept(u32 token)
+int mptcp_token_new_accept(u32 token, struct sock *conn)
{
int err;
spin_lock_bh(&token_tree_lock);
- err = radix_tree_insert(&token_tree, token, &token_used);
+ err = radix_tree_insert(&token_tree, token, conn);
spin_unlock_bh(&token_tree_lock);
return err;
}
/**
- * mptcp_token_update_accept - update token to map to mptcp socket
- * @conn: the new struct mptcp_sock
- * @sk: the initial subflow for this mptcp socket
+ * mptcp_token_get_sock - retrieve mptcp connection sock using its token
+ * @token: token of the mptcp connection to retrieve
+ *
+ * This function returns the mptcp connection structure with the given token.
+ * A reference on the returned mptcp socket is taken.
*
- * Called when the first mptcp socket is created on accept to
- * refresh the dummy mapping (done to reserve the token) with
- * the mptcp_socket structure that wasn't allocated before.
+ * Returns NULL if no connection with the given token value exists.
*/
-void mptcp_token_update_accept(struct sock *sk, struct sock *conn)
+struct mptcp_sock *mptcp_token_get_sock(u32 token)
{
- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- void __rcu **slot;
+ struct sock *conn;
spin_lock_bh(&token_tree_lock);
- slot = radix_tree_lookup_slot(&token_tree, subflow->token);
- WARN_ON_ONCE(!slot);
- if (slot) {
- WARN_ON_ONCE(rcu_access_pointer(*slot) != &token_used);
- radix_tree_replace_slot(&token_tree, slot, conn);
+ conn = radix_tree_lookup(&token_tree, token);
+ if (conn) {
+ /* token still reserved? */
+ if (conn == (struct sock *)&token_used)
+ conn = NULL;
+ else
+ sock_hold(conn);
}
spin_unlock_bh(&token_tree_lock);
+
+ return mptcp_sk(conn);
}
/**
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 91efae88e8c2..468fea1aebba 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -455,14 +455,6 @@ config NF_TABLES
To compile it as a module, choose M here.
if NF_TABLES
-
-config NF_TABLES_SET
- tristate "Netfilter nf_tables set infrastructure"
- help
- This option enables the nf_tables set infrastructure that allows to
- look up for elements in a set and to build one-way mappings between
- matchings and actions.
-
config NF_TABLES_INET
depends on IPV6
select NF_TABLES_IPV4
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 3f572e5a975e..292e71dc7ba4 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -78,14 +78,17 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
nf_tables_trace.o nft_immediate.o nft_cmp.o nft_range.o \
nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o \
- nft_chain_route.o nf_tables_offload.o
+ nft_chain_route.o nf_tables_offload.o \
+ nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o \
+ nft_set_pipapo.o
-nf_tables_set-objs := nf_tables_set_core.o \
- nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o \
- nft_set_pipapo.o
+ifdef CONFIG_X86_64
+ifneq (,$(findstring -DCONFIG_AS_AVX2=1,$(KBUILD_CFLAGS)))
+nf_tables-objs += nft_set_pipapo_avx2.o
+endif
+endif
obj-$(CONFIG_NF_TABLES) += nf_tables.o
-obj-$(CONFIG_NF_TABLES_SET) += nf_tables_set.o
obj-$(CONFIG_NFT_COMPAT) += nft_compat.o
obj-$(CONFIG_NFT_CONNLIMIT) += nft_connlimit.o
obj-$(CONFIG_NFT_NUMGEN) += nft_numgen.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index 0a2196f59106..486959f70cf3 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -46,7 +46,7 @@ struct bitmap_ip {
u8 netmask; /* subnet netmask */
struct timer_list gc; /* garbage collection */
struct ip_set *set; /* attached to this ip_set */
- unsigned char extensions[0] /* data extensions */
+ unsigned char extensions[] /* data extensions */
__aligned(__alignof__(u64));
};
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 739e343efaf6..2310a316e0af 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -49,7 +49,7 @@ struct bitmap_ipmac {
size_t memsize; /* members size */
struct timer_list gc; /* garbage collector */
struct ip_set *set; /* attached to this ip_set */
- unsigned char extensions[0] /* MAC + data extensions */
+ unsigned char extensions[] /* MAC + data extensions */
__aligned(__alignof__(u64));
};
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index b49978dd810d..e56ced66f202 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -37,7 +37,7 @@ struct bitmap_port {
size_t memsize; /* members size */
struct timer_list gc; /* garbage collection */
struct ip_set *set; /* attached to this ip_set */
- unsigned char extensions[0] /* data extensions */
+ unsigned char extensions[] /* data extensions */
__aligned(__alignof__(u64));
};
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index e52d7b7597a0..1ee43752d6d3 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -76,7 +76,7 @@ struct hbucket {
DECLARE_BITMAP(used, AHASH_MAX_TUNED);
u8 size; /* size of the array */
u8 pos; /* position of the first free entry */
- unsigned char value[0] /* the array of the values */
+ unsigned char value[] /* the array of the values */
__aligned(__alignof__(u64));
};
@@ -109,7 +109,7 @@ struct htable {
u8 htable_bits; /* size of hash table == 2^htable_bits */
u32 maxelem; /* Maxelem per region */
struct ip_set_region *hregion; /* Region locks and ext sizes */
- struct hbucket __rcu *bucket[0]; /* hashtable buckets */
+ struct hbucket __rcu *bucket[]; /* hashtable buckets */
};
#define hbucket(h, i) ((h)->bucket[i])
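These hunks replace the old GNU zero-length-array idiom with a C99 flexible array member; the layout is unchanged and the trailing storage is still sized at allocation time (kernel code would typically use struct_size() for the arithmetic). A small userspace sketch with an invented hbucket_demo type:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	struct hbucket_demo {
		uint8_t size;		/* number of slots allocated */
		uint8_t pos;		/* first free slot */
		unsigned char value[];	/* flexible array member */
	};

	int main(void)
	{
		size_t nslots = 4, slot_sz = 16;
		/* sizeof(*b) covers the header only, not value[] */
		struct hbucket_demo *b = malloc(sizeof(*b) + nslots * slot_sz);

		if (!b)
			return 1;
		b->size = (uint8_t)nslots;
		b->pos = 0;
		printf("header is %zu bytes, payload starts at %p\n",
		       sizeof(*b), (void *)b->value);
		free(b);
		return 0;
	}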
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 512259f579d7..aa6a603a2425 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1661,8 +1661,9 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
struct ip_vs_protocol *pp;
struct ip_vs_proto_data *pd;
unsigned int offset, offset2, ihl, verdict;
- bool ipip, new_cp = false;
+ bool tunnel, new_cp = false;
union nf_inet_addr *raddr;
+ char *outer_proto = "IPIP";
*related = 1;
@@ -1703,8 +1704,8 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
return NF_ACCEPT; /* The packet looks wrong, ignore */
raddr = (union nf_inet_addr *)&cih->daddr;
- /* Special case for errors for IPIP packets */
- ipip = false;
+ /* Special case for errors for IPIP/UDP/GRE tunnel packets */
+ tunnel = false;
if (cih->protocol == IPPROTO_IPIP) {
struct ip_vs_dest *dest;
@@ -1721,7 +1722,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
if (cih == NULL)
return NF_ACCEPT; /* The packet looks wrong, ignore */
- ipip = true;
+ tunnel = true;
} else if ((cih->protocol == IPPROTO_UDP || /* Can be UDP encap */
cih->protocol == IPPROTO_GRE) && /* Can be GRE encap */
/* Error for our tunnel must arrive at LOCAL_IN */
@@ -1729,16 +1730,19 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
__u8 iproto;
int ulen;
- /* Non-first fragment has no UDP header */
+ /* Non-first fragment has no UDP/GRE header */
if (unlikely(cih->frag_off & htons(IP_OFFSET)))
return NF_ACCEPT;
offset2 = offset + cih->ihl * 4;
- if (cih->protocol == IPPROTO_UDP)
+ if (cih->protocol == IPPROTO_UDP) {
ulen = ipvs_udp_decap(ipvs, skb, offset2, AF_INET,
raddr, &iproto);
- else
+ outer_proto = "UDP";
+ } else {
ulen = ipvs_gre_decap(ipvs, skb, offset2, AF_INET,
raddr, &iproto);
+ outer_proto = "GRE";
+ }
if (ulen > 0) {
/* Skip IP and UDP/GRE tunnel headers */
offset = offset2 + ulen;
@@ -1747,7 +1751,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
&_ciph);
if (cih && cih->version == 4 && cih->ihl >= 5 &&
iproto == IPPROTO_IPIP)
- ipip = true;
+ tunnel = true;
else
return NF_ACCEPT;
}
@@ -1767,11 +1771,11 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
"Checking incoming ICMP for");
offset2 = offset;
- ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, !ipip, &ciph);
+ ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, !tunnel, &ciph);
offset = ciph.len;
/* The embedded headers contain source and dest in reverse order.
- * For IPIP this is error for request, not for reply.
+ * For IPIP/UDP/GRE tunnels this is an error for requests, not for replies.
*/
cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
ipvs, AF_INET, skb, &ciph);
@@ -1779,7 +1783,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
if (!cp) {
int v;
- if (ipip || !sysctl_schedule_icmp(ipvs))
+ if (tunnel || !sysctl_schedule_icmp(ipvs))
return NF_ACCEPT;
if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
@@ -1797,7 +1801,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
goto out;
}
- if (ipip) {
+ if (tunnel) {
__be32 info = ic->un.gateway;
__u8 type = ic->type;
__u8 code = ic->code;
@@ -1809,17 +1813,18 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
u32 mtu = ntohs(ic->un.frag.mtu);
__be16 frag_off = cih->frag_off;
- /* Strip outer IP and ICMP, go to IPIP header */
+ /* Strip outer IP and ICMP, go to IPIP/UDP/GRE header */
if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
- goto ignore_ipip;
+ goto ignore_tunnel;
offset2 -= ihl + sizeof(_icmph);
skb_reset_network_header(skb);
- IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
- &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
+ IP_VS_DBG(12, "ICMP for %s %pI4->%pI4: mtu=%u\n",
+ outer_proto, &ip_hdr(skb)->saddr,
+ &ip_hdr(skb)->daddr, mtu);
ipv4_update_pmtu(skb, ipvs->net, mtu, 0, 0);
/* Client uses PMTUD? */
if (!(frag_off & htons(IP_DF)))
- goto ignore_ipip;
+ goto ignore_tunnel;
/* Prefer the resulting PMTU */
if (dest) {
struct ip_vs_dest_dst *dest_dst;
@@ -1832,11 +1837,11 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
mtu -= sizeof(struct iphdr);
info = htonl(mtu);
}
- /* Strip outer IP, ICMP and IPIP, go to IP header of
+ /* Strip outer IP, ICMP and IPIP/UDP/GRE, go to IP header of
* original request.
*/
if (pskb_pull(skb, offset2) == NULL)
- goto ignore_ipip;
+ goto ignore_tunnel;
skb_reset_network_header(skb);
IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n",
&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
@@ -1845,7 +1850,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
/* ICMP can be shorter but anyways, account it */
ip_vs_out_stats(cp, skb);
-ignore_ipip:
+ignore_tunnel:
consume_skb(skb);
verdict = NF_STOLEN;
goto out;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 1927fc296f95..c4582eb71766 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -143,6 +143,7 @@ static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
}
static void nf_conntrack_all_lock(void)
+ __acquires(&nf_conntrack_locks_all_lock)
{
int i;
@@ -162,6 +163,7 @@ static void nf_conntrack_all_lock(void)
}
static void nf_conntrack_all_unlock(void)
+ __releases(&nf_conntrack_locks_all_lock)
{
/* All prior stores must be complete before we clear
* 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
@@ -863,9 +865,8 @@ out:
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
-static inline void nf_ct_acct_update(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int len)
+void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
+ unsigned int bytes)
{
struct nf_conn_acct *acct;
@@ -873,10 +874,11 @@ static inline void nf_ct_acct_update(struct nf_conn *ct,
if (acct) {
struct nf_conn_counter *counter = acct->counter;
- atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
- atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
+ atomic64_add(packets, &counter[dir].packets);
+ atomic64_add(bytes, &counter[dir].bytes);
}
}
+EXPORT_SYMBOL_GPL(nf_ct_acct_add);
static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
const struct nf_conn *loser_ct)
@@ -890,7 +892,7 @@ static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
/* u32 should be fine since we must have seen one packet. */
bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
- nf_ct_acct_update(ct, ctinfo, bytes);
+ nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), bytes);
}
}
@@ -1931,7 +1933,7 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
WRITE_ONCE(ct->timeout, extra_jiffies);
acct:
if (do_acct)
- nf_ct_acct_update(ct, ctinfo, skb->len);
+ nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
@@ -1939,7 +1941,7 @@ bool nf_ct_kill_acct(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
const struct sk_buff *skb)
{
- nf_ct_acct_update(ct, ctinfo, skb->len);
+ nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
return nf_ct_delete(ct, 0, 0);
}
@@ -2633,7 +2635,6 @@ void nf_conntrack_init_end(void)
*/
#define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
#define DYING_NULLS_VAL ((1<<30)+1)
-#define TEMPLATE_NULLS_VAL ((1<<30)+2)
int nf_conntrack_init_net(struct net *net)
{
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6a1c8f1f6171..9ddfcd002d3b 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -860,7 +860,7 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
struct ctnetlink_filter *filter;
#ifndef CONFIG_NF_CONNTRACK_MARK
- if (cda[CTA_MARK] && cda[CTA_MARK_MASK])
+ if (cda[CTA_MARK] || cda[CTA_MARK_MASK])
return ERR_PTR(-EOPNOTSUPP);
#endif
@@ -1533,6 +1533,7 @@ static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
enum nf_nat_manip_type manip,
const struct nlattr *attr)
+ __must_hold(RCU)
{
struct nf_nat_hook *nat_hook;
int err;
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 4912069627b6..9b57330c81f8 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -1054,21 +1054,18 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
nf_conntrack_standalone_init_dccp_sysctl(net, table);
nf_conntrack_standalone_init_gre_sysctl(net, table);
- /* Don't export sysctls to unprivileged users */
+ /* Don't allow unprivileged users to alter certain sysctls */
if (net->user_ns != &init_user_ns) {
- table[NF_SYSCTL_CT_MAX].procname = NULL;
- table[NF_SYSCTL_CT_ACCT].procname = NULL;
- table[NF_SYSCTL_CT_HELPER].procname = NULL;
-#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
- table[NF_SYSCTL_CT_TIMESTAMP].procname = NULL;
-#endif
+ table[NF_SYSCTL_CT_MAX].mode = 0444;
+ table[NF_SYSCTL_CT_EXPECT_MAX].mode = 0444;
+ table[NF_SYSCTL_CT_HELPER].mode = 0444;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- table[NF_SYSCTL_CT_EVENTS].procname = NULL;
+ table[NF_SYSCTL_CT_EVENTS].mode = 0444;
#endif
- }
-
- if (!net_eq(&init_net, net))
table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
+ } else if (!net_eq(&init_net, net)) {
+ table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
+ }
net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table);
if (!net->ct.sysctl_header)
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 70ebebaf5bc1..c0cb79495c35 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -252,6 +252,19 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
}
EXPORT_SYMBOL_GPL(flow_offload_add);
+void flow_offload_refresh(struct nf_flowtable *flow_table,
+ struct flow_offload *flow)
+{
+ flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
+
+ if (likely(!nf_flowtable_hw_offload(flow_table) ||
+ !test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags)))
+ return;
+
+ nf_flow_offload_add(flow_table, flow);
+}
+EXPORT_SYMBOL_GPL(flow_offload_refresh);
+
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
return nf_flow_timeout_delta(flow->timeout) <= 0;
@@ -372,6 +385,50 @@ static void nf_flow_offload_work_gc(struct work_struct *work)
queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}
+int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
+ flow_setup_cb_t *cb, void *cb_priv)
+{
+ struct flow_block *block = &flow_table->flow_block;
+ struct flow_block_cb *block_cb;
+ int err = 0;
+
+ down_write(&flow_table->flow_block_lock);
+ block_cb = flow_block_cb_lookup(block, cb, cb_priv);
+ if (block_cb) {
+ err = -EEXIST;
+ goto unlock;
+ }
+
+ block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
+ if (IS_ERR(block_cb)) {
+ err = PTR_ERR(block_cb);
+ goto unlock;
+ }
+
+ list_add_tail(&block_cb->list, &block->cb_list);
+
+unlock:
+ up_write(&flow_table->flow_block_lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);
+
+void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
+ flow_setup_cb_t *cb, void *cb_priv)
+{
+ struct flow_block *block = &flow_table->flow_block;
+ struct flow_block_cb *block_cb;
+
+ down_write(&flow_table->flow_block_lock);
+ block_cb = flow_block_cb_lookup(block, cb, cb_priv);
+ if (block_cb)
+ list_del(&block_cb->list);
+ else
+ WARN_ON(true);
+ up_write(&flow_table->flow_block_lock);
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);
+
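add_cb/del_cb guard a plain callback list with a writer lock and reject duplicate (cb, cb_priv) pairs with -EEXIST. A runnable userspace analogue, using a pthread rwlock in place of the kernel rwsem; all names here are invented:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <errno.h>

	typedef int (*setup_cb_t)(void *priv);

	struct cb_entry {
		setup_cb_t cb;
		void *priv;
		struct cb_entry *next;
	};

	static struct cb_entry *cb_list;
	static pthread_rwlock_t cb_lock = PTHREAD_RWLOCK_INITIALIZER;

	static int add_cb(setup_cb_t cb, void *priv)
	{
		struct cb_entry *e;
		int err = 0;

		pthread_rwlock_wrlock(&cb_lock);
		for (e = cb_list; e; e = e->next) {
			if (e->cb == cb && e->priv == priv) {
				err = -EEXIST;	/* already registered */
				goto unlock;
			}
		}
		e = malloc(sizeof(*e));
		if (!e) {
			err = -ENOMEM;
			goto unlock;
		}
		e->cb = cb;
		e->priv = priv;
		e->next = cb_list;
		cb_list = e;
	unlock:
		pthread_rwlock_unlock(&cb_lock);
		return err;
	}

	static int hello_cb(void *priv) { (void)priv; return 0; }

	int main(void)
	{
		printf("first add: %d\n", add_cb(hello_cb, NULL));	/* 0 */
		printf("second add: %d\n", add_cb(hello_cb, NULL));	/* -EEXIST */
		return 0;
	}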
static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
__be16 port, __be16 new_port)
{
@@ -494,6 +551,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
flow_block_init(&flowtable->flow_block);
+ init_rwsem(&flowtable->flow_block_lock);
err = rhashtable_init(&flowtable->rhashtable,
&nf_flow_offload_rhash_params);
@@ -550,6 +608,7 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
mutex_lock(&flowtable_lock);
list_del(&flow_table->list);
mutex_unlock(&flowtable_lock);
+
cancel_delayed_work_sync(&flow_table->gc_work);
nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index ba775aecd89a..a3bca758b849 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -12,6 +12,7 @@
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
@@ -235,13 +236,6 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
return NF_STOLEN;
}
-static bool nf_flow_offload_refresh(struct nf_flowtable *flow_table,
- struct flow_offload *flow)
-{
- return nf_flowtable_hw_offload(flow_table) &&
- test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags);
-}
-
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -282,8 +276,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
return NF_ACCEPT;
- if (unlikely(nf_flow_offload_refresh(flow_table, flow)))
- nf_flow_offload_add(flow_table, flow);
+ flow_offload_refresh(flow_table, flow);
if (nf_flow_offload_dst_check(&rt->dst)) {
flow_offload_teardown(flow);
@@ -293,11 +286,13 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
return NF_DROP;
- flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
iph = ip_hdr(skb);
ip_decrease_ttl(iph);
skb->tstamp = 0;
+ if (flow_table->flags & NF_FLOWTABLE_COUNTER)
+ nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
+
if (unlikely(dst_xfrm(&rt->dst))) {
memset(skb->cb, 0, sizeof(struct inet_skb_parm));
IPCB(skb)->iif = skb->dev->ifindex;
@@ -514,8 +509,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
sizeof(*ip6h)))
return NF_ACCEPT;
- if (unlikely(nf_flow_offload_refresh(flow_table, flow)))
- nf_flow_offload_add(flow_table, flow);
+ flow_offload_refresh(flow_table, flow);
if (nf_flow_offload_dst_check(&rt->dst)) {
flow_offload_teardown(flow);
@@ -528,11 +522,13 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
return NF_DROP;
- flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
ip6h = ipv6_hdr(skb);
ip6h->hop_limit--;
skb->tstamp = 0;
+ if (flow_table->flags & NF_FLOWTABLE_COUNTER)
+ nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
+
if (unlikely(dst_xfrm(&rt->dst))) {
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
IP6CB(skb)->iif = skb->dev->ifindex;
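/* Userspace view of NF_FLOWTABLE_COUNTER (sketch; assumes an nftables
 * release with flowtable counter support, and conntrack accounting on):
 *
 *	sysctl -w net.netfilter.nf_conntrack_acct=1
 *	nft add table inet filter
 *	nft add flowtable inet filter ft \
 *		'{ hook ingress priority 0; devices = { eth0, eth1 }; counter; }'
 *
 * With the flag set, the software fast path above and the hardware stats
 * work handler below both feed the flow's conntrack counters.
 */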
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index f2c22c682851..e3b099c14eff 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -7,13 +7,13 @@
#include <linux/tc_act/tc_csum.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
-static struct work_struct nf_flow_offload_work;
-static DEFINE_SPINLOCK(flow_offload_pending_list_lock);
-static LIST_HEAD(flow_offload_pending_list);
+static struct workqueue_struct *nf_flow_offload_wq;
struct flow_offload_work {
struct list_head list;
@@ -21,40 +21,68 @@ struct flow_offload_work {
int priority;
struct nf_flowtable *flowtable;
struct flow_offload *flow;
-};
-
-struct nf_flow_key {
- struct flow_dissector_key_meta meta;
- struct flow_dissector_key_control control;
- struct flow_dissector_key_basic basic;
- union {
- struct flow_dissector_key_ipv4_addrs ipv4;
- struct flow_dissector_key_ipv6_addrs ipv6;
- };
- struct flow_dissector_key_tcp tcp;
- struct flow_dissector_key_ports tp;
-} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
-
-struct nf_flow_match {
- struct flow_dissector dissector;
- struct nf_flow_key key;
- struct nf_flow_key mask;
-};
-
-struct nf_flow_rule {
- struct nf_flow_match match;
- struct flow_rule *rule;
+ struct work_struct work;
};
#define NF_FLOW_DISSECTOR(__match, __type, __field) \
(__match)->dissector.offset[__type] = \
offsetof(struct nf_flow_key, __field)
+static void nf_flow_rule_lwt_match(struct nf_flow_match *match,
+ struct ip_tunnel_info *tun_info)
+{
+ struct nf_flow_key *mask = &match->mask;
+ struct nf_flow_key *key = &match->key;
+ unsigned int enc_keys;
+
+ if (!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX))
+ return;
+
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_CONTROL, enc_control);
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
+ key->enc_key_id.keyid = tunnel_id_to_key32(tun_info->key.tun_id);
+ mask->enc_key_id.keyid = 0xffffffff;
+ enc_keys = BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL);
+
+ if (ip_tunnel_info_af(tun_info) == AF_INET) {
+ NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ enc_ipv4);
+ key->enc_ipv4.src = tun_info->key.u.ipv4.dst;
+ key->enc_ipv4.dst = tun_info->key.u.ipv4.src;
+ if (key->enc_ipv4.src)
+ mask->enc_ipv4.src = 0xffffffff;
+ if (key->enc_ipv4.dst)
+ mask->enc_ipv4.dst = 0xffffffff;
+ enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
+ key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ } else {
+ memcpy(&key->enc_ipv6.src, &tun_info->key.u.ipv6.dst,
+ sizeof(struct in6_addr));
+ memcpy(&key->enc_ipv6.dst, &tun_info->key.u.ipv6.src,
+ sizeof(struct in6_addr));
+ if (memcmp(&key->enc_ipv6.src, &in6addr_any,
+ sizeof(struct in6_addr)))
+ memset(&key->enc_ipv6.src, 0xff,
+ sizeof(struct in6_addr));
+ if (memcmp(&key->enc_ipv6.dst, &in6addr_any,
+ sizeof(struct in6_addr)))
+ memset(&key->enc_ipv6.dst, 0xff,
+ sizeof(struct in6_addr));
+ enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS);
+ key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ }
+
+ match->dissector.used_keys |= enc_keys;
+}
+
static int nf_flow_rule_match(struct nf_flow_match *match,
- const struct flow_offload_tuple *tuple)
+ const struct flow_offload_tuple *tuple,
+ struct dst_entry *other_dst)
{
struct nf_flow_key *mask = &match->mask;
struct nf_flow_key *key = &match->key;
+ struct ip_tunnel_info *tun_info;
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_META, meta);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
@@ -64,6 +92,11 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);
+ if (other_dst && other_dst->lwtstate) {
+ tun_info = lwt_tun_info(other_dst->lwtstate);
+ nf_flow_rule_lwt_match(match, tun_info);
+ }
+
key->meta.ingress_ifindex = tuple->iifidx;
mask->meta.ingress_ifindex = 0xffffffff;
@@ -443,10 +476,52 @@ static void flow_offload_redirect(const struct flow_offload *flow,
dev_hold(rt->dst.dev);
}
+static void flow_offload_encap_tunnel(const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ struct flow_action_entry *entry;
+ struct dst_entry *dst;
+
+ dst = flow->tuplehash[dir].tuple.dst_cache;
+ if (dst && dst->lwtstate) {
+ struct ip_tunnel_info *tun_info;
+
+ tun_info = lwt_tun_info(dst->lwtstate);
+ if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
+ entry = flow_action_entry_next(flow_rule);
+ entry->id = FLOW_ACTION_TUNNEL_ENCAP;
+ entry->tunnel = tun_info;
+ }
+ }
+}
+
+static void flow_offload_decap_tunnel(const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+{
+ struct flow_action_entry *entry;
+ struct dst_entry *dst;
+
+ dst = flow->tuplehash[!dir].tuple.dst_cache;
+ if (dst && dst->lwtstate) {
+ struct ip_tunnel_info *tun_info;
+
+ tun_info = lwt_tun_info(dst->lwtstate);
+ if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
+ entry = flow_action_entry_next(flow_rule);
+ entry->id = FLOW_ACTION_TUNNEL_DECAP;
+ }
+ }
+}
+
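/* Net effect on rule construction (sketch): for a tunnelled flow, the
 * action list built by the nf_flow_rule_route_*() helpers below now opens
 * with FLOW_ACTION_TUNNEL_DECAP (keyed off the reverse tuple's dst, i.e.
 * the tunnel the packet arrived on) and/or FLOW_ACTION_TUNNEL_ENCAP
 * (keyed off this tuple's egress dst), ahead of the existing ethernet
 * rewrite, NAT and redirect actions.
 */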
int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
+ flow_offload_decap_tunnel(flow, dir, flow_rule);
+ flow_offload_encap_tunnel(flow, dir, flow_rule);
+
if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
return -1;
@@ -473,6 +548,9 @@ int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule)
{
+ flow_offload_decap_tunnel(flow, dir, flow_rule);
+ flow_offload_encap_tunnel(flow, dir, flow_rule);
+
if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
return -1;
@@ -503,6 +581,7 @@ nf_flow_offload_rule_alloc(struct net *net,
const struct flow_offload *flow = offload->flow;
const struct flow_offload_tuple *tuple;
struct nf_flow_rule *flow_rule;
+ struct dst_entry *other_dst;
int err = -ENOMEM;
flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
@@ -518,7 +597,8 @@ nf_flow_offload_rule_alloc(struct net *net,
flow_rule->rule->match.key = &flow_rule->match.key;
tuple = &flow->tuplehash[dir].tuple;
- err = nf_flow_rule_match(&flow_rule->match, tuple);
+ other_dst = flow->tuplehash[!dir].tuple.dst_cache;
+ err = nf_flow_rule_match(&flow_rule->match, tuple, other_dst);
if (err < 0)
goto err_flow_match;
@@ -598,6 +678,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
struct nf_flow_rule *flow_rule,
enum flow_offload_tuple_dir dir,
int priority, int cmd,
+ struct flow_stats *stats,
struct list_head *block_cb_list)
{
struct flow_cls_offload cls_flow = {};
@@ -611,6 +692,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
if (cmd == FLOW_CLS_REPLACE)
cls_flow.rule = flow_rule->rule;
+ down_read(&flowtable->flow_block_lock);
list_for_each_entry(block_cb, block_cb_list, list) {
err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
block_cb->cb_priv);
@@ -619,6 +701,10 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
i++;
}
+ up_read(&flowtable->flow_block_lock);
+
+ if (cmd == FLOW_CLS_STATS)
+ memcpy(stats, &cls_flow.stats, sizeof(*stats));
return i;
}
@@ -629,7 +715,7 @@ static int flow_offload_tuple_add(struct flow_offload_work *offload,
{
return nf_flow_offload_tuple(offload->flowtable, offload->flow,
flow_rule, dir, offload->priority,
- FLOW_CLS_REPLACE,
+ FLOW_CLS_REPLACE, NULL,
&offload->flowtable->flow_block.cb_list);
}
@@ -637,7 +723,7 @@ static void flow_offload_tuple_del(struct flow_offload_work *offload,
enum flow_offload_tuple_dir dir)
{
nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
- offload->priority, FLOW_CLS_DESTROY,
+ offload->priority, FLOW_CLS_DESTROY, NULL,
&offload->flowtable->flow_block.cb_list);
}
@@ -683,19 +769,9 @@ static void flow_offload_tuple_stats(struct flow_offload_work *offload,
enum flow_offload_tuple_dir dir,
struct flow_stats *stats)
{
- struct nf_flowtable *flowtable = offload->flowtable;
- struct flow_cls_offload cls_flow = {};
- struct flow_block_cb *block_cb;
- struct netlink_ext_ack extack;
- __be16 proto = ETH_P_ALL;
-
- nf_flow_offload_init(&cls_flow, proto, offload->priority,
- FLOW_CLS_STATS,
- &offload->flow->tuplehash[dir].tuple, &extack);
-
- list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
- block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);
- memcpy(stats, &cls_flow.stats, sizeof(*stats));
+ nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
+ offload->priority, FLOW_CLS_STATS, stats,
+ &offload->flowtable->flow_block.cb_list);
}
static void flow_offload_work_stats(struct flow_offload_work *offload)
@@ -709,19 +785,25 @@ static void flow_offload_work_stats(struct flow_offload_work *offload)
lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
offload->flow->timeout = max_t(u64, offload->flow->timeout,
lastused + NF_FLOW_TIMEOUT);
+
+ if (offload->flowtable->flags & NF_FLOWTABLE_COUNTER) {
+ if (stats[0].pkts)
+ nf_ct_acct_add(offload->flow->ct,
+ FLOW_OFFLOAD_DIR_ORIGINAL,
+ stats[0].pkts, stats[0].bytes);
+ if (stats[1].pkts)
+ nf_ct_acct_add(offload->flow->ct,
+ FLOW_OFFLOAD_DIR_REPLY,
+ stats[1].pkts, stats[1].bytes);
+ }
}
static void flow_offload_work_handler(struct work_struct *work)
{
- struct flow_offload_work *offload, *next;
- LIST_HEAD(offload_pending_list);
-
- spin_lock_bh(&flow_offload_pending_list_lock);
- list_replace_init(&flow_offload_pending_list, &offload_pending_list);
- spin_unlock_bh(&flow_offload_pending_list_lock);
+ struct flow_offload_work *offload;
- list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
- switch (offload->cmd) {
+ offload = container_of(work, struct flow_offload_work, work);
+ switch (offload->cmd) {
case FLOW_CLS_REPLACE:
flow_offload_work_add(offload);
break;
@@ -733,19 +815,14 @@ static void flow_offload_work_handler(struct work_struct *work)
break;
default:
WARN_ON_ONCE(1);
- }
- list_del(&offload->list);
- kfree(offload);
}
+
+ kfree(offload);
}
static void flow_offload_queue_work(struct flow_offload_work *offload)
{
- spin_lock_bh(&flow_offload_pending_list_lock);
- list_add_tail(&offload->list, &flow_offload_pending_list);
- spin_unlock_bh(&flow_offload_pending_list_lock);
-
- schedule_work(&nf_flow_offload_work);
+ queue_work(nf_flow_offload_wq, &offload->work);
}
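/* The shape of the new scheme, reduced to a standalone kernel-style sketch
 * (my_ names are illustrative): one work_struct embedded per request and
 * recovered with container_of(), instead of one global work item draining
 * a shared, spinlock-protected list.
 */
struct my_req {
	struct work_struct work;
	int cmd;
};

static void my_handler(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, work);

	/* ... act on req->cmd ... */
	kfree(req);
}

static int my_submit(struct workqueue_struct *wq, int cmd)
{
	struct my_req *req = kzalloc(sizeof(*req), GFP_ATOMIC);

	if (!req)
		return -ENOMEM;
	req->cmd = cmd;
	INIT_WORK(&req->work, my_handler);
	queue_work(wq, &req->work);
	return 0;
}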
static struct flow_offload_work *
@@ -762,6 +839,7 @@ nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
offload->flow = flow;
offload->priority = flowtable->priority;
offload->flowtable = flowtable;
+ INIT_WORK(&offload->work, flow_offload_work_handler);
return offload;
}
@@ -812,7 +890,7 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
{
if (nf_flowtable_hw_offload(flowtable))
- flush_work(&nf_flow_offload_work);
+ flush_workqueue(nf_flow_offload_wq);
}
static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
@@ -840,25 +918,47 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
return err;
}
-static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
- struct nf_flowtable *flowtable,
- struct net_device *dev,
- enum flow_block_command cmd,
- struct netlink_ext_ack *extack)
+static void nf_flow_table_block_offload_init(struct flow_block_offload *bo,
+ struct net *net,
+ enum flow_block_command cmd,
+ struct nf_flowtable *flowtable,
+ struct netlink_ext_ack *extack)
{
- int err;
-
- if (!dev->netdev_ops->ndo_setup_tc)
- return -EOPNOTSUPP;
-
memset(bo, 0, sizeof(*bo));
- bo->net = dev_net(dev);
+ bo->net = net;
bo->block = &flowtable->flow_block;
bo->command = cmd;
bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
bo->extack = extack;
INIT_LIST_HEAD(&bo->cb_list);
+}
+
+static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo,
+ struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd,
+ struct netlink_ext_ack *extack)
+{
+ nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
+ extack);
+ flow_indr_block_call(dev, bo, cmd, TC_SETUP_FT);
+
+ if (list_empty(&bo->cb_list))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
+ struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
+ extack);
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
if (err < 0)
return err;
@@ -877,7 +977,12 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
if (!nf_flowtable_hw_offload(flowtable))
return 0;
- err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd, &extack);
+ if (dev->netdev_ops->ndo_setup_tc)
+ err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd,
+ &extack);
+ else
+ err = nf_flow_table_indr_offload_cmd(&bo, flowtable, dev, cmd,
+ &extack);
if (err < 0)
return err;
@@ -885,22 +990,83 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
}
EXPORT_SYMBOL_GPL(nf_flow_table_offload_setup);
-int nf_flow_table_offload_init(void)
+static void nf_flow_table_indr_block_ing_cmd(struct net_device *dev,
+ struct nf_flowtable *flowtable,
+ flow_indr_block_bind_cb_t *cb,
+ void *cb_priv,
+ enum flow_block_command cmd)
{
- INIT_WORK(&nf_flow_offload_work, flow_offload_work_handler);
+ struct netlink_ext_ack extack = {};
+ struct flow_block_offload bo;
- return 0;
+ if (!flowtable)
+ return;
+
+ nf_flow_table_block_offload_init(&bo, dev_net(dev), cmd, flowtable,
+ &extack);
+
+ cb(dev, cb_priv, TC_SETUP_FT, &bo);
+
+ nf_flow_table_block_setup(flowtable, &bo, cmd);
}
-void nf_flow_table_offload_exit(void)
+static void nf_flow_table_indr_block_cb_cmd(struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ flow_indr_block_bind_cb_t *cb,
+ void *cb_priv,
+ enum flow_block_command cmd)
{
- struct flow_offload_work *offload, *next;
- LIST_HEAD(offload_pending_list);
+ if (!(flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD))
+ return;
- cancel_work_sync(&nf_flow_offload_work);
+ nf_flow_table_indr_block_ing_cmd(dev, flowtable, cb, cb_priv, cmd);
+}
- list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
- list_del(&offload->list);
- kfree(offload);
+static void nf_flow_table_indr_block_cb(struct net_device *dev,
+ flow_indr_block_bind_cb_t *cb,
+ void *cb_priv,
+ enum flow_block_command cmd)
+{
+ struct net *net = dev_net(dev);
+ struct nft_flowtable *nft_ft;
+ struct nft_table *table;
+ struct nft_hook *hook;
+
+ mutex_lock(&net->nft.commit_mutex);
+ list_for_each_entry(table, &net->nft.tables, list) {
+ list_for_each_entry(nft_ft, &table->flowtables, list) {
+ list_for_each_entry(hook, &nft_ft->hook_list, list) {
+ if (hook->ops.dev != dev)
+ continue;
+
+ nf_flow_table_indr_block_cb_cmd(&nft_ft->data,
+ dev, cb,
+ cb_priv, cmd);
+ }
+ }
}
+ mutex_unlock(&net->nft.commit_mutex);
+}
+
+static struct flow_indr_block_entry block_ing_entry = {
+ .cb = nf_flow_table_indr_block_cb,
+ .list = LIST_HEAD_INIT(block_ing_entry.list),
+};
+
+int nf_flow_table_offload_init(void)
+{
+ nf_flow_offload_wq = alloc_workqueue("nf_flow_table_offload",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+ if (!nf_flow_offload_wq)
+ return -ENOMEM;
+
+ flow_indr_add_block_cb(&block_ing_entry);
+
+ return 0;
+}
+
+void nf_flow_table_offload_exit(void)
+{
+ flow_indr_del_block_cb(&block_ing_entry);
+ destroy_workqueue(nf_flow_offload_wq);
}
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index f8f52ff99cfb..bbd1209694b8 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -46,25 +46,7 @@ void nf_unregister_queue_handler(struct net *net)
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
-static void nf_queue_entry_release_br_nf_refs(struct sk_buff *skb)
-{
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
-
- if (nf_bridge) {
- struct net_device *physdev;
-
- physdev = nf_bridge_get_physindev(skb);
- if (physdev)
- dev_put(physdev);
- physdev = nf_bridge_get_physoutdev(skb);
- if (physdev)
- dev_put(physdev);
- }
-#endif
-}
-
-void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
+static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
struct nf_hook_state *state = &entry->state;
@@ -76,24 +58,34 @@ void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
if (state->sk)
sock_put(state->sk);
- nf_queue_entry_release_br_nf_refs(entry->skb);
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ if (entry->physin)
+ dev_put(entry->physin);
+ if (entry->physout)
+ dev_put(entry->physout);
+#endif
+}
+
+void nf_queue_entry_free(struct nf_queue_entry *entry)
+{
+ nf_queue_entry_release_refs(entry);
+ kfree(entry);
}
-EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
+EXPORT_SYMBOL_GPL(nf_queue_entry_free);
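/* Ownership rule after this change (sketch): a queue handler either takes
 * the entry or reports failure, and the single free path drops the held
 * device/socket references exactly once. A hypothetical handler:
 */
static bool my_deliver(struct nf_queue_entry *entry)
{
	return false;	/* stub: pretend the userspace queue is full */
}

static int my_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
	if (!my_deliver(entry))
		return -ENOSPC;	/* __nf_queue() will nf_queue_entry_free() */
	return 0;		/* entry is now owned by the handler and must
				 * eventually reach nf_reinject(), which frees it
				 */
}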
-static void nf_queue_entry_get_br_nf_refs(struct sk_buff *skb)
+static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ const struct sk_buff *skb = entry->skb;
+ struct nf_bridge_info *nf_bridge;
+ nf_bridge = nf_bridge_info_get(skb);
if (nf_bridge) {
- struct net_device *physdev;
-
- physdev = nf_bridge_get_physindev(skb);
- if (physdev)
- dev_hold(physdev);
- physdev = nf_bridge_get_physoutdev(skb);
- if (physdev)
- dev_hold(physdev);
+ entry->physin = nf_bridge_get_physindev(skb);
+ entry->physout = nf_bridge_get_physoutdev(skb);
+ } else {
+ entry->physin = NULL;
+ entry->physout = NULL;
}
#endif
}
@@ -110,7 +102,12 @@ void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
if (state->sk)
sock_hold(state->sk);
- nf_queue_entry_get_br_nf_refs(entry->skb);
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ if (entry->physin)
+ dev_hold(entry->physin);
+ if (entry->physout)
+ dev_hold(entry->physout);
+#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
@@ -158,18 +155,16 @@ static void nf_ip6_saveroute(const struct sk_buff *skb,
static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
unsigned int index, unsigned int queuenum)
{
- int status = -ENOENT;
struct nf_queue_entry *entry = NULL;
const struct nf_queue_handler *qh;
struct net *net = state->net;
unsigned int route_key_size;
+ int status;
/* QUEUE == DROP if no one is waiting, to be safe. */
qh = rcu_dereference(net->nf.queue_handler);
- if (!qh) {
- status = -ESRCH;
- goto err;
- }
+ if (!qh)
+ return -ESRCH;
switch (state->pf) {
case AF_INET:
@@ -184,14 +179,12 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
}
entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
- if (!entry) {
- status = -ENOMEM;
- goto err;
- }
+ if (!entry)
+ return -ENOMEM;
if (skb_dst(skb) && !skb_dst_force(skb)) {
- status = -ENETDOWN;
- goto err;
+ kfree(entry);
+ return -ENETDOWN;
}
*entry = (struct nf_queue_entry) {
@@ -201,6 +194,8 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
.size = sizeof(*entry) + route_key_size,
};
+ __nf_queue_entry_init_physdevs(entry);
+
nf_queue_entry_get_refs(entry);
switch (entry->state.pf) {
@@ -213,17 +208,12 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
}
status = qh->outfn(entry, queuenum);
-
if (status < 0) {
- nf_queue_entry_release_refs(entry);
- goto err;
+ nf_queue_entry_free(entry);
+ return status;
}
return 0;
-
-err:
- kfree(entry);
- return status;
}
/* Packets leaving via this function must come back through nf_reinject(). */
@@ -304,12 +294,10 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
hooks = nf_hook_entries_head(net, pf, entry->state.hook);
- nf_queue_entry_release_refs(entry);
-
i = entry->hook_index;
if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
kfree_skb(skb);
- kfree(entry);
+ nf_queue_entry_free(entry);
return;
}
@@ -348,6 +336,6 @@ next_hook:
kfree_skb(skb);
}
- kfree(entry);
+ nf_queue_entry_free(entry);
}
EXPORT_SYMBOL(nf_reinject);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index d11f1a74d43c..d0ab5ffa1e2c 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -520,7 +520,8 @@ static struct nft_table *nft_table_lookup(const struct net *net,
if (nla == NULL)
return ERR_PTR(-EINVAL);
- list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ list_for_each_entry_rcu(table, &net->nft.tables, list,
+ lockdep_is_held(&net->nft.commit_mutex)) {
if (!nla_strcmp(nla, table->name) &&
table->family == family &&
nft_active_genmask(table, genmask))
@@ -2523,8 +2524,8 @@ static void nf_tables_expr_destroy(const struct nft_ctx *ctx,
module_put(type->owner);
}
-struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
- const struct nlattr *nla)
+static struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
+ const struct nlattr *nla)
{
struct nft_expr_info info;
struct nft_expr *expr;
@@ -2557,6 +2558,24 @@ err1:
return ERR_PTR(err);
}
+int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
+{
+ int err;
+
+ if (src->ops->clone) {
+ dst->ops = src->ops;
+ err = src->ops->clone(dst, src);
+ if (err < 0)
+ return err;
+ } else {
+ memcpy(dst, src, src->ops->size);
+ }
+
+ __module_get(src->ops->type->owner);
+
+ return 0;
+}
+
void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr)
{
nf_tables_expr_destroy(ctx, expr);
@@ -3266,25 +3285,17 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk,
/*
* Sets
*/
-
-static LIST_HEAD(nf_tables_set_types);
-
-int nft_register_set(struct nft_set_type *type)
-{
- nfnl_lock(NFNL_SUBSYS_NFTABLES);
- list_add_tail_rcu(&type->list, &nf_tables_set_types);
- nfnl_unlock(NFNL_SUBSYS_NFTABLES);
- return 0;
-}
-EXPORT_SYMBOL_GPL(nft_register_set);
-
-void nft_unregister_set(struct nft_set_type *type)
-{
- nfnl_lock(NFNL_SUBSYS_NFTABLES);
- list_del_rcu(&type->list);
- nfnl_unlock(NFNL_SUBSYS_NFTABLES);
-}
-EXPORT_SYMBOL_GPL(nft_unregister_set);
+static const struct nft_set_type *nft_set_types[] = {
+ &nft_set_hash_fast_type,
+ &nft_set_hash_type,
+ &nft_set_rhash_type,
+ &nft_set_bitmap_type,
+ &nft_set_rbtree_type,
+#if defined(CONFIG_X86_64) && defined(CONFIG_AS_AVX2)
+ &nft_set_pipapo_avx2_type,
+#endif
+ &nft_set_pipapo_type,
+};
#define NFT_SET_FEATURES (NFT_SET_INTERVAL | NFT_SET_MAP | \
NFT_SET_TIMEOUT | NFT_SET_OBJECT | \
@@ -3310,15 +3321,11 @@ nft_select_set_ops(const struct nft_ctx *ctx,
struct nft_set_estimate est, best;
const struct nft_set_type *type;
u32 flags = 0;
+ int i;
lockdep_assert_held(&ctx->net->nft.commit_mutex);
lockdep_nfnl_nft_mutex_not_held();
-#ifdef CONFIG_MODULES
- if (list_empty(&nf_tables_set_types)) {
- if (nft_request_module(ctx->net, "nft-set") == -EAGAIN)
- return ERR_PTR(-EAGAIN);
- }
-#endif
+
if (nla[NFTA_SET_FLAGS] != NULL)
flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
@@ -3327,7 +3334,8 @@ nft_select_set_ops(const struct nft_ctx *ctx,
best.lookup = ~0;
best.space = ~0;
- list_for_each_entry(type, &nf_tables_set_types, list) {
+ for (i = 0; i < ARRAY_SIZE(nft_set_types); i++) {
+ type = nft_set_types[i];
ops = &type->ops;
if (!nft_set_ops_candidate(type, flags))
@@ -3358,11 +3366,6 @@ nft_select_set_ops(const struct nft_ctx *ctx,
break;
}
- if (!try_module_get(type->owner))
- continue;
- if (bops != NULL)
- module_put(to_set_type(bops)->owner);
-
bops = ops;
best = est;
}
@@ -3392,6 +3395,7 @@ static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
.len = NFT_USERDATA_MAXLEN },
[NFTA_SET_OBJ_TYPE] = { .type = NLA_U32 },
[NFTA_SET_HANDLE] = { .type = NLA_U64 },
+ [NFTA_SET_EXPR] = { .type = NLA_NESTED },
};
static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
@@ -3595,8 +3599,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
{
struct nfgenmsg *nfmsg;
struct nlmsghdr *nlh;
- struct nlattr *desc;
u32 portid = ctx->portid;
+ struct nlattr *nest;
u32 seq = ctx->seq;
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
@@ -3652,9 +3656,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
goto nla_put_failure;
- desc = nla_nest_start_noflag(skb, NFTA_SET_DESC);
-
- if (desc == NULL)
+ nest = nla_nest_start_noflag(skb, NFTA_SET_DESC);
+ if (!nest)
goto nla_put_failure;
if (set->size &&
nla_put_be32(skb, NFTA_SET_DESC_SIZE, htonl(set->size)))
@@ -3664,7 +3667,15 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
nf_tables_fill_set_concat(skb, set))
goto nla_put_failure;
- nla_nest_end(skb, desc);
+ nla_nest_end(skb, nest);
+
+ if (set->expr) {
+ nest = nla_nest_start_noflag(skb, NFTA_SET_EXPR);
+ if (nf_tables_fill_expr_info(skb, set->expr) < 0)
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+ }
nlmsg_end(skb, nlh);
return 0;
@@ -3911,6 +3922,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
u8 genmask = nft_genmask_next(net);
int family = nfmsg->nfgen_family;
const struct nft_set_ops *ops;
+ struct nft_expr *expr = NULL;
struct nft_table *table;
struct nft_set *set;
struct nft_ctx ctx;
@@ -4020,6 +4032,9 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
return err;
}
+ if (nla[NFTA_SET_EXPR])
+ desc.expr = true;
+
table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family, genmask);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]);
@@ -4061,21 +4076,27 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
size = ops->privsize(nla, &desc);
set = kvzalloc(sizeof(*set) + size + udlen, GFP_KERNEL);
- if (!set) {
- err = -ENOMEM;
- goto err1;
- }
+ if (!set)
+ return -ENOMEM;
name = nla_strdup(nla[NFTA_SET_NAME], GFP_KERNEL);
if (!name) {
err = -ENOMEM;
- goto err2;
+ goto err_set_name;
}
err = nf_tables_set_alloc_name(&ctx, set, name);
kfree(name);
if (err < 0)
- goto err2;
+ goto err_set_alloc_name;
+
+ if (nla[NFTA_SET_EXPR]) {
+ expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
+ if (IS_ERR(expr)) {
+ err = PTR_ERR(expr);
+ goto err_set_alloc_name;
+ }
+ }
udata = NULL;
if (udlen) {
@@ -4092,6 +4113,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
set->dtype = dtype;
set->objtype = objtype;
set->dlen = desc.dlen;
+ set->expr = expr;
set->flags = flags;
set->size = desc.size;
set->policy = policy;
@@ -4107,34 +4129,37 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
err = ops->init(set, &desc, nla);
if (err < 0)
- goto err3;
+ goto err_set_init;
err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
if (err < 0)
- goto err4;
+ goto err_set_trans;
list_add_tail_rcu(&set->list, &table->sets);
table->use++;
return 0;
-err4:
+err_set_trans:
ops->destroy(set);
-err3:
+err_set_init:
+ if (expr)
+ nft_expr_destroy(&ctx, expr);
+err_set_alloc_name:
kfree(set->name);
-err2:
+err_set_name:
kvfree(set);
-err1:
- module_put(to_set_type(ops)->owner);
return err;
}
-static void nft_set_destroy(struct nft_set *set)
+static void nft_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
{
if (WARN_ON(set->use > 0))
return;
+ if (set->expr)
+ nft_expr_destroy(ctx, set->expr);
+
set->ops->destroy(set);
- module_put(to_set_type(set->ops)->owner);
kfree(set->name);
kvfree(set);
}
@@ -4274,7 +4299,7 @@ EXPORT_SYMBOL_GPL(nf_tables_deactivate_set);
void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
{
if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
- nft_set_destroy(set);
+ nft_set_destroy(ctx, set);
}
EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
@@ -4312,7 +4337,6 @@ const struct nft_set_ext_type nft_set_ext_types[] = {
.align = __alignof__(u32),
},
};
-EXPORT_SYMBOL_GPL(nft_set_ext_types);
/*
* Set elements
@@ -4801,6 +4825,36 @@ static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
return trans;
}
+struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx,
+ const struct nft_set *set,
+ const struct nlattr *attr)
+{
+ struct nft_expr *expr;
+ int err;
+
+ expr = nft_expr_init(ctx, attr);
+ if (IS_ERR(expr))
+ return expr;
+
+ err = -EOPNOTSUPP;
+ if (!(expr->ops->type->flags & NFT_EXPR_STATEFUL))
+ goto err_set_elem_expr;
+
+ if (expr->ops->type->flags & NFT_EXPR_GC) {
+ if (set->flags & NFT_SET_TIMEOUT)
+ goto err_set_elem_expr;
+ if (!set->ops->gc_init)
+ goto err_set_elem_expr;
+ set->ops->gc_init(set);
+ }
+
+ return expr;
+
+err_set_elem_expr:
+ nft_expr_destroy(ctx, expr);
+ return ERR_PTR(err);
+}
+
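/* Userspace view of NFTA_SET_EXPR (sketch; assumes an nftables release
 * that can attach a stateful statement to a set definition):
 *
 *	nft add set inet filter tracked \
 *		'{ type ipv4_addr; flags dynamic; counter; }'
 *	nft add rule inet filter input \
 *		'update @tracked { ip saddr }'
 *
 * Every element then carries its own counter; elements created without an
 * explicit per-element expression inherit a clone of the set's one, as in
 * nft_add_set_elem() below.
 */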
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl,
const u32 *key, const u32 *key_end,
@@ -4832,6 +4886,17 @@ void *nft_set_elem_init(const struct nft_set *set,
return elem;
}
+static void nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
+ struct nft_expr *expr)
+{
+ if (expr->ops->destroy_clone) {
+ expr->ops->destroy_clone(ctx, expr);
+ module_put(expr->ops->type->owner);
+ } else {
+ nf_tables_expr_destroy(ctx, expr);
+ }
+}
+
void nft_set_elem_destroy(const struct nft_set *set, void *elem,
bool destroy_expr)
{
@@ -4844,16 +4909,9 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
nft_data_release(nft_set_ext_key(ext), NFT_DATA_VALUE);
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
nft_data_release(nft_set_ext_data(ext), set->dtype);
- if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) {
- struct nft_expr *expr = nft_set_ext_expr(ext);
+ if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
+ nft_set_elem_expr_destroy(&ctx, nft_set_ext_expr(ext));
- if (expr->ops->destroy_clone) {
- expr->ops->destroy_clone(&ctx, expr);
- module_put(expr->ops->type->owner);
- } else {
- nf_tables_expr_destroy(&ctx, expr);
- }
- }
if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
(*nft_set_ext_obj(ext))->use--;
kfree(elem);
@@ -4869,7 +4927,8 @@ static void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
- nf_tables_expr_destroy(ctx, nft_set_ext_expr(ext));
+ nft_set_elem_expr_destroy(ctx, nft_set_ext_expr(ext));
+
kfree(elem);
}
@@ -4883,6 +4942,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_elem elem;
struct nft_set_binding *binding;
struct nft_object *obj = NULL;
+ struct nft_expr *expr = NULL;
struct nft_userdata *udata;
struct nft_data_desc desc;
struct nft_data data;
@@ -4950,10 +5010,29 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
return err;
}
+ if (nla[NFTA_SET_ELEM_EXPR] != NULL) {
+ expr = nft_set_elem_expr_alloc(ctx, set,
+ nla[NFTA_SET_ELEM_EXPR]);
+ if (IS_ERR(expr))
+ return PTR_ERR(expr);
+
+ err = -EOPNOTSUPP;
+ if (set->expr && set->expr->ops != expr->ops)
+ goto err_set_elem_expr;
+ } else if (set->expr) {
+ expr = kzalloc(set->expr->ops->size, GFP_KERNEL);
+ if (!expr)
+ return -ENOMEM;
+
+ err = nft_expr_clone(expr, set->expr);
+ if (err < 0)
+ goto err_set_elem_expr;
+ }
+
err = nft_setelem_parse_key(ctx, set, &elem.key.val,
nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
- return err;
+ goto err_set_elem_expr;
nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
@@ -4972,6 +5051,10 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
}
+ if (expr)
+ nft_set_ext_add_length(&tmpl, NFT_SET_EXT_EXPR,
+ expr->ops->size);
+
if (nla[NFTA_SET_ELEM_OBJREF] != NULL) {
if (!(set->flags & NFT_SET_OBJECT)) {
err = -EINVAL;
@@ -5056,6 +5139,11 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
*nft_set_ext_obj(ext) = obj;
obj->use++;
}
+ if (expr) {
+ memcpy(nft_set_ext_expr(ext), expr, expr->ops->size);
+ kfree(expr);
+ expr = NULL;
+ }
trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
if (trans == NULL)
@@ -5108,7 +5196,8 @@ err_element_clash:
err_trans:
if (obj)
obj->use--;
- kfree(elem.priv);
+
+ nf_tables_set_elem_destroy(ctx, set, elem.priv);
err_parse_data:
if (nla[NFTA_SET_ELEM_DATA] != NULL)
nft_data_release(&data, desc.type);
@@ -5116,6 +5205,9 @@ err_parse_key_end:
nft_data_release(&elem.key_end.val, NFT_DATA_VALUE);
err_parse_key:
nft_data_release(&elem.key.val, NFT_DATA_VALUE);
+err_set_elem_expr:
+ if (expr != NULL)
+ nft_expr_destroy(ctx, expr);
return err;
}
@@ -5370,7 +5462,6 @@ void nft_set_gc_batch_release(struct rcu_head *rcu)
nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true);
kfree(gcb);
}
-EXPORT_SYMBOL_GPL(nft_set_gc_batch_release);
struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
gfp_t gfp)
@@ -5383,7 +5474,6 @@ struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
gcb->head.set = set;
return gcb;
}
-EXPORT_SYMBOL_GPL(nft_set_gc_batch_alloc);
/*
* Stateful objects
@@ -6294,7 +6384,7 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
if (nla[NFTA_FLOWTABLE_FLAGS]) {
flowtable->data.flags =
ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
- if (flowtable->data.flags & ~NF_FLOWTABLE_HW_OFFLOAD)
+ if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK)
goto err3;
}
@@ -6982,7 +7072,7 @@ static void nft_commit_release(struct nft_trans *trans)
nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
break;
case NFT_MSG_DELSET:
- nft_set_destroy(nft_trans_set(trans));
+ nft_set_destroy(&trans->ctx, nft_trans_set(trans));
break;
case NFT_MSG_DELSETELEM:
nf_tables_set_elem_destroy(&trans->ctx,
@@ -7413,7 +7503,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
break;
case NFT_MSG_NEWSET:
- nft_set_destroy(nft_trans_set(trans));
+ nft_set_destroy(&trans->ctx, nft_trans_set(trans));
break;
case NFT_MSG_NEWSETELEM:
nft_set_elem_destroy(nft_trans_elem_set(trans),
@@ -8139,7 +8229,7 @@ static void __nft_release_tables(struct net *net)
list_for_each_entry_safe(set, ns, &table->sets, list) {
list_del(&set->list);
table->use--;
- nft_set_destroy(set);
+ nft_set_destroy(&ctx, set);
}
list_for_each_entry_safe(obj, ne, &table->objects, list) {
nft_obj_del(obj);
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 2bb28483af22..954bccb7f32a 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -313,7 +313,7 @@ static int nft_indr_block_offload_cmd(struct nft_base_chain *chain,
nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack);
- flow_indr_block_call(dev, &bo, cmd);
+ flow_indr_block_call(dev, &bo, cmd, TC_SETUP_BLOCK);
if (list_empty(&bo.cb_list))
return -EOPNOTSUPP;
diff --git a/net/netfilter/nf_tables_set_core.c b/net/netfilter/nf_tables_set_core.c
deleted file mode 100644
index 586b621007eb..000000000000
--- a/net/netfilter/nf_tables_set_core.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/module.h>
-#include <net/netfilter/nf_tables_core.h>
-
-static int __init nf_tables_set_module_init(void)
-{
- nft_register_set(&nft_set_hash_fast_type);
- nft_register_set(&nft_set_hash_type);
- nft_register_set(&nft_set_rhash_type);
- nft_register_set(&nft_set_bitmap_type);
- nft_register_set(&nft_set_rbtree_type);
- nft_register_set(&nft_set_pipapo_type);
-
- return 0;
-}
-
-static void __exit nf_tables_set_module_exit(void)
-{
- nft_unregister_set(&nft_set_pipapo_type);
- nft_unregister_set(&nft_set_rbtree_type);
- nft_unregister_set(&nft_set_bitmap_type);
- nft_unregister_set(&nft_set_rhash_type);
- nft_unregister_set(&nft_set_hash_type);
- nft_unregister_set(&nft_set_hash_fast_type);
-}
-
-module_init(nf_tables_set_module_init);
-module_exit(nf_tables_set_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NFT_SET();
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 2481470dec36..5827117f2635 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -33,7 +33,7 @@ struct nf_acct {
refcount_t refcnt;
char name[NFACCT_NAME_MAX];
struct rcu_head rcu_head;
- char data[0];
+ char data[];
};
struct nfacct_filter {
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 76535fd9278c..3243a31f6e82 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -737,12 +737,6 @@ static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
#define nf_bridge_adjust_segmented_data(s) do {} while (0)
#endif
-static void free_entry(struct nf_queue_entry *entry)
-{
- nf_queue_entry_release_refs(entry);
- kfree(entry);
-}
-
static int
__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
struct sk_buff *skb, struct nf_queue_entry *entry)
@@ -768,7 +762,7 @@ __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
entry_seg->skb = skb;
ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
if (ret)
- free_entry(entry_seg);
+ nf_queue_entry_free(entry_seg);
}
return ret;
}
@@ -827,7 +821,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
if (queued) {
if (err) /* some segments are already queued */
- free_entry(entry);
+ nf_queue_entry_free(entry);
kfree_skb(skb);
return 0;
}
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index 0ed2281f03be..bc37d6c59db4 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -93,7 +93,7 @@ static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
static int nft_bitwise_init_bool(struct nft_bitwise *priv,
const struct nlattr *const tb[])
{
- struct nft_data_desc d1, d2;
+ struct nft_data_desc mask, xor;
int err;
if (tb[NFTA_BITWISE_DATA])
@@ -103,29 +103,29 @@ static int nft_bitwise_init_bool(struct nft_bitwise *priv,
!tb[NFTA_BITWISE_XOR])
return -EINVAL;
- err = nft_data_init(NULL, &priv->mask, sizeof(priv->mask), &d1,
+ err = nft_data_init(NULL, &priv->mask, sizeof(priv->mask), &mask,
tb[NFTA_BITWISE_MASK]);
if (err < 0)
return err;
- if (d1.type != NFT_DATA_VALUE || d1.len != priv->len) {
+ if (mask.type != NFT_DATA_VALUE || mask.len != priv->len) {
err = -EINVAL;
goto err1;
}
- err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &d2,
+ err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &xor,
tb[NFTA_BITWISE_XOR]);
if (err < 0)
goto err1;
- if (d2.type != NFT_DATA_VALUE || d2.len != priv->len) {
+ if (xor.type != NFT_DATA_VALUE || xor.len != priv->len) {
err = -EINVAL;
goto err2;
}
return 0;
err2:
- nft_data_release(&priv->xor, d2.type);
+ nft_data_release(&priv->xor, xor.type);
err1:
- nft_data_release(&priv->mask, d1.type);
+ nft_data_release(&priv->mask, mask.type);
return err;
}
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 683785225a3e..64ca13a1885b 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -24,23 +24,6 @@ struct nft_dynset {
struct nft_set_binding binding;
};
-static int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
-{
- int err;
-
- if (src->ops->clone) {
- dst->ops = src->ops;
- err = src->ops->clone(dst, src);
- if (err < 0)
- return err;
- } else {
- memcpy(dst, src, src->ops->size);
- }
-
- __module_get(src->ops->type->owner);
- return 0;
-}
-
static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
struct nft_regs *regs)
{
@@ -81,7 +64,6 @@ void nft_dynset_eval(const struct nft_expr *expr,
const struct nft_dynset *priv = nft_expr_priv(expr);
struct nft_set *set = priv->set;
const struct nft_set_ext *ext;
- const struct nft_expr *sexpr;
u64 timeout;
if (priv->op == NFT_DYNSET_OP_DELETE) {
@@ -91,18 +73,13 @@ void nft_dynset_eval(const struct nft_expr *expr,
if (set->ops->update(set, &regs->data[priv->sreg_key], nft_dynset_new,
expr, regs, &ext)) {
- sexpr = NULL;
- if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
- sexpr = nft_set_ext_expr(ext);
-
if (priv->op == NFT_DYNSET_OP_UPDATE &&
nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
timeout = priv->timeout ? : set->timeout;
*nft_set_ext_expiration(ext) = get_jiffies_64() + timeout;
}
- if (sexpr != NULL)
- sexpr->ops->eval(sexpr, regs, pkt);
+ nft_set_elem_update_expr(ext, regs, pkt);
if (priv->invert)
regs->verdict.code = NFT_BREAK;
@@ -206,20 +183,14 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
if (!(set->flags & NFT_SET_EVAL))
return -EINVAL;
- priv->expr = nft_expr_init(ctx, tb[NFTA_DYNSET_EXPR]);
+ priv->expr = nft_set_elem_expr_alloc(ctx, set,
+ tb[NFTA_DYNSET_EXPR]);
if (IS_ERR(priv->expr))
return PTR_ERR(priv->expr);
- err = -EOPNOTSUPP;
- if (!(priv->expr->ops->type->flags & NFT_EXPR_STATEFUL))
- goto err1;
-
- if (priv->expr->ops->type->flags & NFT_EXPR_GC) {
- if (set->flags & NFT_SET_TIMEOUT)
- goto err1;
- if (!set->ops->gc_init)
- goto err1;
- set->ops->gc_init(set);
+ if (set->expr && set->expr->ops != priv->expr->ops) {
+ err = -EOPNOTSUPP;
+ goto err_expr_free;
}
}
@@ -239,7 +210,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
err = nf_tables_bind_set(ctx, set, &priv->binding);
if (err < 0)
- goto err1;
+ goto err_expr_free;
if (set->size == 0)
set->size = 0xffff;
@@ -247,7 +218,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
priv->set = set;
return 0;
-err1:
+err_expr_free:
if (priv->expr != NULL)
nft_expr_destroy(ctx, priv->expr);
return err;
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index a5e8469859e3..07782836fad6 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -228,7 +228,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
unsigned int i, optl, tcphdr_len, offset;
struct tcphdr *tcph;
u8 *opt;
- u32 src;
tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
if (!tcph)
@@ -237,7 +236,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
opt = (u8 *)tcph;
for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
union {
- u8 octet;
__be16 v16;
__be32 v32;
} old, new;
@@ -259,13 +257,13 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
if (!tcph)
return;
- src = regs->data[priv->sreg];
offset = i + priv->offset;
switch (priv->len) {
case 2:
old.v16 = get_unaligned((u16 *)(opt + offset));
- new.v16 = src;
+ new.v16 = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg]);
switch (priv->type) {
case TCPOPT_MSS:
@@ -283,7 +281,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
old.v16, new.v16, false);
break;
case 4:
- new.v32 = src;
+ new.v32 = regs->data[priv->sreg];
old.v32 = get_unaligned((u32 *)(opt + offset));
if (old.v32 == new.v32)
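/* The 16-bit case above relies on how nftables registers store data: a
 * 16-bit value occupies the first two bytes of a 32-bit register, so it
 * must be read back as such rather than by truncating the u32 (which
 * picks the wrong half on big-endian hosts). nft_reg_load16() is
 * essentially (my_ name illustrative):
 */
static inline u16 my_reg_load16(const u32 *sreg)
{
	return *(const u16 *)sreg;	/* first two bytes, as stored */
}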
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 660bad688e2b..1e70359d633c 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -43,6 +43,7 @@ void nft_lookup_eval(const struct nft_expr *expr,
nft_data_copy(&regs->data[priv->dreg],
nft_set_ext_data(ext), set->dlen);
+ nft_set_elem_update_expr(ext, regs, pkt);
}
static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index 87e8d9ba0c9b..32f0fc8be3a4 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -81,6 +81,7 @@ static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
u32 idx, off;
nft_bitmap_location(set, key, &idx, &off);
+ *ext = NULL;
return nft_bitmap_active(priv->bitmap, idx, off, genmask);
}
@@ -285,6 +286,8 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
/* Make sure we don't get bitmaps larger than 16 Kbytes. */
if (desc->klen > 2)
return false;
+ else if (desc->expr)
+ return false;
est->size = nft_bitmap_total_size(desc->klen);
est->lookup = NFT_SET_CLASS_O_1;
@@ -293,8 +296,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
return true;
}
-struct nft_set_type nft_set_bitmap_type __read_mostly = {
- .owner = THIS_MODULE,
+const struct nft_set_type nft_set_bitmap_type = {
.ops = {
.privsize = nft_bitmap_privsize,
.elemsize = offsetof(struct nft_bitmap_elem, ext),
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index d350a7cd3af0..4d3f147e8d8d 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -662,8 +662,7 @@ static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features
return true;
}
-struct nft_set_type nft_set_rhash_type __read_mostly = {
- .owner = THIS_MODULE,
+const struct nft_set_type nft_set_rhash_type = {
.features = NFT_SET_MAP | NFT_SET_OBJECT |
NFT_SET_TIMEOUT | NFT_SET_EVAL,
.ops = {
@@ -686,8 +685,7 @@ struct nft_set_type nft_set_rhash_type __read_mostly = {
},
};
-struct nft_set_type nft_set_hash_type __read_mostly = {
- .owner = THIS_MODULE,
+const struct nft_set_type nft_set_hash_type = {
.features = NFT_SET_MAP | NFT_SET_OBJECT,
.ops = {
.privsize = nft_hash_privsize,
@@ -706,8 +704,7 @@ struct nft_set_type nft_set_hash_type __read_mostly = {
},
};
-struct nft_set_type nft_set_hash_fast_type __read_mostly = {
- .owner = THIS_MODULE,
+const struct nft_set_type nft_set_hash_fast_type = {
.features = NFT_SET_MAP | NFT_SET_OBJECT,
.ops = {
.privsize = nft_hash_privsize,
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index ef7e8ad2e344..87aabf651cfe 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -330,144 +330,22 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/log2.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <uapi/linux/netfilter/nf_tables.h>
-#include <net/ipv6.h> /* For the maximum length of a field */
#include <linux/bitmap.h>
#include <linux/bitops.h>
-/* Count of concatenated fields depends on count of 32-bit nftables registers */
-#define NFT_PIPAPO_MAX_FIELDS NFT_REG32_COUNT
-
-/* Largest supported field size */
-#define NFT_PIPAPO_MAX_BYTES (sizeof(struct in6_addr))
-#define NFT_PIPAPO_MAX_BITS (NFT_PIPAPO_MAX_BYTES * BITS_PER_BYTE)
-
-/* Number of bits to be grouped together in lookup table buckets, arbitrary */
-#define NFT_PIPAPO_GROUP_BITS 4
-#define NFT_PIPAPO_GROUPS_PER_BYTE (BITS_PER_BYTE / NFT_PIPAPO_GROUP_BITS)
-
-/* Fields are padded to 32 bits in input registers */
-#define NFT_PIPAPO_GROUPS_PADDED_SIZE(x) \
- (round_up((x) / NFT_PIPAPO_GROUPS_PER_BYTE, sizeof(u32)))
-#define NFT_PIPAPO_GROUPS_PADDING(x) \
- (NFT_PIPAPO_GROUPS_PADDED_SIZE((x)) - (x) / NFT_PIPAPO_GROUPS_PER_BYTE)
-
-/* Number of buckets, given by 2 ^ n, with n grouped bits */
-#define NFT_PIPAPO_BUCKETS (1 << NFT_PIPAPO_GROUP_BITS)
-
-/* Each n-bit range maps to up to n * 2 rules */
-#define NFT_PIPAPO_MAP_NBITS (const_ilog2(NFT_PIPAPO_MAX_BITS * 2))
-
-/* Use the rest of mapping table buckets for rule indices, but it makes no sense
- * to exceed 32 bits
- */
-#if BITS_PER_LONG == 64
-#define NFT_PIPAPO_MAP_TOBITS 32
-#else
-#define NFT_PIPAPO_MAP_TOBITS (BITS_PER_LONG - NFT_PIPAPO_MAP_NBITS)
-#endif
-
-/* ...which gives us the highest allowed index for a rule */
-#define NFT_PIPAPO_RULE0_MAX ((1UL << (NFT_PIPAPO_MAP_TOBITS - 1)) \
- - (1UL << NFT_PIPAPO_MAP_NBITS))
-
-#define nft_pipapo_for_each_field(field, index, match) \
- for ((field) = (match)->f, (index) = 0; \
- (index) < (match)->field_count; \
- (index)++, (field)++)
-
-/**
- * union nft_pipapo_map_bucket - Bucket of mapping table
- * @to: First rule number (in next field) this rule maps to
- * @n: Number of rules (in next field) this rule maps to
- * @e: If there's no next field, pointer to element this rule maps to
- */
-union nft_pipapo_map_bucket {
- struct {
-#if BITS_PER_LONG == 64
- static_assert(NFT_PIPAPO_MAP_TOBITS <= 32);
- u32 to;
-
- static_assert(NFT_PIPAPO_MAP_NBITS <= 32);
- u32 n;
-#else
- unsigned long to:NFT_PIPAPO_MAP_TOBITS;
- unsigned long n:NFT_PIPAPO_MAP_NBITS;
-#endif
- };
- struct nft_pipapo_elem *e;
-};
-
-/**
- * struct nft_pipapo_field - Lookup, mapping tables and related data for a field
- * @groups: Amount of 4-bit groups
- * @rules: Number of inserted rules
- * @bsize: Size of each bucket in lookup table, in longs
- * @lt: Lookup table: 'groups' rows of NFT_PIPAPO_BUCKETS buckets
- * @mt: Mapping table: one bucket per rule
- */
-struct nft_pipapo_field {
- int groups;
- unsigned long rules;
- size_t bsize;
- unsigned long *lt;
- union nft_pipapo_map_bucket *mt;
-};
-
-/**
- * struct nft_pipapo_match - Data used for lookup and matching
- * @field_count Amount of fields in set
- * @scratch: Preallocated per-CPU maps for partial matching results
- * @bsize_max: Maximum lookup table bucket size of all fields, in longs
- * @rcu Matching data is swapped on commits
- * @f: Fields, with lookup and mapping tables
- */
-struct nft_pipapo_match {
- int field_count;
- unsigned long * __percpu *scratch;
- size_t bsize_max;
- struct rcu_head rcu;
- struct nft_pipapo_field f[0];
-};
+#include "nft_set_pipapo_avx2.h"
+#include "nft_set_pipapo.h"
/* Current working bitmap index, toggled between field matches */
static DEFINE_PER_CPU(bool, nft_pipapo_scratch_index);
/**
- * struct nft_pipapo - Representation of a set
- * @match: Currently in-use matching data
- * @clone: Copy where pending insertions and deletions are kept
- * @groups: Total amount of 4-bit groups for fields in this set
- * @width: Total bytes to be matched for one packet, including padding
- * @dirty: Working copy has pending insertions or deletions
- * @last_gc: Timestamp of last garbage collection run, jiffies
- */
-struct nft_pipapo {
- struct nft_pipapo_match __rcu *match;
- struct nft_pipapo_match *clone;
- int groups;
- int width;
- bool dirty;
- unsigned long last_gc;
-};
-
-struct nft_pipapo_elem;
-
-/**
- * struct nft_pipapo_elem - API-facing representation of single set element
- * @ext: nftables API extensions
- */
-struct nft_pipapo_elem {
- struct nft_set_ext ext;
-};
-
-/**
* pipapo_refill() - For each set bit, set bits from selected mapping table item
* @map: Bitmap to be scanned for set bits
* @len: Length of bitmap in longs
@@ -484,9 +362,8 @@ struct nft_pipapo_elem {
*
* Return: -1 on no match, bit position on 'match_only', 0 otherwise.
*/
-static int pipapo_refill(unsigned long *map, int len, int rules,
- unsigned long *dst, union nft_pipapo_map_bucket *mt,
- bool match_only)
+int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
+ union nft_pipapo_map_bucket *mt, bool match_only)
{
unsigned long bitset;
int k, ret = -1;
@@ -559,26 +436,18 @@ static bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
nft_pipapo_for_each_field(f, i, m) {
bool last = i == m->field_count - 1;
- unsigned long *lt = f->lt;
- int b, group;
+ int b;
- /* For each 4-bit group: select lookup table bucket depending on
+ /* For each bit group: select lookup table bucket depending on
* packet bytes value, then AND bucket value
*/
- for (group = 0; group < f->groups; group += 2) {
- u8 v;
-
- v = *rp >> 4;
- __bitmap_and(res_map, res_map, lt + v * f->bsize,
- f->bsize * BITS_PER_LONG);
- lt += f->bsize * NFT_PIPAPO_BUCKETS;
-
- v = *rp & 0x0f;
- rp++;
- __bitmap_and(res_map, res_map, lt + v * f->bsize,
- f->bsize * BITS_PER_LONG);
- lt += f->bsize * NFT_PIPAPO_BUCKETS;
- }
+ if (likely(f->bb == 8))
+ pipapo_and_field_buckets_8bit(f, res_map, rp);
+ else
+ pipapo_and_field_buckets_4bit(f, res_map, rp);
+ NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
+
+ rp += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
/* Now populate the bitmap for the next field, unless this is
* the last field, in which case return the matched 'ext'
@@ -621,7 +490,7 @@ next_match:
map_index = !map_index;
swap(res_map, fill_map);
- rp += NFT_PIPAPO_GROUPS_PADDING(f->groups);
+ rp += NFT_PIPAPO_GROUPS_PADDING(f);
}
out:
@@ -669,26 +538,19 @@ static struct nft_pipapo_elem *pipapo_get(const struct net *net,
nft_pipapo_for_each_field(f, i, m) {
bool last = i == m->field_count - 1;
- unsigned long *lt = f->lt;
- int b, group;
+ int b;
- /* For each 4-bit group: select lookup table bucket depending on
+ /* For each bit group: select lookup table bucket depending on
* packet bytes value, then AND bucket value
*/
- for (group = 0; group < f->groups; group++) {
- u8 v;
-
- if (group % 2) {
- v = *data & 0x0f;
- data++;
- } else {
- v = *data >> 4;
- }
- __bitmap_and(res_map, res_map, lt + v * f->bsize,
- f->bsize * BITS_PER_LONG);
+ if (f->bb == 8)
+ pipapo_and_field_buckets_8bit(f, res_map, data);
+ else if (f->bb == 4)
+ pipapo_and_field_buckets_4bit(f, res_map, data);
+ else
+ BUG();
- lt += f->bsize * NFT_PIPAPO_BUCKETS;
- }
+ data += f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f);
/* Now populate the bitmap for the next field, unless this is
* the last field, in which case return the matched 'ext'
@@ -713,7 +575,7 @@ next_match:
goto out;
}
- data += NFT_PIPAPO_GROUPS_PADDING(f->groups);
+ data += NFT_PIPAPO_GROUPS_PADDING(f);
/* Swap bitmap indices: fill_map will be the initial bitmap for
* the next field (i.e. the new res_map), and res_map is
@@ -736,8 +598,8 @@ out:
* @elem: nftables API element representation containing key data
* @flags: Unused
*/
-void *nft_pipapo_get(const struct net *net, const struct nft_set *set,
- const struct nft_set_elem *elem, unsigned int flags)
+static void *nft_pipapo_get(const struct net *net, const struct nft_set *set,
+ const struct nft_set_elem *elem, unsigned int flags)
{
return pipapo_get(net, set, (const u8 *)elem->key.val.data,
nft_genmask_cur(net));
@@ -763,6 +625,10 @@ static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
int group, bucket;
new_bucket_size = DIV_ROUND_UP(rules, BITS_PER_LONG);
+#ifdef NFT_PIPAPO_ALIGN
+ new_bucket_size = roundup(new_bucket_size,
+ NFT_PIPAPO_ALIGN / sizeof(*new_lt));
+#endif
if (new_bucket_size == f->bsize)
goto mt;
@@ -772,15 +638,18 @@ static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
else
copy = new_bucket_size;
- new_lt = kvzalloc(f->groups * NFT_PIPAPO_BUCKETS * new_bucket_size *
- sizeof(*new_lt), GFP_KERNEL);
+ new_lt = kvzalloc(f->groups * NFT_PIPAPO_BUCKETS(f->bb) *
+ new_bucket_size * sizeof(*new_lt) +
+ NFT_PIPAPO_ALIGN_HEADROOM,
+ GFP_KERNEL);
if (!new_lt)
return -ENOMEM;
- new_p = new_lt;
- old_p = old_lt;
+ new_p = NFT_PIPAPO_LT_ALIGN(new_lt);
+ old_p = NFT_PIPAPO_LT_ALIGN(old_lt);
+
for (group = 0; group < f->groups; group++) {
- for (bucket = 0; bucket < NFT_PIPAPO_BUCKETS; bucket++) {
+ for (bucket = 0; bucket < NFT_PIPAPO_BUCKETS(f->bb); bucket++) {
memcpy(new_p, old_p, copy * sizeof(*new_p));
new_p += copy;
old_p += copy;
@@ -807,7 +676,7 @@ mt:
if (new_lt) {
f->bsize = new_bucket_size;
- f->lt = new_lt;
+ NFT_PIPAPO_LT_ASSIGN(f, new_lt);
kvfree(old_lt);
}
@@ -829,13 +698,196 @@ static void pipapo_bucket_set(struct nft_pipapo_field *f, int rule, int group,
{
unsigned long *pos;
- pos = f->lt + f->bsize * NFT_PIPAPO_BUCKETS * group;
+ pos = NFT_PIPAPO_LT_ALIGN(f->lt);
+ pos += f->bsize * NFT_PIPAPO_BUCKETS(f->bb) * group;
pos += f->bsize * v;
__set_bit(rule, pos);
}
/**
+ * pipapo_lt_4b_to_8b() - Switch lookup table group width from 4 bits to 8 bits
+ * @old_groups: Number of current groups
+ * @bsize: Size of one bucket, in longs
+ * @old_lt: Pointer to the current lookup table
+ * @new_lt: Pointer to the new, pre-allocated lookup table
+ *
+ * Each bucket with index b in the new lookup table, belonging to group g, is
+ * filled with the bit intersection between:
+ * - bucket with index given by the upper 4 bits of b, from old group 2g, and
+ * - bucket with index given by the lower 4 bits of b, from old group 2g + 1
+ *
+ * That is, given buckets from the new lookup table N(x, y) and the old lookup
+ * table O(x, y), with x bucket index, and y group index:
+ *
+ * N(b, g) := O(b / 16, 2g) & O(b % 16, 2g + 1)
+ *
+ * This ensures equivalence of the matching results on lookup. Two examples in
+ * pictures:
+ *
+ * bucket
+ * group 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 ... 254 255
+ * 0 ^
+ * 1 | ^
+ * ... ( & ) |
+ * / \ |
+ * / \ .-( & )-.
+ * / bucket \ | |
+ * group 0 / 1 2 3 \ 4 5 6 7 8 9 10 11 12 13 |14 15 |
+ * 0 / \ | |
+ * 1 \ | |
+ * 2 | --'
+ * 3 '-
+ * ...
+ */
+static void pipapo_lt_4b_to_8b(int old_groups, int bsize,
+ unsigned long *old_lt, unsigned long *new_lt)
+{
+ int g, b, i;
+
+ for (g = 0; g < old_groups / 2; g++) {
+ int src_g0 = g * 2, src_g1 = g * 2 + 1;
+
+ for (b = 0; b < NFT_PIPAPO_BUCKETS(8); b++) {
+ int src_b0 = b / NFT_PIPAPO_BUCKETS(4);
+ int src_b1 = b % NFT_PIPAPO_BUCKETS(4);
+ int src_i0 = src_g0 * NFT_PIPAPO_BUCKETS(4) + src_b0;
+ int src_i1 = src_g1 * NFT_PIPAPO_BUCKETS(4) + src_b1;
+
+ for (i = 0; i < bsize; i++) {
+ *new_lt = old_lt[src_i0 * bsize + i] &
+ old_lt[src_i1 * bsize + i];
+ new_lt++;
+ }
+ }
+ }
+}
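
To make the intersection mapping concrete, a minimal userspace sketch (not part of the patch; bsize == 1 and one rule per bit, names illustrative) converts a toy one-byte field from two 4-bit groups to one 8-bit group and checks that lookup results agree:

    #include <assert.h>
    #include <stdio.h>

    #define BUCKETS4 16
    #define BUCKETS8 256

    static unsigned long old_lt[2 * BUCKETS4];  /* two 4-bit groups */
    static unsigned long new_lt[BUCKETS8];      /* one 8-bit group  */

    int main(void)
    {
            unsigned int b;

            /* Rule 0 matches byte 0x6a: bucket 0x6 of group 0, 0xa of group 1 */
            old_lt[0 * BUCKETS4 + 0x6] |= 1UL << 0;
            old_lt[1 * BUCKETS4 + 0xa] |= 1UL << 0;

            /* N(b, 0) := O(b / 16, 0) & O(b % 16, 1), as in pipapo_lt_4b_to_8b() */
            for (b = 0; b < BUCKETS8; b++)
                    new_lt[b] = old_lt[0 * BUCKETS4 + b / 16] &
                                old_lt[1 * BUCKETS4 + b % 16];

            /* Matching byte 0x6a now takes a single bucket selection */
            assert(new_lt[0x6a] & (1UL << 0));
            assert(!new_lt[0x6b]);
            printf("byte 0x6a -> bucket bits 0x%lx\n", new_lt[0x6a]);
            return 0;
    }
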
+
+/**
+ * pipapo_lt_8b_to_4b() - Switch lookup table group width from 8 bits to 4 bits
+ * @old_groups: Number of current groups
+ * @bsize: Size of one bucket, in longs
+ * @old_lt: Pointer to the current lookup table
+ * @new_lt: Pointer to the new, pre-allocated lookup table
+ *
+ * Each bucket with index b in the new lookup table, belonging to group g, is
+ * filled with the bit union of:
+ * - all the buckets of old group g / 2 whose index has upper four bits equal
+ *   to b, with g even
+ * - all the buckets of old group g / 2 whose index has lower four bits equal
+ *   to b, with g odd
+ *
+ * That is, given buckets from the new lookup table N(x, y) and the old lookup
+ * table O(x, y), with x bucket index, and y group index:
+ *
+ * - with g even: N(b, g) := U(O(x, g / 2) for each x : (x & 0xf0) >> 4 == b)
+ * - with g odd: N(b, g) := U(O(x, g / 2) for each x : (x & 0x0f) == b)
+ *
+ * where U() denotes the union operation (bitwise OR of all terms). This
+ * ensures equivalence of the matching results on lookup.
+ */
+static void pipapo_lt_8b_to_4b(int old_groups, int bsize,
+ unsigned long *old_lt, unsigned long *new_lt)
+{
+ int g, b, bsrc, i;
+
+ memset(new_lt, 0, old_groups * 2 * NFT_PIPAPO_BUCKETS(4) * bsize *
+ sizeof(unsigned long));
+
+ for (g = 0; g < old_groups * 2; g += 2) {
+ int src_g = g / 2;
+
+ for (b = 0; b < NFT_PIPAPO_BUCKETS(4); b++) {
+ for (bsrc = NFT_PIPAPO_BUCKETS(8) * src_g;
+ bsrc < NFT_PIPAPO_BUCKETS(8) * (src_g + 1);
+ bsrc++) {
+ if (((bsrc & 0xf0) >> 4) != b)
+ continue;
+
+ for (i = 0; i < bsize; i++)
+ new_lt[i] |= old_lt[bsrc * bsize + i];
+ }
+
+ new_lt += bsize;
+ }
+
+ for (b = 0; b < NFT_PIPAPO_BUCKETS(4); b++) {
+ for (bsrc = NFT_PIPAPO_BUCKETS(8) * src_g;
+ bsrc < NFT_PIPAPO_BUCKETS(8) * (src_g + 1);
+ bsrc++) {
+ if ((bsrc & 0x0f) != b)
+ continue;
+
+ for (i = 0; i < bsize; i++)
+ new_lt[i] |= old_lt[bsrc * bsize + i];
+ }
+
+ new_lt += bsize;
+ }
+ }
+}
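
The opposite direction can be sanity-checked the same way; a hedged sketch with the same toy layout, showing that even new groups collect old buckets by upper nibble, odd new groups by lower nibble, and that intersecting the two nibble buckets reproduces the 8-bit match:

    #include <assert.h>

    #define BUCKETS4 16
    #define BUCKETS8 256

    static unsigned long old_lt[BUCKETS8];      /* one 8-bit group  */
    static unsigned long new_lt[2 * BUCKETS4];  /* two 4-bit groups */

    int main(void)
    {
            unsigned int b;

            old_lt[0x6a] = 1UL << 0;        /* rule 0 matches byte 0x6a */
            old_lt[0x6b] = 1UL << 1;        /* rule 1 matches byte 0x6b */

            for (b = 0; b < BUCKETS8; b++) {
                    new_lt[0 * BUCKETS4 + (b >> 4)] |= old_lt[b];  /* even group */
                    new_lt[1 * BUCKETS4 + (b & 0xf)] |= old_lt[b]; /* odd group  */
            }

            /* Matching 0x6a again intersects upper- and lower-nibble buckets */
            assert((new_lt[0x6] & new_lt[BUCKETS4 + 0xa]) == (1UL << 0));
            return 0;
    }
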
+
+/**
+ * pipapo_lt_bits_adjust() - Adjust group size for lookup table if needed
+ * @f: Field containing lookup table
+ */
+static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
+{
+ unsigned long *new_lt;
+ int groups, bb;
+ size_t lt_size;
+
+ lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize *
+ sizeof(*f->lt);
+
+ if (f->bb == NFT_PIPAPO_GROUP_BITS_SMALL_SET &&
+ lt_size > NFT_PIPAPO_LT_SIZE_HIGH) {
+ groups = f->groups * 2;
+ bb = NFT_PIPAPO_GROUP_BITS_LARGE_SET;
+
+ lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
+ sizeof(*f->lt);
+ } else if (f->bb == NFT_PIPAPO_GROUP_BITS_LARGE_SET &&
+ lt_size < NFT_PIPAPO_LT_SIZE_LOW) {
+ groups = f->groups / 2;
+ bb = NFT_PIPAPO_GROUP_BITS_SMALL_SET;
+
+ lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
+ sizeof(*f->lt);
+
+ /* Don't increase group width if the resulting lookup table size
+ * would exceed the upper size threshold for a "small" set.
+ */
+ if (lt_size > NFT_PIPAPO_LT_SIZE_HIGH)
+ return;
+ } else {
+ return;
+ }
+
+ new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL);
+ if (!new_lt)
+ return;
+
+ NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
+ if (f->bb == 4 && bb == 8) {
+ pipapo_lt_4b_to_8b(f->groups, f->bsize,
+ NFT_PIPAPO_LT_ALIGN(f->lt),
+ NFT_PIPAPO_LT_ALIGN(new_lt));
+ } else if (f->bb == 8 && bb == 4) {
+ pipapo_lt_8b_to_4b(f->groups, f->bsize,
+ NFT_PIPAPO_LT_ALIGN(f->lt),
+ NFT_PIPAPO_LT_ALIGN(new_lt));
+ } else {
+ BUG();
+ }
+
+ f->groups = groups;
+ f->bb = bb;
+ kvfree(f->lt);
+ NFT_PIPAPO_LT_ASSIGN(f, new_lt);
+}
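
For a rough feel of when the adjustment triggers, worked numbers (assuming a 64-bit build, 8 bytes per bucket long): an IPv6 field at 8-bit group width has 16 groups of 256 buckets, so lt_size is 16 * 256 * bsize * 8 bytes and crosses the 2MiB threshold once bsize exceeds 64 longs, that is, beyond 4096 rules. A trivial sketch of the same arithmetic:

    #include <stdio.h>

    int main(void)
    {
            const long high = 1L << 21;                /* NFT_PIPAPO_LT_SIZE_HIGH */
            const long low = (1L << 21) - (1L << 16);  /* NFT_PIPAPO_LT_SIZE_LOW  */
            long groups = 16, buckets = 256, bsize;    /* IPv6, 8-bit groups      */

            for (bsize = 1; groups * buckets * bsize * 8 <= high; bsize++)
                    ;
            printf("switch to 4-bit groups at bsize=%ld longs (%ld rules)\n",
                   bsize, bsize * 64);
            printf("switch back once the 4-bit table shrinks below %ld bytes\n",
                   low);
            return 0;
    }
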
+
+/**
* pipapo_insert() - Insert new rule in field given input key and mask length
* @f: Field containing lookup table
* @k: Input key for classification, without nftables padding
@@ -849,7 +901,7 @@ static void pipapo_bucket_set(struct nft_pipapo_field *f, int rule, int group,
static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
int mask_bits)
{
- int rule = f->rules++, group, ret;
+ int rule = f->rules++, group, ret, bit_offset = 0;
ret = pipapo_resize(f, f->rules - 1, f->rules);
if (ret)
@@ -859,28 +911,33 @@ static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
int i, v;
u8 mask;
- if (group % 2)
- v = k[group / 2] & 0x0f;
- else
- v = k[group / 2] >> 4;
+ v = k[group / (BITS_PER_BYTE / f->bb)];
+ v &= GENMASK(BITS_PER_BYTE - bit_offset - 1, 0);
+ v >>= (BITS_PER_BYTE - bit_offset) - f->bb;
+
+ bit_offset += f->bb;
+ bit_offset %= BITS_PER_BYTE;
- if (mask_bits >= (group + 1) * 4) {
+ if (mask_bits >= (group + 1) * f->bb) {
/* Not masked */
pipapo_bucket_set(f, rule, group, v);
- } else if (mask_bits <= group * 4) {
+ } else if (mask_bits <= group * f->bb) {
/* Completely masked */
- for (i = 0; i < NFT_PIPAPO_BUCKETS; i++)
+ for (i = 0; i < NFT_PIPAPO_BUCKETS(f->bb); i++)
pipapo_bucket_set(f, rule, group, i);
} else {
/* The mask limit falls on this group */
- mask = 0x0f >> (mask_bits - group * 4);
- for (i = 0; i < NFT_PIPAPO_BUCKETS; i++) {
+ mask = GENMASK(f->bb - 1, 0);
+ mask >>= mask_bits - group * f->bb;
+ for (i = 0; i < NFT_PIPAPO_BUCKETS(f->bb); i++) {
if ((i & ~mask) == (v & ~mask))
pipapo_bucket_set(f, rule, group, i);
}
}
}
+ pipapo_lt_bits_adjust(f);
+
return 1;
}
@@ -1053,8 +1110,12 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
for_each_possible_cpu(i) {
unsigned long *scratch;
+#ifdef NFT_PIPAPO_ALIGN
+ unsigned long *scratch_aligned;
+#endif
- scratch = kzalloc_node(bsize_max * sizeof(*scratch) * 2,
+ scratch = kzalloc_node(bsize_max * sizeof(*scratch) * 2 +
+ NFT_PIPAPO_ALIGN_HEADROOM,
GFP_KERNEL, cpu_to_node(i));
if (!scratch) {
/* On failure, there's no need to undo previous
@@ -1070,6 +1131,11 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
kfree(*per_cpu_ptr(clone->scratch, i));
*per_cpu_ptr(clone->scratch, i) = scratch;
+
+#ifdef NFT_PIPAPO_ALIGN
+ scratch_aligned = NFT_PIPAPO_LT_ALIGN(scratch);
+ *per_cpu_ptr(clone->scratch_aligned, i) = scratch_aligned;
+#endif
}
return 0;
@@ -1143,11 +1209,11 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
return -ENOSPC;
if (memcmp(start_p, end_p,
- f->groups / NFT_PIPAPO_GROUPS_PER_BYTE) > 0)
+ f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f)) > 0)
return -EINVAL;
- start_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f->groups);
- end_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f->groups);
+ start_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
+ end_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
}
/* Insert */
@@ -1161,22 +1227,19 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
rulemap[i].to = f->rules;
ret = memcmp(start, end,
- f->groups / NFT_PIPAPO_GROUPS_PER_BYTE);
- if (!ret) {
- ret = pipapo_insert(f, start,
- f->groups * NFT_PIPAPO_GROUP_BITS);
- } else {
- ret = pipapo_expand(f, start, end,
- f->groups * NFT_PIPAPO_GROUP_BITS);
- }
+ f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f));
+ if (!ret)
+ ret = pipapo_insert(f, start, f->groups * f->bb);
+ else
+ ret = pipapo_expand(f, start, end, f->groups * f->bb);
if (f->bsize > bsize_max)
bsize_max = f->bsize;
rulemap[i].n = ret;
- start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f->groups);
- end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f->groups);
+ start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
+ end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
}
if (!*this_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) {
@@ -1220,23 +1283,35 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
if (!new->scratch)
goto out_scratch;
+#ifdef NFT_PIPAPO_ALIGN
+ new->scratch_aligned = alloc_percpu(*new->scratch_aligned);
+ if (!new->scratch_aligned)
+ goto out_scratch;
+#endif
+
rcu_head_init(&new->rcu);
src = old->f;
dst = new->f;
for (i = 0; i < old->field_count; i++) {
+ unsigned long *new_lt;
+
memcpy(dst, src, offsetof(struct nft_pipapo_field, lt));
- dst->lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS *
- src->bsize * sizeof(*dst->lt),
- GFP_KERNEL);
- if (!dst->lt)
+ new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
+ src->bsize * sizeof(*dst->lt) +
+ NFT_PIPAPO_ALIGN_HEADROOM,
+ GFP_KERNEL);
+ if (!new_lt)
goto out_lt;
- memcpy(dst->lt, src->lt,
+ NFT_PIPAPO_LT_ASSIGN(dst, new_lt);
+
+ memcpy(NFT_PIPAPO_LT_ALIGN(new_lt),
+ NFT_PIPAPO_LT_ALIGN(src->lt),
src->bsize * sizeof(*dst->lt) *
- src->groups * NFT_PIPAPO_BUCKETS);
+ src->groups * NFT_PIPAPO_BUCKETS(src->bb));
dst->mt = kvmalloc(src->rules * sizeof(*src->mt), GFP_KERNEL);
if (!dst->mt)
@@ -1257,8 +1332,11 @@ out_lt:
kvfree(dst->lt);
dst--;
}
- free_percpu(new->scratch);
+#ifdef NFT_PIPAPO_ALIGN
+ free_percpu(new->scratch_aligned);
+#endif
out_scratch:
+ free_percpu(new->scratch);
kfree(new);
return ERR_PTR(-ENOMEM);
@@ -1414,9 +1492,10 @@ static void pipapo_drop(struct nft_pipapo_match *m,
unsigned long *pos;
int b;
- pos = f->lt + g * NFT_PIPAPO_BUCKETS * f->bsize;
+ pos = NFT_PIPAPO_LT_ALIGN(f->lt) + g *
+ NFT_PIPAPO_BUCKETS(f->bb) * f->bsize;
- for (b = 0; b < NFT_PIPAPO_BUCKETS; b++) {
+ for (b = 0; b < NFT_PIPAPO_BUCKETS(f->bb); b++) {
bitmap_cut(pos, pos, rulemap[i].to,
rulemap[i].n,
f->bsize * BITS_PER_LONG);
@@ -1434,6 +1513,8 @@ static void pipapo_drop(struct nft_pipapo_match *m,
;
}
f->rules -= rulemap[i].n;
+
+ pipapo_lt_bits_adjust(f);
}
}
@@ -1518,6 +1599,9 @@ static void pipapo_reclaim_match(struct rcu_head *rcu)
for_each_possible_cpu(i)
kfree(*per_cpu_ptr(m->scratch, i));
+#ifdef NFT_PIPAPO_ALIGN
+ free_percpu(m->scratch_aligned);
+#endif
free_percpu(m->scratch);
pipapo_free_fields(m);
@@ -1710,30 +1794,33 @@ static bool nft_pipapo_flush(const struct net *net, const struct nft_set *set,
static int pipapo_get_boundaries(struct nft_pipapo_field *f, int first_rule,
int rule_count, u8 *left, u8 *right)
{
+ int g, mask_len = 0, bit_offset = 0;
u8 *l = left, *r = right;
- int g, mask_len = 0;
for (g = 0; g < f->groups; g++) {
int b, x0, x1;
x0 = -1;
x1 = -1;
- for (b = 0; b < NFT_PIPAPO_BUCKETS; b++) {
+ for (b = 0; b < NFT_PIPAPO_BUCKETS(f->bb); b++) {
unsigned long *pos;
- pos = f->lt + (g * NFT_PIPAPO_BUCKETS + b) * f->bsize;
+ pos = NFT_PIPAPO_LT_ALIGN(f->lt) +
+ (g * NFT_PIPAPO_BUCKETS(f->bb) + b) * f->bsize;
if (test_bit(first_rule, pos) && x0 == -1)
x0 = b;
if (test_bit(first_rule + rule_count - 1, pos))
x1 = b;
}
- if (g % 2) {
- *(l++) |= x0 & 0x0f;
- *(r++) |= x1 & 0x0f;
- } else {
- *l |= x0 << 4;
- *r |= x1 << 4;
+ *l |= x0 << (BITS_PER_BYTE - f->bb - bit_offset);
+ *r |= x1 << (BITS_PER_BYTE - f->bb - bit_offset);
+
+ bit_offset += f->bb;
+ if (bit_offset >= BITS_PER_BYTE) {
+ bit_offset %= BITS_PER_BYTE;
+ l++;
+ r++;
}
if (x1 - x0 == 0)
@@ -1768,8 +1855,9 @@ static bool pipapo_match_field(struct nft_pipapo_field *f,
pipapo_get_boundaries(f, first_rule, rule_count, left, right);
- return !memcmp(start, left, f->groups / NFT_PIPAPO_GROUPS_PER_BYTE) &&
- !memcmp(end, right, f->groups / NFT_PIPAPO_GROUPS_PER_BYTE);
+ return !memcmp(start, left,
+ f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f)) &&
+ !memcmp(end, right, f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f));
}
/**
@@ -1821,8 +1909,8 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
rules_fx = f->mt[start].n;
start = f->mt[start].to;
- match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f->groups);
- match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f->groups);
+ match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
+ match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
}
if (i == m->field_count) {
@@ -1905,56 +1993,24 @@ static u64 nft_pipapo_privsize(const struct nlattr * const nla[],
}
/**
- * nft_pipapo_estimate() - Estimate set size, space and lookup complexity
- * @desc: Set description, element count and field description used here
+ * nft_pipapo_estimate() - Set size, space and lookup complexity
+ * @desc: Set description, element count and field description used
* @features: Flags: NFT_SET_INTERVAL needs to be there
* @est: Storage for estimation data
*
- * The size for this set type can vary dramatically, as it depends on the number
- * of rules (composing netmasks) the entries expand to. We compute the worst
- * case here.
- *
- * In general, for a non-ranged entry or a single composing netmask, we need
- * one bit in each of the sixteen NFT_PIPAPO_BUCKETS, for each 4-bit group (that
- * is, each input bit needs four bits of matching data), plus a bucket in the
- * mapping table for each field.
- *
- * Return: true only for compatible range concatenations
+ * Return: true if set description is compatible, false otherwise
*/
static bool nft_pipapo_estimate(const struct nft_set_desc *desc, u32 features,
struct nft_set_estimate *est)
{
- unsigned long entry_size;
- int i;
-
- if (!(features & NFT_SET_INTERVAL) || desc->field_count <= 1)
+ if (!(features & NFT_SET_INTERVAL) ||
+ desc->field_count < NFT_PIPAPO_MIN_FIELDS)
return false;
- for (i = 0, entry_size = 0; i < desc->field_count; i++) {
- unsigned long rules;
-
- if (desc->field_len[i] > NFT_PIPAPO_MAX_BYTES)
- return false;
-
- /* Worst-case ranges for each concatenated field: each n-bit
- * field can expand to up to n * 2 rules in each bucket, and
- * each rule also needs a mapping bucket.
- */
- rules = ilog2(desc->field_len[i] * BITS_PER_BYTE) * 2;
- entry_size += rules * NFT_PIPAPO_BUCKETS / BITS_PER_BYTE;
- entry_size += rules * sizeof(union nft_pipapo_map_bucket);
- }
-
- /* Rules in lookup and mapping tables are needed for each entry */
- est->size = desc->size * entry_size;
- if (est->size && div_u64(est->size, desc->size) != entry_size)
+ est->size = pipapo_estimate_size(desc);
+ if (!est->size)
return false;
- est->size += sizeof(struct nft_pipapo) +
- sizeof(struct nft_pipapo_match) * 2;
-
- est->size += sizeof(struct nft_pipapo_field) * desc->field_count;
-
est->lookup = NFT_SET_CLASS_O_LOG_N;
est->space = NFT_SET_CLASS_O_N;
@@ -1981,38 +2037,52 @@ static int nft_pipapo_init(const struct nft_set *set,
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *m;
struct nft_pipapo_field *f;
- int err, i;
+ int err, i, field_count;
+
+ field_count = desc->field_count ? : 1;
- if (desc->field_count > NFT_PIPAPO_MAX_FIELDS)
+ if (field_count > NFT_PIPAPO_MAX_FIELDS)
return -EINVAL;
- m = kmalloc(sizeof(*priv->match) + sizeof(*f) * desc->field_count,
+ m = kmalloc(sizeof(*priv->match) + sizeof(*f) * field_count,
GFP_KERNEL);
if (!m)
return -ENOMEM;
- m->field_count = desc->field_count;
+ m->field_count = field_count;
m->bsize_max = 0;
m->scratch = alloc_percpu(unsigned long *);
if (!m->scratch) {
err = -ENOMEM;
- goto out_free;
+ goto out_scratch;
}
for_each_possible_cpu(i)
*per_cpu_ptr(m->scratch, i) = NULL;
+#ifdef NFT_PIPAPO_ALIGN
+ m->scratch_aligned = alloc_percpu(unsigned long *);
+ if (!m->scratch_aligned) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ for_each_possible_cpu(i)
+ *per_cpu_ptr(m->scratch_aligned, i) = NULL;
+#endif
+
rcu_head_init(&m->rcu);
nft_pipapo_for_each_field(f, i, m) {
- f->groups = desc->field_len[i] * NFT_PIPAPO_GROUPS_PER_BYTE;
- priv->groups += f->groups;
+ int len = desc->field_len[i] ? : set->klen;
- priv->width += round_up(desc->field_len[i], sizeof(u32));
+ f->bb = NFT_PIPAPO_GROUP_BITS_INIT;
+ f->groups = len * NFT_PIPAPO_GROUPS_PER_BYTE(f);
+
+ priv->width += round_up(len, sizeof(u32));
f->bsize = 0;
f->rules = 0;
- f->lt = NULL;
+ NFT_PIPAPO_LT_ASSIGN(f, NULL);
f->mt = NULL;
}
@@ -2030,7 +2100,11 @@ static int nft_pipapo_init(const struct nft_set *set,
return 0;
out_free:
+#ifdef NFT_PIPAPO_ALIGN
+ free_percpu(m->scratch_aligned);
+#endif
free_percpu(m->scratch);
+out_scratch:
kfree(m);
return err;
@@ -2065,16 +2139,21 @@ static void nft_pipapo_destroy(const struct nft_set *set)
nft_set_elem_destroy(set, e, true);
}
+#ifdef NFT_PIPAPO_ALIGN
+ free_percpu(m->scratch_aligned);
+#endif
for_each_possible_cpu(cpu)
kfree(*per_cpu_ptr(m->scratch, cpu));
free_percpu(m->scratch);
-
pipapo_free_fields(m);
kfree(m);
priv->match = NULL;
}
if (priv->clone) {
+#ifdef NFT_PIPAPO_ALIGN
+ free_percpu(priv->clone->scratch_aligned);
+#endif
for_each_possible_cpu(cpu)
kfree(*per_cpu_ptr(priv->clone->scratch, cpu));
free_percpu(priv->clone->scratch);
@@ -2101,8 +2180,7 @@ static void nft_pipapo_gc_init(const struct nft_set *set)
priv->last_gc = jiffies;
}
-struct nft_set_type nft_set_pipapo_type __read_mostly = {
- .owner = THIS_MODULE,
+const struct nft_set_type nft_set_pipapo_type = {
.features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT |
NFT_SET_TIMEOUT,
.ops = {
@@ -2122,3 +2200,26 @@ struct nft_set_type nft_set_pipapo_type __read_mostly = {
.elemsize = offsetof(struct nft_pipapo_elem, ext),
},
};
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_AS_AVX2)
+const struct nft_set_type nft_set_pipapo_avx2_type = {
+ .features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT |
+ NFT_SET_TIMEOUT,
+ .ops = {
+ .lookup = nft_pipapo_avx2_lookup,
+ .insert = nft_pipapo_insert,
+ .activate = nft_pipapo_activate,
+ .deactivate = nft_pipapo_deactivate,
+ .flush = nft_pipapo_flush,
+ .remove = nft_pipapo_remove,
+ .walk = nft_pipapo_walk,
+ .get = nft_pipapo_get,
+ .privsize = nft_pipapo_privsize,
+ .estimate = nft_pipapo_avx2_estimate,
+ .init = nft_pipapo_init,
+ .destroy = nft_pipapo_destroy,
+ .gc_init = nft_pipapo_gc_init,
+ .elemsize = offsetof(struct nft_pipapo_elem, ext),
+ },
+};
+#endif
diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
new file mode 100644
index 000000000000..25a75591583e
--- /dev/null
+++ b/net/netfilter/nft_set_pipapo.h
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#ifndef _NFT_SET_PIPAPO_H
+#define _NFT_SET_PIPAPO_H
+
+#include <linux/log2.h>
+#include <net/ipv6.h> /* For the maximum length of a field */
+
+/* Count of concatenated fields depends on count of 32-bit nftables registers */
+#define NFT_PIPAPO_MAX_FIELDS NFT_REG32_COUNT
+
+/* Restrict usage to multiple fields, make sure rbtree is used otherwise */
+#define NFT_PIPAPO_MIN_FIELDS 2
+
+/* Largest supported field size */
+#define NFT_PIPAPO_MAX_BYTES (sizeof(struct in6_addr))
+#define NFT_PIPAPO_MAX_BITS (NFT_PIPAPO_MAX_BYTES * BITS_PER_BYTE)
+
+/* Bits to be grouped together in table buckets depending on set size */
+#define NFT_PIPAPO_GROUP_BITS_INIT NFT_PIPAPO_GROUP_BITS_SMALL_SET
+#define NFT_PIPAPO_GROUP_BITS_SMALL_SET 8
+#define NFT_PIPAPO_GROUP_BITS_LARGE_SET 4
+#define NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4 \
+ BUILD_BUG_ON((NFT_PIPAPO_GROUP_BITS_SMALL_SET != 8) || \
+ (NFT_PIPAPO_GROUP_BITS_LARGE_SET != 4))
+#define NFT_PIPAPO_GROUPS_PER_BYTE(f) (BITS_PER_BYTE / (f)->bb)
+
+/* If a lookup table gets bigger than NFT_PIPAPO_LT_SIZE_HIGH, switch to the
+ * small group width, and switch to the big group width if the table gets
+ * smaller than NFT_PIPAPO_LT_SIZE_LOW.
+ *
+ * Picking 2MiB as the threshold (for a single table) avoids crossing page
+ * boundaries as much as possible on most architectures (x86-64 and MIPS huge
+ * pages, ARMv7 supersections, POWER "large" pages, SPARC Level 1 regions,
+ * etc.), which keeps performance nice in case kvmalloc() gives us
+ * non-contiguous areas.
+ */
+#define NFT_PIPAPO_LT_SIZE_THRESHOLD (1 << 21)
+#define NFT_PIPAPO_LT_SIZE_HYSTERESIS (1 << 16)
+#define NFT_PIPAPO_LT_SIZE_HIGH NFT_PIPAPO_LT_SIZE_THRESHOLD
+#define NFT_PIPAPO_LT_SIZE_LOW		(NFT_PIPAPO_LT_SIZE_THRESHOLD -	\
+					 NFT_PIPAPO_LT_SIZE_HYSTERESIS)
+
+/* Fields are padded to 32 bits in input registers */
+#define NFT_PIPAPO_GROUPS_PADDED_SIZE(f) \
+ (round_up((f)->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f), sizeof(u32)))
+#define NFT_PIPAPO_GROUPS_PADDING(f) \
+ (NFT_PIPAPO_GROUPS_PADDED_SIZE(f) - (f)->groups / \
+ NFT_PIPAPO_GROUPS_PER_BYTE(f))
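
Worked instance of the padding macros (hypothetical field struct, outside the kernel): a 16-bit port field occupies two key bytes at either group width, padded up to the 4-byte register granularity:

    #include <assert.h>

    struct field { int groups, bb; };   /* hypothetical stand-in */

    static int padded_size(const struct field *f)
    {
            int bytes = f->groups / (8 / f->bb);  /* key bytes of this field */

            return (bytes + 3) & ~3;    /* round_up(bytes, sizeof(u32)) */
    }

    int main(void)
    {
            struct field port8 = { 2, 8 }, port4 = { 4, 4 };

            assert(padded_size(&port8) == 4);   /* 2 key bytes + 2 padding  */
            assert(padded_size(&port4) == 4);   /* same field at 4-bit width */
            return 0;
    }
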
+
+/* Number of buckets given by 2 ^ n, with n bucket bits */
+#define NFT_PIPAPO_BUCKETS(bb) (1 << (bb))
+
+/* Each n-bit range maps to up to n * 2 rules */
+#define NFT_PIPAPO_MAP_NBITS (const_ilog2(NFT_PIPAPO_MAX_BITS * 2))
+
+/* Use the rest of mapping table buckets for rule indices, but it makes no sense
+ * to exceed 32 bits
+ */
+#if BITS_PER_LONG == 64
+#define NFT_PIPAPO_MAP_TOBITS 32
+#else
+#define NFT_PIPAPO_MAP_TOBITS (BITS_PER_LONG - NFT_PIPAPO_MAP_NBITS)
+#endif
+
+/* ...which gives us the highest allowed index for a rule */
+#define NFT_PIPAPO_RULE0_MAX ((1UL << (NFT_PIPAPO_MAP_TOBITS - 1)) \
+ - (1UL << NFT_PIPAPO_MAP_NBITS))
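
Plugging in the constants, a worked check (assuming a 64-bit build): NFT_PIPAPO_MAX_BITS is 128, so NFT_PIPAPO_MAP_NBITS is const_ilog2(256) == 8, NFT_PIPAPO_MAP_TOBITS is 32, and NFT_PIPAPO_RULE0_MAX comes out as (1 << 31) - (1 << 8) == 2147483392:

    #include <assert.h>

    int main(void)
    {
            int map_nbits = 8;      /* const_ilog2(128 * 2) */
            long rule0_max = (1L << (32 - 1)) - (1L << map_nbits);

            assert(rule0_max == 2147483392L);
            return 0;
    }
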
+
+/* Definitions for vectorised implementations */
+#ifdef NFT_PIPAPO_ALIGN
+#define NFT_PIPAPO_ALIGN_HEADROOM \
+ (NFT_PIPAPO_ALIGN - ARCH_KMALLOC_MINALIGN)
+#define NFT_PIPAPO_LT_ALIGN(lt) (PTR_ALIGN((lt), NFT_PIPAPO_ALIGN))
+#define NFT_PIPAPO_LT_ASSIGN(field, x) \
+ do { \
+ (field)->lt_aligned = NFT_PIPAPO_LT_ALIGN(x); \
+ (field)->lt = (x); \
+ } while (0)
+#else
+#define NFT_PIPAPO_ALIGN_HEADROOM 0
+#define NFT_PIPAPO_LT_ALIGN(lt) (lt)
+#define NFT_PIPAPO_LT_ASSIGN(field, x) ((field)->lt = (x))
+#endif /* NFT_PIPAPO_ALIGN */
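
The pattern behind these macros is the usual over-allocate-then-round alignment trick; a userspace sketch, with malloc() standing in for kvzalloc() and 32 bytes standing in for an AVX2 NFT_PIPAPO_ALIGN:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ALIGN_TO 32             /* stand-in for an AVX2 NFT_PIPAPO_ALIGN */
    #define HEADROOM (ALIGN_TO - 8) /* NFT_PIPAPO_ALIGN_HEADROOM, if the
                                     * allocator only guarantees 8 bytes
                                     */

    int main(void)
    {
            size_t lt_size = 4096;
            unsigned char *lt = malloc(lt_size + HEADROOM); /* over-allocate */
            /* NFT_PIPAPO_LT_ALIGN(): round the pointer up, like PTR_ALIGN() */
            unsigned char *lt_aligned = (unsigned char *)
                    (((uintptr_t)lt + ALIGN_TO - 1) & ~(uintptr_t)(ALIGN_TO - 1));

            printf("raw %p, aligned %p\n", (void *)lt, (void *)lt_aligned);
            free(lt);       /* free the raw pointer, as kvfree(f->lt) does */
            return 0;
    }
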
+
+#define nft_pipapo_for_each_field(field, index, match) \
+ for ((field) = (match)->f, (index) = 0; \
+ (index) < (match)->field_count; \
+ (index)++, (field)++)
+
+/**
+ * union nft_pipapo_map_bucket - Bucket of mapping table
+ * @to: First rule number (in next field) this rule maps to
+ * @n: Number of rules (in next field) this rule maps to
+ * @e: If there's no next field, pointer to element this rule maps to
+ */
+union nft_pipapo_map_bucket {
+ struct {
+#if BITS_PER_LONG == 64
+ static_assert(NFT_PIPAPO_MAP_TOBITS <= 32);
+ u32 to;
+
+ static_assert(NFT_PIPAPO_MAP_NBITS <= 32);
+ u32 n;
+#else
+ unsigned long to:NFT_PIPAPO_MAP_TOBITS;
+ unsigned long n:NFT_PIPAPO_MAP_NBITS;
+#endif
+ };
+ struct nft_pipapo_elem *e;
+};
+
+/**
+ * struct nft_pipapo_field - Lookup, mapping tables and related data for a field
+ * @groups: Amount of bit groups
+ * @rules: Number of inserted rules
+ * @bsize: Size of each bucket in lookup table, in longs
+ * @bb: Number of bits grouped together in lookup table buckets
+ * @lt: Lookup table: 'groups' rows of buckets
+ * @lt_aligned: Version of @lt aligned to NFT_PIPAPO_ALIGN bytes
+ * @mt: Mapping table: one bucket per rule
+ */
+struct nft_pipapo_field {
+ int groups;
+ unsigned long rules;
+ size_t bsize;
+ int bb;
+#ifdef NFT_PIPAPO_ALIGN
+ unsigned long *lt_aligned;
+#endif
+ unsigned long *lt;
+ union nft_pipapo_map_bucket *mt;
+};
+
+/**
+ * struct nft_pipapo_match - Data used for lookup and matching
+ * @field_count:	Amount of fields in set
+ * @scratch: Preallocated per-CPU maps for partial matching results
+ * @scratch_aligned: Version of @scratch aligned to NFT_PIPAPO_ALIGN bytes
+ * @bsize_max: Maximum lookup table bucket size of all fields, in longs
+ * @rcu:	Matching data is swapped on commits
+ * @f: Fields, with lookup and mapping tables
+ */
+struct nft_pipapo_match {
+ int field_count;
+#ifdef NFT_PIPAPO_ALIGN
+ unsigned long * __percpu *scratch_aligned;
+#endif
+ unsigned long * __percpu *scratch;
+ size_t bsize_max;
+ struct rcu_head rcu;
+ struct nft_pipapo_field f[];
+};
+
+/**
+ * struct nft_pipapo - Representation of a set
+ * @match: Currently in-use matching data
+ * @clone: Copy where pending insertions and deletions are kept
+ * @width: Total bytes to be matched for one packet, including padding
+ * @dirty: Working copy has pending insertions or deletions
+ * @last_gc: Timestamp of last garbage collection run, jiffies
+ */
+struct nft_pipapo {
+ struct nft_pipapo_match __rcu *match;
+ struct nft_pipapo_match *clone;
+ int width;
+ bool dirty;
+ unsigned long last_gc;
+};
+
+struct nft_pipapo_elem;
+
+/**
+ * struct nft_pipapo_elem - API-facing representation of single set element
+ * @ext: nftables API extensions
+ */
+struct nft_pipapo_elem {
+ struct nft_set_ext ext;
+};
+
+int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
+ union nft_pipapo_map_bucket *mt, bool match_only);
+
+/**
+ * pipapo_and_field_buckets_4bit() - Intersect 4-bit buckets
+ * @f: Field including lookup table
+ * @dst: Area to store result
+ * @data: Input data selecting table buckets
+ */
+static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
+ unsigned long *dst,
+ const u8 *data)
+{
+ unsigned long *lt = NFT_PIPAPO_LT_ALIGN(f->lt);
+ int group;
+
+ for (group = 0; group < f->groups; group += BITS_PER_BYTE / 4, data++) {
+ u8 v;
+
+ v = *data >> 4;
+ __bitmap_and(dst, dst, lt + v * f->bsize,
+ f->bsize * BITS_PER_LONG);
+ lt += f->bsize * NFT_PIPAPO_BUCKETS(4);
+
+ v = *data & 0x0f;
+ __bitmap_and(dst, dst, lt + v * f->bsize,
+ f->bsize * BITS_PER_LONG);
+ lt += f->bsize * NFT_PIPAPO_BUCKETS(4);
+ }
+}
+
+/**
+ * pipapo_and_field_buckets_8bit() - Intersect 8-bit buckets
+ * @f: Field including lookup table
+ * @dst: Area to store result
+ * @data: Input data selecting table buckets
+ */
+static inline void pipapo_and_field_buckets_8bit(struct nft_pipapo_field *f,
+ unsigned long *dst,
+ const u8 *data)
+{
+ unsigned long *lt = NFT_PIPAPO_LT_ALIGN(f->lt);
+ int group;
+
+ for (group = 0; group < f->groups; group++, data++) {
+ __bitmap_and(dst, dst, lt + *data * f->bsize,
+ f->bsize * BITS_PER_LONG);
+ lt += f->bsize * NFT_PIPAPO_BUCKETS(8);
+ }
+}
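
A hedged usage sketch of the 8-bit helper's core loop (miniature single-group table, bsize == 1, outside the kernel): the caller seeds the result bitmap with all ones for the first field, and each group's selected bucket narrows it down:

    #include <assert.h>

    #define BUCKETS8 256

    static unsigned long lt[1 * BUCKETS8];  /* one group, bsize == 1 long */

    int main(void)
    {
            unsigned char key = 0x11;
            const unsigned char *data = &key;
            unsigned long *row = lt, res_map;
            int group, groups = 1;

            lt[0x11] = 0x3;         /* rules 0 and 1 both match byte 0x11 */

            res_map = ~0UL;         /* first field: all-ones starting bitmap */
            for (group = 0; group < groups; group++, data++) {
                    res_map &= row[*data];  /* __bitmap_and() over one long */
                    row += BUCKETS8;        /* next group's row of buckets  */
            }
            assert(res_map == 0x3);
            return 0;
    }
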
+
+/**
+ * pipapo_estimate_size() - Estimate worst-case for set size
+ * @desc: Set description, element count and field description used here
+ *
+ * The size for this set type can vary dramatically, as it depends on the number
+ * of rules (composing netmasks) the entries expand to. We compute the worst
+ * case here.
+ *
+ * In general, for a non-ranged entry or a single composing netmask, we need
+ * one bit in each of the NFT_PIPAPO_BUCKETS(NFT_PIPAPO_GROUP_BITS_INIT)
+ * buckets, for each group at the initial group width, plus a bucket in the
+ * mapping table for each field.
+ *
+ * Return: worst-case set size in bytes, 0 on any overflow
+ */
+static u64 pipapo_estimate_size(const struct nft_set_desc *desc)
+{
+ unsigned long entry_size;
+ u64 size;
+ int i;
+
+ for (i = 0, entry_size = 0; i < desc->field_count; i++) {
+ unsigned long rules;
+
+ if (desc->field_len[i] > NFT_PIPAPO_MAX_BYTES)
+ return 0;
+
+ /* Worst-case ranges for each concatenated field: each n-bit
+ * field can expand to up to n * 2 rules in each bucket, and
+ * each rule also needs a mapping bucket.
+ */
+ rules = ilog2(desc->field_len[i] * BITS_PER_BYTE) * 2;
+ entry_size += rules *
+ NFT_PIPAPO_BUCKETS(NFT_PIPAPO_GROUP_BITS_INIT) /
+ BITS_PER_BYTE;
+ entry_size += rules * sizeof(union nft_pipapo_map_bucket);
+ }
+
+ /* Rules in lookup and mapping tables are needed for each entry */
+ size = desc->size * entry_size;
+ if (size && div_u64(size, desc->size) != entry_size)
+ return 0;
+
+ size += sizeof(struct nft_pipapo) + sizeof(struct nft_pipapo_match) * 2;
+
+ size += sizeof(struct nft_pipapo_field) * desc->field_count;
+
+ return size;
+}
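
Worked instance of the estimate (illustrative field sizes, 8-byte mapping buckets as on 64-bit): for an IPv4 address concatenated with a port, rules is ilog2(32) * 2 == 10 and ilog2(16) * 2 == 8 respectively, giving entry_size = 10 * 256 / 8 + 10 * 8 + 8 * 256 / 8 + 8 * 8 == 720 bytes per element, before the fixed struct overhead:

    #include <assert.h>

    static unsigned long ilog2_ul(unsigned long v)  /* kernel ilog2() stand-in */
    {
            unsigned long r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            int field_len[] = { 4, 2 };     /* IPv4 address + port, hypothetical */
            unsigned long entry_size = 0;
            int i;

            for (i = 0; i < 2; i++) {
                    unsigned long rules = ilog2_ul(field_len[i] * 8) * 2;

                    entry_size += rules * 256 / 8;  /* lookup table, one bit per
                                                     * bucket, 8-bit group width */
                    entry_size += rules * 8;        /* one 8-byte mapping bucket */
            }
            assert(entry_size == 720);
            return 0;
    }
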
+
+#endif /* _NFT_SET_PIPAPO_H */
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
new file mode 100644
index 000000000000..d65ae0e23028
--- /dev/null
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -0,0 +1,1223 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* PIPAPO: PIle PAcket POlicies: AVX2 packet lookup routines
+ *
+ * Copyright (c) 2019-2020 Red Hat GmbH
+ *
+ * Author: Stefano Brivio <sbrivio@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <uapi/linux/netfilter/nf_tables.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+
+#include <linux/compiler.h>
+#include <asm/fpu/api.h>
+
+#include "nft_set_pipapo_avx2.h"
+#include "nft_set_pipapo.h"
+
+#define NFT_PIPAPO_LONGS_PER_M256 (XSAVE_YMM_SIZE / BITS_PER_LONG)
+
+/* Load from memory into YMM register with non-temporal hint ("stream load"),
+ * that is, don't fetch lines from memory into the cache. This avoids pushing
+ * precious packet data out of the cache hierarchy, and is appropriate when:
+ *
+ * - loading buckets from lookup tables, as they are not going to be used
+ * again before packets are entirely classified
+ *
+ * - loading the result bitmap from the previous field, as it's never used
+ * again
+ */
+#define NFT_PIPAPO_AVX2_LOAD(reg, loc) \
+ asm volatile("vmovntdqa %0, %%ymm" #reg : : "m" (loc))
+
+/* Stream a single lookup table bucket into YMM register given lookup table,
+ * group index, value of packet bits, bucket size.
+ */
+#define NFT_PIPAPO_AVX2_BUCKET_LOAD4(reg, lt, group, v, bsize) \
+ NFT_PIPAPO_AVX2_LOAD(reg, \
+ lt[((group) * NFT_PIPAPO_BUCKETS(4) + \
+ (v)) * (bsize)])
+#define NFT_PIPAPO_AVX2_BUCKET_LOAD8(reg, lt, group, v, bsize) \
+ NFT_PIPAPO_AVX2_LOAD(reg, \
+ lt[((group) * NFT_PIPAPO_BUCKETS(8) + \
+ (v)) * (bsize)])
+
+/* Bitwise AND: the staple operation of this algorithm */
+#define NFT_PIPAPO_AVX2_AND(dst, a, b) \
+ asm volatile("vpand %ymm" #a ", %ymm" #b ", %ymm" #dst)
+
+/* Jump to label if @reg is zero */
+#define NFT_PIPAPO_AVX2_NOMATCH_GOTO(reg, label) \
+ asm_volatile_goto("vptest %%ymm" #reg ", %%ymm" #reg ";" \
+ "je %l[" #label "]" : : : : label)
+
+/* Store 256 bits from YMM register into memory. Contrary to bucket load
+ * operation, we don't bypass the cache here, as stored matching results
+ * are always used shortly after.
+ */
+#define NFT_PIPAPO_AVX2_STORE(loc, reg) \
+ asm volatile("vmovdqa %%ymm" #reg ", %0" : "=m" (loc))
+
+/* Zero out a complete YMM register, @reg */
+#define NFT_PIPAPO_AVX2_ZERO(reg) \
+ asm volatile("vpxor %ymm" #reg ", %ymm" #reg ", %ymm" #reg)
+
+/* Current working bitmap index, toggled between field matches */
+static DEFINE_PER_CPU(bool, nft_pipapo_avx2_scratch_index);
+
+/**
+ * nft_pipapo_avx2_prepare() - Prepare before main algorithm body
+ *
+ * This zeroes out ymm15, which is later used whenever we need to clear a
+ * memory location, by storing its content into memory.
+ */
+static void nft_pipapo_avx2_prepare(void)
+{
+ NFT_PIPAPO_AVX2_ZERO(15);
+}
+
+/**
+ * nft_pipapo_avx2_fill() - Fill a bitmap region with ones
+ * @data: Base memory area
+ * @start: First bit to set
+ * @len: Count of bits to fill
+ *
+ * This is essentially a version of bitmap_set(), as used e.g. by
+ * pipapo_refill(), tailored for the microarchitectures using it and better
+ * suited for the specific usage pattern: it's very likely that we'll set a
+ * small number of bits, not crossing a word boundary, and correct branch
+ * prediction is critical here.
+ *
+ * This function doesn't actually use any AVX2 instruction.
+ */
+static void nft_pipapo_avx2_fill(unsigned long *data, int start, int len)
+{
+ int offset = start % BITS_PER_LONG;
+ unsigned long mask;
+
+ data += start / BITS_PER_LONG;
+
+ if (likely(len == 1)) {
+ *data |= BIT(offset);
+ return;
+ }
+
+ if (likely(len < BITS_PER_LONG || offset)) {
+ if (likely(len + offset <= BITS_PER_LONG)) {
+ *data |= GENMASK(len - 1 + offset, offset);
+ return;
+ }
+
+ *data |= ~0UL << offset;
+ len -= BITS_PER_LONG - offset;
+ data++;
+
+ if (len <= BITS_PER_LONG) {
+ mask = ~0UL >> (BITS_PER_LONG - len);
+ *data |= mask;
+ return;
+ }
+ }
+
+ memset(data, 0xff, len / BITS_PER_BYTE);
+ data += len / BITS_PER_LONG;
+
+ len %= BITS_PER_LONG;
+ if (len)
+ *data |= ~0UL >> (BITS_PER_LONG - len);
+}
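
A standalone check of this fill logic against a naive bit loop, with the body copied to userspace (illustrative names; 64-bit longs and little-endian assumed, as on the x86-64 targets of this file):

    #include <assert.h>
    #include <string.h>

    #define BPL 64  /* BITS_PER_LONG, assuming a 64-bit build */

    /* Same logic as nft_pipapo_avx2_fill() above, GENMASK() open-coded */
    static void fast_fill(unsigned long *data, int start, int len)
    {
            int offset = start % BPL;
            unsigned long mask;

            data += start / BPL;

            if (len == 1) {
                    *data |= 1UL << offset;
                    return;
            }

            if (len < BPL || offset) {
                    if (len + offset <= BPL) {
                            *data |= (~0UL >> (BPL - len)) << offset;
                            return;
                    }

                    *data |= ~0UL << offset;
                    len -= BPL - offset;
                    data++;

                    if (len <= BPL) {
                            mask = ~0UL >> (BPL - len);
                            *data |= mask;
                            return;
                    }
            }

            memset(data, 0xff, len / 8);
            data += len / BPL;

            len %= BPL;
            if (len)
                    *data |= ~0UL >> (BPL - len);
    }

    static void naive_fill(unsigned long *data, int start, int len)
    {
            int i;

            for (i = start; i < start + len; i++)
                    data[i / BPL] |= 1UL << (i % BPL);
    }

    int main(void)
    {
            unsigned long a[4], b[4];
            int start, len;

            for (start = 0; start < 2 * BPL; start++)
                    for (len = 1; start + len <= 4 * BPL; len++) {
                            memset(a, 0, sizeof(a));
                            memset(b, 0, sizeof(b));
                            fast_fill(a, start, len);
                            naive_fill(b, start, len);
                            assert(!memcmp(a, b, sizeof(a)));
                    }
            return 0;
    }
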
+
+/**
+ * nft_pipapo_avx2_refill() - Scan bitmap, select mapping table item, set bits
+ * @offset: Start from given bitmap (equivalent to bucket) offset, in longs
+ * @map: Bitmap to be scanned for set bits
+ * @dst: Destination bitmap
+ * @mt: Mapping table containing bit set specifiers
+ * @len: Length of bitmap in longs
+ * @last: Return index of first set bit, if this is the last field
+ *
+ * This is an alternative implementation of pipapo_refill() suitable for usage
+ * with AVX2 lookup routines: we know there are four words to be scanned, at
+ * a given offset inside the map, for each matching iteration.
+ *
+ * This function doesn't actually use any AVX2 instruction.
+ *
+ * Return: first set bit index if @last, index of first filled word otherwise.
+ */
+static int nft_pipapo_avx2_refill(int offset, unsigned long *map,
+ unsigned long *dst,
+ union nft_pipapo_map_bucket *mt, bool last)
+{
+ int ret = -1;
+
+#define NFT_PIPAPO_AVX2_REFILL_ONE_WORD(x) \
+ do { \
+ while (map[(x)]) { \
+ int r = __builtin_ctzl(map[(x)]); \
+ int i = (offset + (x)) * BITS_PER_LONG + r; \
+ \
+ if (last) \
+ return i; \
+ \
+ nft_pipapo_avx2_fill(dst, mt[i].to, mt[i].n); \
+ \
+ if (ret == -1) \
+ ret = mt[i].to; \
+ \
+ map[(x)] &= ~(1UL << r); \
+ } \
+ } while (0)
+
+ NFT_PIPAPO_AVX2_REFILL_ONE_WORD(0);
+ NFT_PIPAPO_AVX2_REFILL_ONE_WORD(1);
+ NFT_PIPAPO_AVX2_REFILL_ONE_WORD(2);
+ NFT_PIPAPO_AVX2_REFILL_ONE_WORD(3);
+#undef NFT_PIPAPO_AVX2_REFILL_ONE_WORD
+
+ return ret;
+}
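
The scan inside NFT_PIPAPO_AVX2_REFILL_ONE_WORD is the classic find-lowest-set-bit-and-clear loop; in isolation, with the mapping table replaced by a plain array of hypothetical spans:

    #include <stdio.h>

    struct span { int to, n; };  /* stands in for union nft_pipapo_map_bucket */

    int main(void)
    {
            unsigned long map = 0x22;       /* rules 1 and 5 matched in this word */
            struct span mt[8] = { [1] = { 10, 2 }, [5] = { 40, 1 } };

            while (map) {
                    int r = __builtin_ctzl(map);    /* lowest matching rule */

                    printf("rule %d -> fill rules %d..%d of the next field\n",
                           r, mt[r].to, mt[r].to + mt[r].n - 1);
                    map &= ~(1UL << r);             /* clear it, keep scanning */
            }
            return 0;
    }
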
+
+/**
+ * nft_pipapo_avx2_lookup_4b_2() - AVX2-based lookup for 2 four-bit groups
+ * @map: Previous match result, used as initial bitmap
+ * @fill: Destination bitmap to be filled with current match result
+ * @f: Field, containing lookup and mapping tables
+ * @offset: Ignore buckets before the given index, no bits are filled there
+ * @pkt: Packet data, pointer to input nftables register
+ * @first: If this is the first field, don't source previous result
+ * @last: Last field: stop at the first match and return bit index
+ *
+ * Load buckets from lookup table corresponding to the values of each 4-bit
+ * group of packet bytes, and perform a bitwise intersection between them. If
+ * this is the first field in the set, simply AND the buckets together
+ * (equivalent to using an all-ones starting bitmap), use the provided starting
+ * bitmap otherwise. Then call nft_pipapo_avx2_refill() to generate the next
+ * working bitmap, @fill.
+ *
+ * This is used for 8-bit fields (e.g. protocol numbers).
+ *
+ * Out-of-order (and superscalar) execution is vital here, so it's critical to
+ * avoid false data dependencies. CPU and compiler could (mostly) take care of
+ * this on their own, but the operation ordering is explicitly given here with
+ * a likely execution order in mind, to highlight possible stalls. That's why
+ * a number of logically distinct operations (i.e. loading buckets, intersecting
+ * buckets) are interleaved.
+ *
+ * Return: -1 on no match, rule index of match if @last, otherwise first long
+ * word index to be checked next (i.e. first filled word).
+ */
+static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
+ struct nft_pipapo_field *f, int offset,
+ const u8 *pkt, bool first, bool last)
+{
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ u8 pg[2] = { pkt[0] >> 4, pkt[0] & 0xf };
+ unsigned long *lt = f->lt, bsize = f->bsize;
+
+ lt += offset * NFT_PIPAPO_LONGS_PER_M256;
+ for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
+ int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
+
+ if (first) {
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 0, pg[0], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 1, pg[1], bsize);
+ NFT_PIPAPO_AVX2_AND(4, 0, 1);
+ } else {
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 0, pg[0], bsize);
+ NFT_PIPAPO_AVX2_LOAD(2, map[i_ul]);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 1, pg[1], bsize);
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(2, nothing);
+ NFT_PIPAPO_AVX2_AND(3, 0, 1);
+ NFT_PIPAPO_AVX2_AND(4, 2, 3);
+ }
+
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(4, nomatch);
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 4);
+
+ b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
+ if (last)
+ return b;
+
+ if (unlikely(ret == -1))
+ ret = b / XSAVE_YMM_SIZE;
+
+ continue;
+nomatch:
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
+nothing:
+ ;
+ }
+
+ return ret;
+}
+
+/**
+ * nft_pipapo_avx2_lookup_4b_4() - AVX2-based lookup for 4 four-bit groups
+ * @map: Previous match result, used as initial bitmap
+ * @fill: Destination bitmap to be filled with current match result
+ * @f: Field, containing lookup and mapping tables
+ * @offset: Ignore buckets before the given index, no bits are filled there
+ * @pkt: Packet data, pointer to input nftables register
+ * @first: If this is the first field, don't source previous result
+ * @last: Last field: stop at the first match and return bit index
+ *
+ * See nft_pipapo_avx2_lookup_4b_2().
+ *
+ * This is used for 16-bit fields (e.g. ports).
+ *
+ * Return: -1 on no match, rule index of match if @last, otherwise first long
+ * word index to be checked next (i.e. first filled word).
+ */
+static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
+ struct nft_pipapo_field *f, int offset,
+ const u8 *pkt, bool first, bool last)
+{
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ u8 pg[4] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf };
+ unsigned long *lt = f->lt, bsize = f->bsize;
+
+ lt += offset * NFT_PIPAPO_LONGS_PER_M256;
+ for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
+ int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
+
+ if (first) {
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 0, pg[0], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 1, pg[1], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(2, lt, 2, pg[2], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 3, pg[3], bsize);
+ NFT_PIPAPO_AVX2_AND(4, 0, 1);
+ NFT_PIPAPO_AVX2_AND(5, 2, 3);
+ NFT_PIPAPO_AVX2_AND(7, 4, 5);
+ } else {
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 0, pg[0], bsize);
+
+ NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
+
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(2, lt, 1, pg[1], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 2, pg[2], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(4, lt, 3, pg[3], bsize);
+ NFT_PIPAPO_AVX2_AND(5, 0, 1);
+
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(1, nothing);
+
+ NFT_PIPAPO_AVX2_AND(6, 2, 3);
+ NFT_PIPAPO_AVX2_AND(7, 4, 5);
+ /* Stall */
+ NFT_PIPAPO_AVX2_AND(7, 6, 7);
+ }
+
+ /* Stall */
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(7, nomatch);
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 7);
+
+ b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
+ if (last)
+ return b;
+
+ if (unlikely(ret == -1))
+ ret = b / XSAVE_YMM_SIZE;
+
+ continue;
+nomatch:
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
+nothing:
+ ;
+ }
+
+ return ret;
+}
+
+/**
+ * nft_pipapo_avx2_lookup_4b_8() - AVX2-based lookup for 8 four-bit groups
+ * @map: Previous match result, used as initial bitmap
+ * @fill: Destination bitmap to be filled with current match result
+ * @f: Field, containing lookup and mapping tables
+ * @offset: Ignore buckets before the given index, no bits are filled there
+ * @pkt: Packet data, pointer to input nftables register
+ * @first: If this is the first field, don't source previous result
+ * @last: Last field: stop at the first match and return bit index
+ *
+ * See nft_pipapo_avx2_lookup_4b_2().
+ *
+ * This is used for 32-bit fields (e.g. IPv4 addresses).
+ *
+ * Return: -1 on no match, rule index of match if @last, otherwise first long
+ * word index to be checked next (i.e. first filled word).
+ */
+static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
+ struct nft_pipapo_field *f, int offset,
+ const u8 *pkt, bool first, bool last)
+{
+ u8 pg[8] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
+ pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
+ };
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ unsigned long *lt = f->lt, bsize = f->bsize;
+
+ lt += offset * NFT_PIPAPO_LONGS_PER_M256;
+ for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
+ int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
+
+ if (first) {
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 0, pg[0], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 1, pg[1], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(2, lt, 2, pg[2], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 3, pg[3], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(4, lt, 4, pg[4], bsize);
+ NFT_PIPAPO_AVX2_AND(5, 0, 1);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(6, lt, 5, pg[5], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(7, lt, 6, pg[6], bsize);
+ NFT_PIPAPO_AVX2_AND(8, 2, 3);
+ NFT_PIPAPO_AVX2_AND(9, 4, 5);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(10, lt, 7, pg[7], bsize);
+ NFT_PIPAPO_AVX2_AND(11, 6, 7);
+ NFT_PIPAPO_AVX2_AND(12, 8, 9);
+ NFT_PIPAPO_AVX2_AND(13, 10, 11);
+
+ /* Stall */
+ NFT_PIPAPO_AVX2_AND(1, 12, 13);
+ } else {
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 0, pg[0], bsize);
+ NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(2, lt, 1, pg[1], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 2, pg[2], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(4, lt, 3, pg[3], bsize);
+
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(1, nothing);
+
+ NFT_PIPAPO_AVX2_AND(5, 0, 1);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(6, lt, 4, pg[4], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(7, lt, 5, pg[5], bsize);
+ NFT_PIPAPO_AVX2_AND(8, 2, 3);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(9, lt, 6, pg[6], bsize);
+ NFT_PIPAPO_AVX2_AND(10, 4, 5);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(11, lt, 7, pg[7], bsize);
+ NFT_PIPAPO_AVX2_AND(12, 6, 7);
+ NFT_PIPAPO_AVX2_AND(13, 8, 9);
+ NFT_PIPAPO_AVX2_AND(14, 10, 11);
+
+ /* Stall */
+ NFT_PIPAPO_AVX2_AND(1, 12, 13);
+ NFT_PIPAPO_AVX2_AND(1, 1, 14);
+ }
+
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(1, nomatch);
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 1);
+
+ b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
+ if (last)
+ return b;
+
+ if (unlikely(ret == -1))
+ ret = b / XSAVE_YMM_SIZE;
+
+ continue;
+
+nomatch:
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
+nothing:
+ ;
+ }
+
+ return ret;
+}
+
+/**
+ * nft_pipapo_avx2_lookup_4b_12() - AVX2-based lookup for 12 four-bit groups
+ * @map: Previous match result, used as initial bitmap
+ * @fill: Destination bitmap to be filled with current match result
+ * @f: Field, containing lookup and mapping tables
+ * @offset: Ignore buckets before the given index, no bits are filled there
+ * @pkt: Packet data, pointer to input nftables register
+ * @first: If this is the first field, don't source previous result
+ * @last: Last field: stop at the first match and return bit index
+ *
+ * See nft_pipapo_avx2_lookup_4b_2().
+ *
+ * This is used for 48-bit fields (e.g. MAC addresses/EUI-48).
+ *
+ * Return: -1 on no match, rule index of match if @last, otherwise first long
+ * word index to be checked next (i.e. first filled word).
+ */
+static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
+ struct nft_pipapo_field *f, int offset,
+ const u8 *pkt, bool first, bool last)
+{
+ u8 pg[12] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
+ pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
+ pkt[4] >> 4, pkt[4] & 0xf, pkt[5] >> 4, pkt[5] & 0xf,
+ };
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ unsigned long *lt = f->lt, bsize = f->bsize;
+
+ lt += offset * NFT_PIPAPO_LONGS_PER_M256;
+ for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
+ int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
+
+ if (!first)
+ NFT_PIPAPO_AVX2_LOAD(0, map[i_ul]);
+
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 0, pg[0], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(2, lt, 1, pg[1], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 2, pg[2], bsize);
+
+ if (!first) {
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(0, nothing);
+ NFT_PIPAPO_AVX2_AND(1, 1, 0);
+ }
+
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(4, lt, 3, pg[3], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(5, lt, 4, pg[4], bsize);
+ NFT_PIPAPO_AVX2_AND(6, 2, 3);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(7, lt, 5, pg[5], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(8, lt, 6, pg[6], bsize);
+ NFT_PIPAPO_AVX2_AND(9, 1, 4);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(10, lt, 7, pg[7], bsize);
+ NFT_PIPAPO_AVX2_AND(11, 5, 6);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(12, lt, 8, pg[8], bsize);
+ NFT_PIPAPO_AVX2_AND(13, 7, 8);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(14, lt, 9, pg[9], bsize);
+
+ NFT_PIPAPO_AVX2_AND(0, 9, 10);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 10, pg[10], bsize);
+ NFT_PIPAPO_AVX2_AND(2, 11, 12);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 11, pg[11], bsize);
+ NFT_PIPAPO_AVX2_AND(4, 13, 14);
+ NFT_PIPAPO_AVX2_AND(5, 0, 1);
+
+ NFT_PIPAPO_AVX2_AND(6, 2, 3);
+
+ /* Stalls */
+ NFT_PIPAPO_AVX2_AND(7, 4, 5);
+ NFT_PIPAPO_AVX2_AND(8, 6, 7);
+
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(8, nomatch);
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 8);
+
+ b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
+ if (last)
+ return b;
+
+ if (unlikely(ret == -1))
+ ret = b / XSAVE_YMM_SIZE;
+
+ continue;
+nomatch:
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
+nothing:
+ ;
+ }
+
+ return ret;
+}
+
+/**
+ * nft_pipapo_avx2_lookup_4b_32() - AVX2-based lookup for 32 four-bit groups
+ * @map: Previous match result, used as initial bitmap
+ * @fill: Destination bitmap to be filled with current match result
+ * @f: Field, containing lookup and mapping tables
+ * @offset: Ignore buckets before the given index, no bits are filled there
+ * @pkt: Packet data, pointer to input nftables register
+ * @first: If this is the first field, don't source previous result
+ * @last: Last field: stop at the first match and return bit index
+ *
+ * See nft_pipapo_avx2_lookup_4b_2().
+ *
+ * This is used for 128-bit fields (e.g. IPv6 addresses).
+ *
+ * Return: -1 on no match, rule index of match if @last, otherwise first long
+ * word index to be checked next (i.e. first filled word).
+ */
+static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
+ struct nft_pipapo_field *f, int offset,
+ const u8 *pkt, bool first, bool last)
+{
+ u8 pg[32] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
+ pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
+ pkt[4] >> 4, pkt[4] & 0xf, pkt[5] >> 4, pkt[5] & 0xf,
+ pkt[6] >> 4, pkt[6] & 0xf, pkt[7] >> 4, pkt[7] & 0xf,
+ pkt[8] >> 4, pkt[8] & 0xf, pkt[9] >> 4, pkt[9] & 0xf,
+ pkt[10] >> 4, pkt[10] & 0xf, pkt[11] >> 4, pkt[11] & 0xf,
+ pkt[12] >> 4, pkt[12] & 0xf, pkt[13] >> 4, pkt[13] & 0xf,
+ pkt[14] >> 4, pkt[14] & 0xf, pkt[15] >> 4, pkt[15] & 0xf,
+ };
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ unsigned long *lt = f->lt, bsize = f->bsize;
+
+ lt += offset * NFT_PIPAPO_LONGS_PER_M256;
+ for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
+ int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
+
+ if (!first)
+ NFT_PIPAPO_AVX2_LOAD(0, map[i_ul]);
+
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 0, pg[0], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(2, lt, 1, pg[1], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 2, pg[2], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(4, lt, 3, pg[3], bsize);
+ if (!first) {
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(0, nothing);
+ NFT_PIPAPO_AVX2_AND(1, 1, 0);
+ }
+
+ NFT_PIPAPO_AVX2_AND(5, 2, 3);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(6, lt, 4, pg[4], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(7, lt, 5, pg[5], bsize);
+ NFT_PIPAPO_AVX2_AND(8, 1, 4);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(9, lt, 6, pg[6], bsize);
+ NFT_PIPAPO_AVX2_AND(10, 5, 6);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(11, lt, 7, pg[7], bsize);
+ NFT_PIPAPO_AVX2_AND(12, 7, 8);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(13, lt, 8, pg[8], bsize);
+ NFT_PIPAPO_AVX2_AND(14, 9, 10);
+
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 9, pg[9], bsize);
+ NFT_PIPAPO_AVX2_AND(1, 11, 12);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(2, lt, 10, pg[10], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 11, pg[11], bsize);
+ NFT_PIPAPO_AVX2_AND(4, 13, 14);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(5, lt, 12, pg[12], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(6, lt, 13, pg[13], bsize);
+ NFT_PIPAPO_AVX2_AND(7, 0, 1);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(8, lt, 14, pg[14], bsize);
+ NFT_PIPAPO_AVX2_AND(9, 2, 3);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(10, lt, 15, pg[15], bsize);
+ NFT_PIPAPO_AVX2_AND(11, 4, 5);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(12, lt, 16, pg[16], bsize);
+ NFT_PIPAPO_AVX2_AND(13, 6, 7);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(14, lt, 17, pg[17], bsize);
+
+ NFT_PIPAPO_AVX2_AND(0, 8, 9);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(1, lt, 18, pg[18], bsize);
+ NFT_PIPAPO_AVX2_AND(2, 10, 11);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 19, pg[19], bsize);
+ NFT_PIPAPO_AVX2_AND(4, 12, 13);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(5, lt, 20, pg[20], bsize);
+ NFT_PIPAPO_AVX2_AND(6, 14, 0);
+ NFT_PIPAPO_AVX2_AND(7, 1, 2);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(8, lt, 21, pg[21], bsize);
+ NFT_PIPAPO_AVX2_AND(9, 3, 4);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(10, lt, 22, pg[22], bsize);
+ NFT_PIPAPO_AVX2_AND(11, 5, 6);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(12, lt, 23, pg[23], bsize);
+ NFT_PIPAPO_AVX2_AND(13, 7, 8);
+
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(14, lt, 24, pg[24], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(0, lt, 25, pg[25], bsize);
+ NFT_PIPAPO_AVX2_AND(1, 9, 10);
+ NFT_PIPAPO_AVX2_AND(2, 11, 12);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(3, lt, 26, pg[26], bsize);
+ NFT_PIPAPO_AVX2_AND(4, 13, 14);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(5, lt, 27, pg[27], bsize);
+ NFT_PIPAPO_AVX2_AND(6, 0, 1);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(7, lt, 28, pg[28], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(8, lt, 29, pg[29], bsize);
+ NFT_PIPAPO_AVX2_AND(9, 2, 3);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(10, lt, 30, pg[30], bsize);
+ NFT_PIPAPO_AVX2_AND(11, 4, 5);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD4(12, lt, 31, pg[31], bsize);
+
+ NFT_PIPAPO_AVX2_AND(0, 6, 7);
+ NFT_PIPAPO_AVX2_AND(1, 8, 9);
+ NFT_PIPAPO_AVX2_AND(2, 10, 11);
+ NFT_PIPAPO_AVX2_AND(3, 12, 0);
+
+ /* Stalls */
+ NFT_PIPAPO_AVX2_AND(4, 1, 2);
+ NFT_PIPAPO_AVX2_AND(5, 3, 4);
+
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(5, nomatch);
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 5);
+
+ b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
+ if (last)
+ return b;
+
+ if (unlikely(ret == -1))
+ ret = b / XSAVE_YMM_SIZE;
+
+ continue;
+nomatch:
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
+nothing:
+ ;
+ }
+
+ return ret;
+}
+
+/**
+ * nft_pipapo_avx2_lookup_8b_1() - AVX2-based lookup for one eight-bit group
+ * @map: Previous match result, used as initial bitmap
+ * @fill: Destination bitmap to be filled with current match result
+ * @f: Field, containing lookup and mapping tables
+ * @offset: Ignore buckets before the given index, no bits are filled there
+ * @pkt: Packet data, pointer to input nftables register
+ * @first: If this is the first field, don't source previous result
+ * @last: Last field: stop at the first match and return bit index
+ *
+ * See nft_pipapo_avx2_lookup_4b_2().
+ *
+ * This is used for 8-bit fields (e.g. protocol numbers).
+ *
+ * Return: -1 on no match, rule index of match if @last, otherwise first long
+ * word index to be checked next (i.e. first filled word).
+ */
+static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
+ struct nft_pipapo_field *f, int offset,
+ const u8 *pkt, bool first, bool last)
+{
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ unsigned long *lt = f->lt, bsize = f->bsize;
+
+ lt += offset * NFT_PIPAPO_LONGS_PER_M256;
+ for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
+ int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
+
+ if (first) {
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 0, pkt[0], bsize);
+ } else {
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(0, lt, 0, pkt[0], bsize);
+ NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
+ NFT_PIPAPO_AVX2_AND(2, 0, 1);
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(1, nothing);
+ }
+
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(2, nomatch);
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 2);
+
+ b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
+ if (last)
+ return b;
+
+ if (unlikely(ret == -1))
+ ret = b / XSAVE_YMM_SIZE;
+
+ continue;
+nomatch:
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
+nothing:
+ ;
+ }
+
+ return ret;
+}
+
+/**
+ * nft_pipapo_avx2_lookup_8b_2() - AVX2-based lookup for 2 eight-bit groups
+ * @map: Previous match result, used as initial bitmap
+ * @fill: Destination bitmap to be filled with current match result
+ * @f: Field, containing lookup and mapping tables
+ * @offset: Ignore buckets before the given index, no bits are filled there
+ * @pkt: Packet data, pointer to input nftables register
+ * @first: If this is the first field, don't source previous result
+ * @last: Last field: stop at the first match and return bit index
+ *
+ * See nft_pipapo_avx2_lookup_4b_2().
+ *
+ * This is used for 16-bit fields (e.g. ports).
+ *
+ * Return: -1 on no match, rule index of match if @last, otherwise first long
+ * word index to be checked next (i.e. first filled word).
+ */
+static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
+ struct nft_pipapo_field *f, int offset,
+ const u8 *pkt, bool first, bool last)
+{
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ unsigned long *lt = f->lt, bsize = f->bsize;
+
+ lt += offset * NFT_PIPAPO_LONGS_PER_M256;
+ for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
+ int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
+
+ if (first) {
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(0, lt, 0, pkt[0], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 1, pkt[1], bsize);
+ NFT_PIPAPO_AVX2_AND(4, 0, 1);
+ } else {
+ NFT_PIPAPO_AVX2_LOAD(0, map[i_ul]);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 0, pkt[0], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 1, pkt[1], bsize);
+
+ /* Stall */
+ NFT_PIPAPO_AVX2_AND(3, 0, 1);
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(0, nothing);
+ NFT_PIPAPO_AVX2_AND(4, 3, 2);
+ }
+
+ /* Stall */
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(4, nomatch);
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 4);
+
+ b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
+ if (last)
+ return b;
+
+ if (unlikely(ret == -1))
+ ret = b / XSAVE_YMM_SIZE;
+
+ continue;
+nomatch:
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
+nothing:
+ ;
+ }
+
+ return ret;
+}
+
+/**
+ * nft_pipapo_avx2_lookup_8b_4() - AVX2-based lookup for 4 eight-bit groups
+ * @map: Previous match result, used as initial bitmap
+ * @fill: Destination bitmap to be filled with current match result
+ * @f: Field, containing lookup and mapping tables
+ * @offset: Ignore buckets before the given index, no bits are filled there
+ * @pkt: Packet data, pointer to input nftables register
+ * @first: If this is the first field, don't source previous result
+ * @last: Last field: stop at the first match and return bit index
+ *
+ * See nft_pipapo_avx2_lookup_4b_2().
+ *
+ * This is used for 32-bit fields (e.g. IPv4 addresses).
+ *
+ * Return: -1 on no match, rule index of match if @last, otherwise first long
+ * word index to be checked next (i.e. first filled word).
+ */
+static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
+ struct nft_pipapo_field *f, int offset,
+ const u8 *pkt, bool first, bool last)
+{
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ unsigned long *lt = f->lt, bsize = f->bsize;
+
+ lt += offset * NFT_PIPAPO_LONGS_PER_M256;
+ for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
+ int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
+
+ if (first) {
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(0, lt, 0, pkt[0], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 1, pkt[1], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 2, pkt[2], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 3, pkt[3], bsize);
+
+ /* Stall */
+ NFT_PIPAPO_AVX2_AND(4, 0, 1);
+ NFT_PIPAPO_AVX2_AND(5, 2, 3);
+ NFT_PIPAPO_AVX2_AND(0, 4, 5);
+ } else {
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(0, lt, 0, pkt[0], bsize);
+ NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 1, pkt[1], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 2, pkt[2], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(4, lt, 3, pkt[3], bsize);
+
+ NFT_PIPAPO_AVX2_AND(5, 0, 1);
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(1, nothing);
+ NFT_PIPAPO_AVX2_AND(6, 2, 3);
+
+ /* Stall */
+ NFT_PIPAPO_AVX2_AND(7, 4, 5);
+ NFT_PIPAPO_AVX2_AND(0, 6, 7);
+ }
+
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(0, nomatch);
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 0);
+
+ b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
+ if (last)
+ return b;
+
+ if (unlikely(ret == -1))
+ ret = b / XSAVE_YMM_SIZE;
+
+ continue;
+
+nomatch:
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
+nothing:
+ ;
+ }
+
+ return ret;
+}
+
+/**
+ * nft_pipapo_avx2_lookup_8b_6() - AVX2-based lookup for 6 eight-bit groups
+ * @map: Previous match result, used as initial bitmap
+ * @fill: Destination bitmap to be filled with current match result
+ * @f: Field, containing lookup and mapping tables
+ * @offset: Ignore buckets before the given index, no bits are filled there
+ * @pkt: Packet data, pointer to input nftables register
+ * @first: If this is the first field, don't source previous result
+ * @last: Last field: stop at the first match and return bit index
+ *
+ * See nft_pipapo_avx2_lookup_4b_2().
+ *
+ * This is used for 48-bit fields (e.g. MAC addresses/EUI-48).
+ *
+ * Return: -1 on no match, rule index of match if @last, otherwise first long
+ * word index to be checked next (i.e. first filled word).
+ */
+static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
+ struct nft_pipapo_field *f, int offset,
+ const u8 *pkt, bool first, bool last)
+{
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ unsigned long *lt = f->lt, bsize = f->bsize;
+
+ lt += offset * NFT_PIPAPO_LONGS_PER_M256;
+ for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
+ int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
+
+ if (first) {
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(0, lt, 0, pkt[0], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 1, pkt[1], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 2, pkt[2], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 3, pkt[3], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(4, lt, 4, pkt[4], bsize);
+
+ NFT_PIPAPO_AVX2_AND(5, 0, 1);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(6, lt, 6, pkt[5], bsize);
+ NFT_PIPAPO_AVX2_AND(7, 2, 3);
+
+ /* Stall */
+ NFT_PIPAPO_AVX2_AND(0, 4, 5);
+ NFT_PIPAPO_AVX2_AND(1, 6, 7);
+ NFT_PIPAPO_AVX2_AND(4, 0, 1);
+ } else {
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(0, lt, 0, pkt[0], bsize);
+ NFT_PIPAPO_AVX2_LOAD(1, map[i_ul]);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 1, pkt[1], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 2, pkt[2], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(4, lt, 3, pkt[3], bsize);
+
+ NFT_PIPAPO_AVX2_AND(5, 0, 1);
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(1, nothing);
+
+ NFT_PIPAPO_AVX2_AND(6, 2, 3);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(7, lt, 4, pkt[4], bsize);
+ NFT_PIPAPO_AVX2_AND(0, 4, 5);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 5, pkt[5], bsize);
+ NFT_PIPAPO_AVX2_AND(2, 6, 7);
+
+ /* Stall */
+ NFT_PIPAPO_AVX2_AND(3, 0, 1);
+ NFT_PIPAPO_AVX2_AND(4, 2, 3);
+ }
+
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(4, nomatch);
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 4);
+
+ b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
+ if (last)
+ return b;
+
+ if (unlikely(ret == -1))
+ ret = b / XSAVE_YMM_SIZE;
+
+ continue;
+
+nomatch:
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
+nothing:
+ ;
+ }
+
+ return ret;
+}
+
+/**
+ * nft_pipapo_avx2_lookup_8b_16() - AVX2-based lookup for 16 eight-bit groups
+ * @map: Previous match result, used as initial bitmap
+ * @fill: Destination bitmap to be filled with current match result
+ * @f: Field, containing lookup and mapping tables
+ * @offset: Ignore buckets before the given index, no bits are filled there
+ * @pkt: Packet data, pointer to input nftables register
+ * @first: If this is the first field, don't source previous result
+ * @last: Last field: stop at the first match and return bit index
+ *
+ * See nft_pipapo_avx2_lookup_4b_2().
+ *
+ * This is used for 128-bit fields (e.g. IPv6 addresses).
+ *
+ * Return: -1 on no match, rule index of match if @last, otherwise first long
+ * word index to be checked next (i.e. first filled word).
+ */
+static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
+ struct nft_pipapo_field *f, int offset,
+ const u8 *pkt, bool first, bool last)
+{
+ int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
+ unsigned long *lt = f->lt, bsize = f->bsize;
+
+ lt += offset * NFT_PIPAPO_LONGS_PER_M256;
+ for (i = offset; i < m256_size; i++, lt += NFT_PIPAPO_LONGS_PER_M256) {
+ int i_ul = i * NFT_PIPAPO_LONGS_PER_M256;
+
+ if (!first)
+ NFT_PIPAPO_AVX2_LOAD(0, map[i_ul]);
+
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 0, pkt[0], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 1, pkt[1], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 2, pkt[2], bsize);
+ if (!first) {
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(0, nothing);
+ NFT_PIPAPO_AVX2_AND(1, 1, 0);
+ }
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(4, lt, 3, pkt[3], bsize);
+
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(5, lt, 4, pkt[4], bsize);
+ NFT_PIPAPO_AVX2_AND(6, 1, 2);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(7, lt, 5, pkt[5], bsize);
+ NFT_PIPAPO_AVX2_AND(0, 3, 4);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 6, pkt[6], bsize);
+
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(2, lt, 7, pkt[7], bsize);
+ NFT_PIPAPO_AVX2_AND(3, 5, 6);
+ NFT_PIPAPO_AVX2_AND(4, 0, 1);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(5, lt, 8, pkt[8], bsize);
+
+ NFT_PIPAPO_AVX2_AND(6, 2, 3);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(7, lt, 9, pkt[9], bsize);
+ NFT_PIPAPO_AVX2_AND(0, 4, 5);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 10, pkt[10], bsize);
+ NFT_PIPAPO_AVX2_AND(2, 6, 7);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 11, pkt[11], bsize);
+ NFT_PIPAPO_AVX2_AND(4, 0, 1);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(5, lt, 12, pkt[12], bsize);
+ NFT_PIPAPO_AVX2_AND(6, 2, 3);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(7, lt, 13, pkt[13], bsize);
+ NFT_PIPAPO_AVX2_AND(0, 4, 5);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 14, pkt[14], bsize);
+ NFT_PIPAPO_AVX2_AND(2, 6, 7);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 15, pkt[15], bsize);
+ NFT_PIPAPO_AVX2_AND(4, 0, 1);
+
+ /* Stall */
+ NFT_PIPAPO_AVX2_AND(5, 2, 3);
+ NFT_PIPAPO_AVX2_AND(6, 4, 5);
+
+ NFT_PIPAPO_AVX2_NOMATCH_GOTO(6, nomatch);
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 6);
+
+ b = nft_pipapo_avx2_refill(i_ul, &map[i_ul], fill, f->mt, last);
+ if (last)
+ return b;
+
+ if (unlikely(ret == -1))
+ ret = b / XSAVE_YMM_SIZE;
+
+ continue;
+
+nomatch:
+ NFT_PIPAPO_AVX2_STORE(map[i_ul], 15);
+nothing:
+ ;
+ }
+
+ return ret;
+}
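Between fields, nft_pipapo_avx2_refill() translates the rule bits that survived this field into the starting bitmap of the next one. A scalar sketch of that idea, assuming the mt[].to / mt[].n mapping-table layout of nft_set_pipapo.c:

/* Scalar sketch (not part of the patch): every matched rule r enables
 * its mapped rule range (mt[r].n bits from mt[r].to) in the bitmap
 * handed to the next field.
 */
static void pipapo_refill_sketch(const unsigned long *map, int rules,
				 unsigned long *fill,
				 const union nft_pipapo_map_bucket *mt)
{
	int r;

	for (r = 0; r < rules; r++)
		if (test_bit(r, map))
			bitmap_set(fill, mt[r].to, mt[r].n);
}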
+
+/**
+ * nft_pipapo_avx2_lookup_slow() - Fallback function for uncommon field sizes
+ * @map: Previous match result, used as initial bitmap
+ * @fill: Destination bitmap to be filled with current match result
+ * @f: Field, containing lookup and mapping tables
+ * @offset: Ignore buckets before the given index, no bits are filled there
+ * @pkt: Packet data, pointer to input nftables register
+ * @first: If this is the first field, don't source previous result
+ * @last: Last field: stop at the first match and return bit index
+ *
+ * This function should never be called, but is provided for the case where the
+ * field size doesn't match any of the known data types. Its matching rate is
+ * substantially lower than that of the AVX2 routines.
+ *
+ * Return: -1 on no match, rule index of match if @last, otherwise first long
+ * word index to be checked next (i.e. first filled word).
+ */
+static int nft_pipapo_avx2_lookup_slow(unsigned long *map, unsigned long *fill,
+ struct nft_pipapo_field *f, int offset,
+ const u8 *pkt, bool first, bool last)
+{
+ unsigned long *lt = f->lt, bsize = f->bsize;
+ int i, ret = -1, b;
+
+ lt += offset * NFT_PIPAPO_LONGS_PER_M256;
+
+ if (first)
+ memset(map, 0xff, bsize * sizeof(*map));
+
+ for (i = offset; i < bsize; i++) {
+ if (f->bb == 8)
+ pipapo_and_field_buckets_8bit(f, map, pkt);
+ else
+ pipapo_and_field_buckets_4bit(f, map, pkt);
+ NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
+
+ b = pipapo_refill(map, bsize, f->rules, fill, f->mt, last);
+
+ if (last)
+ return b;
+
+ if (ret == -1)
+ ret = b / XSAVE_YMM_SIZE;
+ }
+
+ return ret;
+}
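NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4, used as a bare statement here and in the main lookup below, reads like a compile-time guard that only the two supported group widths exist. A sketch of such a guard, with constant names assumed from nft_set_pipapo.h:

/* Hedged sketch of a build-time guard with this intent */
#define NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4			\
	BUILD_BUG_ON((NFT_PIPAPO_GROUP_BITS_LARGE_SET != 8) ||	\
		     (NFT_PIPAPO_GROUP_BITS_SMALL_SET != 4))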
+
+/**
+ * nft_pipapo_avx2_estimate() - Set size, space and lookup complexity
+ * @desc: Set description, element count and field description used
+ * @features: Flags: NFT_SET_INTERVAL must be set
+ * @est: Storage for estimation data
+ *
+ * Return: true if set is compatible and AVX2 available, false otherwise.
+ */
+bool nft_pipapo_avx2_estimate(const struct nft_set_desc *desc, u32 features,
+ struct nft_set_estimate *est)
+{
+ if (!(features & NFT_SET_INTERVAL) ||
+ desc->field_count < NFT_PIPAPO_MIN_FIELDS)
+ return false;
+
+ if (!boot_cpu_has(X86_FEATURE_AVX2) || !boot_cpu_has(X86_FEATURE_AVX))
+ return false;
+
+ est->size = pipapo_estimate_size(desc);
+ if (!est->size)
+ return false;
+
+ est->lookup = NFT_SET_CLASS_O_LOG_N;
+
+ est->space = NFT_SET_CLASS_O_N;
+
+ return true;
+}
+
+/**
+ * nft_pipapo_avx2_lookup() - Lookup function for AVX2 implementation
+ * @net: Network namespace
+ * @set: nftables API set representation
+ * @key: nftables API element representation containing key data
+ * @ext: nftables API extension pointer, filled with matching reference
+ *
+ * For more details, see DOC: Theory of Operation in nft_set_pipapo.c.
+ *
+ * This implementation exploits the repetitive characteristic of the algorithm
+ * to provide a fast, vectorised version using the AVX2 SIMD instruction set.
+ *
+ * Return: true on match, false otherwise.
+ */
+bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext)
+{
+ struct nft_pipapo *priv = nft_set_priv(set);
+ unsigned long *res, *fill, *scratch;
+ u8 genmask = nft_genmask_cur(net);
+ const u8 *rp = (const u8 *)key;
+ struct nft_pipapo_match *m;
+ struct nft_pipapo_field *f;
+ bool map_index;
+ int i, ret = 0;
+
+ m = rcu_dereference(priv->match);
+
+ /* This also protects access to all data related to scratch maps */
+ kernel_fpu_begin();
+
+ scratch = *raw_cpu_ptr(m->scratch_aligned);
+ if (unlikely(!scratch)) {
+ kernel_fpu_end();
+ return false;
+ }
+ map_index = raw_cpu_read(nft_pipapo_avx2_scratch_index);
+
+ res = scratch + (map_index ? m->bsize_max : 0);
+ fill = scratch + (map_index ? 0 : m->bsize_max);
+
+ /* Starting map doesn't need to be set for this implementation */
+
+ nft_pipapo_avx2_prepare();
+
+next_match:
+ nft_pipapo_for_each_field(f, i, m) {
+ bool last = i == m->field_count - 1, first = !i;
+
+#define NFT_SET_PIPAPO_AVX2_LOOKUP(b, n) \
+ (ret = nft_pipapo_avx2_lookup_##b##b_##n(res, fill, f, \
+ ret, rp, \
+ first, last))
+
+ if (likely(f->bb == 8)) {
+ if (f->groups == 1) {
+ NFT_SET_PIPAPO_AVX2_LOOKUP(8, 1);
+ } else if (f->groups == 2) {
+ NFT_SET_PIPAPO_AVX2_LOOKUP(8, 2);
+ } else if (f->groups == 4) {
+ NFT_SET_PIPAPO_AVX2_LOOKUP(8, 4);
+ } else if (f->groups == 6) {
+ NFT_SET_PIPAPO_AVX2_LOOKUP(8, 6);
+ } else if (f->groups == 16) {
+ NFT_SET_PIPAPO_AVX2_LOOKUP(8, 16);
+ } else {
+ ret = nft_pipapo_avx2_lookup_slow(res, fill, f,
+ ret, rp,
+ first, last);
+ }
+ } else {
+ if (f->groups == 2) {
+ NFT_SET_PIPAPO_AVX2_LOOKUP(4, 2);
+ } else if (f->groups == 4) {
+ NFT_SET_PIPAPO_AVX2_LOOKUP(4, 4);
+ } else if (f->groups == 8) {
+ NFT_SET_PIPAPO_AVX2_LOOKUP(4, 8);
+ } else if (f->groups == 12) {
+ NFT_SET_PIPAPO_AVX2_LOOKUP(4, 12);
+ } else if (f->groups == 32) {
+ NFT_SET_PIPAPO_AVX2_LOOKUP(4, 32);
+ } else {
+ ret = nft_pipapo_avx2_lookup_slow(res, fill, f,
+ ret, rp,
+ first, last);
+ }
+ }
+ NFT_PIPAPO_GROUP_BITS_ARE_8_OR_4;
+
+#undef NFT_SET_PIPAPO_AVX2_LOOKUP
+
+ if (ret < 0)
+ goto out;
+
+ if (last) {
+ *ext = &f->mt[ret].e->ext;
+ if (unlikely(nft_set_elem_expired(*ext) ||
+ !nft_set_elem_active(*ext, genmask))) {
+ ret = 0;
+ goto next_match;
+ }
+
+ goto out;
+ }
+
+ swap(res, fill);
+ rp += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
+ }
+
+out:
+ if (i % 2)
+ raw_cpu_write(nft_pipapo_avx2_scratch_index, !map_index);
+ kernel_fpu_end();
+
+ return ret >= 0;
+}
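Stripped of the group-count dispatch, the per-field loop above reduces to a match-then-swap pattern. A condensed sketch, where lookup_one_field() is a hypothetical stand-in for the NFT_SET_PIPAPO_AVX2_LOOKUP dispatch:

/* Condensed view of the loop above (helper name hypothetical) */
for (i = 0; i < m->field_count; i++) {
	bool first = !i, last = i == m->field_count - 1;

	ret = lookup_one_field(res, fill, &m->f[i], ret, rp, first, last);
	if (ret < 0)
		break;				/* no bucket matched */

	swap(res, fill);			/* matches seed the next field */
	rp += NFT_PIPAPO_GROUPS_PADDED_SIZE(&m->f[i]);
}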
diff --git a/net/netfilter/nft_set_pipapo_avx2.h b/net/netfilter/nft_set_pipapo_avx2.h
new file mode 100644
index 000000000000..396caf7bfca8
--- /dev/null
+++ b/net/netfilter/nft_set_pipapo_avx2.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _NFT_SET_PIPAPO_AVX2_H
+#define _NFT_SET_PIPAPO_AVX2_H
+
+#ifdef CONFIG_AS_AVX2
+#include <asm/fpu/xstate.h>
+#define NFT_PIPAPO_ALIGN (XSAVE_YMM_SIZE / BITS_PER_BYTE)
+
+bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key, const struct nft_set_ext **ext);
+bool nft_pipapo_avx2_estimate(const struct nft_set_desc *desc, u32 features,
+ struct nft_set_estimate *est);
+#endif /* CONFIG_AS_AVX2 */
+
+#endif /* _NFT_SET_PIPAPO_AVX2_H */
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 8617fc16a1ed..3a5552e14f75 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -550,8 +550,7 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
return true;
}
-struct nft_set_type nft_set_rbtree_type __read_mostly = {
- .owner = THIS_MODULE,
+const struct nft_set_type nft_set_rbtree_type = {
.features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
.ops = {
.privsize = nft_rbtree_privsize,
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
index 764e88682a81..30be5787fbde 100644
--- a/net/netfilter/nft_tunnel.c
+++ b/net/netfilter/nft_tunnel.c
@@ -11,6 +11,7 @@
#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include <net/erspan.h>
+#include <net/geneve.h>
struct nft_tunnel {
enum nft_tunnel_keys key:8;
@@ -144,6 +145,7 @@ struct nft_tunnel_opts {
union {
struct vxlan_metadata vxlan;
struct erspan_metadata erspan;
+ u8 data[IP_TUNNEL_OPTS_MAX];
} u;
u32 len;
__be16 flags;
@@ -301,9 +303,53 @@ static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
return 0;
}
+static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
+ [NFTA_TUNNEL_KEY_GENEVE_CLASS] = { .type = NLA_U16 },
+ [NFTA_TUNNEL_KEY_GENEVE_TYPE] = { .type = NLA_U8 },
+ [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 },
+};
+
+static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
+ struct nft_tunnel_opts *opts)
+{
+	struct geneve_opt *opt = (struct geneve_opt *)(opts->u.data + opts->len);
+ struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
+ int err, data_len;
+
+ err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
+ nft_tunnel_opts_geneve_policy, NULL);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
+ !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
+ !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
+ return -EINVAL;
+
+ attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
+ data_len = nla_len(attr);
+ if (data_len % 4)
+ return -EINVAL;
+
+ opts->len += sizeof(*opt) + data_len;
+ if (opts->len > IP_TUNNEL_OPTS_MAX)
+ return -EINVAL;
+
+ memcpy(opt->opt_data, nla_data(attr), data_len);
+ opt->length = data_len / 4;
+ opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
+ opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
+ opts->flags = TUNNEL_GENEVE_OPT;
+
+ return 0;
+}
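Each NFTA_TUNNEL_KEY_OPTS_GENEVE attribute appends one variable-length option to opts->u.data, so the buffer ends up holding geneve options back to back. A sketch of walking that buffer, using the same arithmetic as the dump path further below:

/* Sketch: iterating the concatenated geneve options built above */
const u8 *p = opts->u.data;

while (p < opts->u.data + opts->len) {
	const struct geneve_opt *o = (const struct geneve_opt *)p;

	/* sizeof(*o) bytes of header, then o->length 4-byte data words */
	p += sizeof(*o) + o->length * 4;
}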
+
static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
+ [NFTA_TUNNEL_KEY_OPTS_UNSPEC] = {
+ .strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
[NFTA_TUNNEL_KEY_OPTS_VXLAN] = { .type = NLA_NESTED, },
[NFTA_TUNNEL_KEY_OPTS_ERSPAN] = { .type = NLA_NESTED, },
+ [NFTA_TUNNEL_KEY_OPTS_GENEVE] = { .type = NLA_NESTED, },
};
static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
@@ -311,22 +357,43 @@ static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
struct ip_tunnel_info *info,
struct nft_tunnel_opts *opts)
{
- struct nlattr *tb[NFTA_TUNNEL_KEY_OPTS_MAX + 1];
- int err;
+ int err, rem, type = 0;
+ struct nlattr *nla;
- err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_OPTS_MAX, attr,
- nft_tunnel_opts_policy, NULL);
+ err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
+ nft_tunnel_opts_policy, NULL);
if (err < 0)
return err;
- if (tb[NFTA_TUNNEL_KEY_OPTS_VXLAN]) {
- err = nft_tunnel_obj_vxlan_init(tb[NFTA_TUNNEL_KEY_OPTS_VXLAN],
- opts);
- } else if (tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN]) {
- err = nft_tunnel_obj_erspan_init(tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN],
- opts);
- } else {
- return -EOPNOTSUPP;
+ nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
+ switch (nla_type(nla)) {
+ case NFTA_TUNNEL_KEY_OPTS_VXLAN:
+ if (type)
+ return -EINVAL;
+ err = nft_tunnel_obj_vxlan_init(nla, opts);
+ if (err)
+ return err;
+ type = TUNNEL_VXLAN_OPT;
+ break;
+ case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
+ if (type)
+ return -EINVAL;
+ err = nft_tunnel_obj_erspan_init(nla, opts);
+ if (err)
+ return err;
+ type = TUNNEL_ERSPAN_OPT;
+ break;
+ case NFTA_TUNNEL_KEY_OPTS_GENEVE:
+ if (type && type != TUNNEL_GENEVE_OPT)
+ return -EINVAL;
+ err = nft_tunnel_obj_geneve_init(nla, opts);
+ if (err)
+ return err;
+ type = TUNNEL_GENEVE_OPT;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
}
return err;
@@ -518,6 +585,25 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
break;
}
nla_nest_end(skb, inner);
+ } else if (opts->flags & TUNNEL_GENEVE_OPT) {
+ struct geneve_opt *opt;
+ int offset = 0;
+
+ inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
+ if (!inner)
+ goto failure;
+ while (opts->len > offset) {
+			opt = (struct geneve_opt *)(opts->u.data + offset);
+ if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
+ opt->opt_class) ||
+ nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
+ opt->type) ||
+ nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
+ opt->length * 4, opt->opt_data))
+ goto inner_failure;
+ offset += sizeof(*opt) + opt->length * 4;
+ }
+ nla_nest_end(skb, inner);
}
nla_nest_end(skb, nest);
return 0;
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index f56d3ed93e56..75bd0e5dd312 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/timer.h>
+#include <linux/alarmtimer.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netfilter.h>
@@ -30,6 +31,7 @@
struct idletimer_tg {
struct list_head entry;
+ struct alarm alarm;
struct timer_list timer;
struct work_struct work;
@@ -37,6 +39,7 @@ struct idletimer_tg {
struct device_attribute attr;
unsigned int refcnt;
+ u8 timer_type;
};
static LIST_HEAD(idletimer_tg_list);
@@ -62,20 +65,29 @@ static ssize_t idletimer_tg_show(struct device *dev,
{
struct idletimer_tg *timer;
unsigned long expires = 0;
+ struct timespec64 ktimespec = {};
+ long time_diff = 0;
mutex_lock(&list_mutex);
timer = __idletimer_tg_find_by_label(attr->attr.name);
- if (timer)
- expires = timer->timer.expires;
+ if (timer) {
+ if (timer->timer_type & XT_IDLETIMER_ALARM) {
+ ktime_t expires_alarm = alarm_expires_remaining(&timer->alarm);
+ ktimespec = ktime_to_timespec64(expires_alarm);
+ time_diff = ktimespec.tv_sec;
+ } else {
+ expires = timer->timer.expires;
+ time_diff = jiffies_to_msecs(expires - jiffies) / 1000;
+ }
+ }
mutex_unlock(&list_mutex);
- if (time_after(expires, jiffies))
- return sprintf(buf, "%u\n",
- jiffies_to_msecs(expires - jiffies) / 1000);
+ if (time_after(expires, jiffies) || ktimespec.tv_sec > 0)
+ return snprintf(buf, PAGE_SIZE, "%ld\n", time_diff);
- return sprintf(buf, "0\n");
+ return snprintf(buf, PAGE_SIZE, "0\n");
}
static void idletimer_tg_work(struct work_struct *work)
@@ -95,6 +107,16 @@ static void idletimer_tg_expired(struct timer_list *t)
schedule_work(&timer->work);
}
+static enum alarmtimer_restart idletimer_tg_alarmproc(struct alarm *alarm,
+ ktime_t now)
+{
+ struct idletimer_tg *timer = alarm->data;
+
+ pr_debug("alarm %s expired\n", timer->attr.attr.name);
+ schedule_work(&timer->work);
+ return ALARMTIMER_NORESTART;
+}
+
static int idletimer_check_sysfs_name(const char *name, unsigned int size)
{
int ret;
@@ -160,6 +182,68 @@ out:
return ret;
}
+static int idletimer_tg_create_v1(struct idletimer_tg_info_v1 *info)
+{
+ int ret;
+
+ info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+ if (!info->timer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = idletimer_check_sysfs_name(info->label, sizeof(info->label));
+ if (ret < 0)
+ goto out_free_timer;
+
+ sysfs_attr_init(&info->timer->attr.attr);
+ info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
+ if (!info->timer->attr.attr.name) {
+ ret = -ENOMEM;
+ goto out_free_timer;
+ }
+ info->timer->attr.attr.mode = 0444;
+ info->timer->attr.show = idletimer_tg_show;
+
+ ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
+ if (ret < 0) {
+ pr_debug("couldn't add file to sysfs");
+ goto out_free_attr;
+ }
+
+ /* notify userspace */
+	kobject_uevent(idletimer_tg_kobj, KOBJ_ADD);
+
+ list_add(&info->timer->entry, &idletimer_tg_list);
+ pr_debug("timer type value is %u", info->timer_type);
+ info->timer->timer_type = info->timer_type;
+ info->timer->refcnt = 1;
+
+ INIT_WORK(&info->timer->work, idletimer_tg_work);
+
+ if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
+ ktime_t tout;
+ alarm_init(&info->timer->alarm, ALARM_BOOTTIME,
+ idletimer_tg_alarmproc);
+ info->timer->alarm.data = info->timer;
+ tout = ktime_set(info->timeout, 0);
+ alarm_start_relative(&info->timer->alarm, tout);
+ } else {
+ timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
+ mod_timer(&info->timer->timer,
+ msecs_to_jiffies(info->timeout * 1000) + jiffies);
+ }
+
+ return 0;
+
+out_free_attr:
+ kfree(info->timer->attr.attr.name);
+out_free_timer:
+ kfree(info->timer);
+out:
+ return ret;
+}
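The practical difference between the two arming paths is the clock base: a jiffies timer effectively pauses while the system is suspended, whereas an ALARM_BOOTTIME alarm keeps counting suspended time and can wake the machine to fire. A minimal sketch of the alarm path in isolation (t stands in for the allocated idletimer_tg):

/* Minimal sketch: arm an alarm that fires after timeout_secs of
 * boottime, i.e. including time spent suspended.
 */
ktime_t tout = ktime_set(timeout_secs, 0);

alarm_init(&t->alarm, ALARM_BOOTTIME, idletimer_tg_alarmproc);
t->alarm.data = t;
alarm_start_relative(&t->alarm, tout);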
+
/*
* The actual xt_tables plugin.
*/
@@ -177,13 +261,30 @@ static unsigned int idletimer_tg_target(struct sk_buff *skb,
return XT_CONTINUE;
}
-static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
+/*
+ * The actual xt_tables plugin.
+ */
+static unsigned int idletimer_tg_target_v1(struct sk_buff *skb,
+ const struct xt_action_param *par)
{
- struct idletimer_tg_info *info = par->targinfo;
- int ret;
+ const struct idletimer_tg_info_v1 *info = par->targinfo;
- pr_debug("checkentry targinfo%s\n", info->label);
+ pr_debug("resetting timer %s, timeout period %u\n",
+ info->label, info->timeout);
+
+ if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
+ ktime_t tout = ktime_set(info->timeout, 0);
+ alarm_start_relative(&info->timer->alarm, tout);
+ } else {
+ mod_timer(&info->timer->timer,
+ msecs_to_jiffies(info->timeout * 1000) + jiffies);
+ }
+ return XT_CONTINUE;
+}
+
+static int idletimer_tg_helper(struct idletimer_tg_info *info)
+{
if (info->timeout == 0) {
pr_debug("timeout value is zero\n");
return -EINVAL;
@@ -198,7 +299,23 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
pr_debug("label is empty or not nul-terminated\n");
return -EINVAL;
}
+ return 0;
+}
+
+static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
+{
+ struct idletimer_tg_info *info = par->targinfo;
+ int ret;
+
+ pr_debug("checkentry targinfo%s\n", info->label);
+
+ ret = idletimer_tg_helper(info);
+	if (ret < 0) {
+		pr_debug("checkentry helper returned an error\n");
+ return -EINVAL;
+ }
mutex_lock(&list_mutex);
info->timer = __idletimer_tg_find_by_label(info->label);
@@ -222,6 +339,65 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
return 0;
}
+static int idletimer_tg_checkentry_v1(const struct xt_tgchk_param *par)
+{
+ struct idletimer_tg_info_v1 *info = par->targinfo;
+ int ret;
+
+ pr_debug("checkentry targinfo%s\n", info->label);
+
+ ret = idletimer_tg_helper((struct idletimer_tg_info *)info);
+	if (ret < 0) {
+		pr_debug("checkentry helper returned an error\n");
+ return -EINVAL;
+ }
+
+ if (info->timer_type > XT_IDLETIMER_ALARM) {
+ pr_debug("invalid value for timer type\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&list_mutex);
+
+ info->timer = __idletimer_tg_find_by_label(info->label);
+ if (info->timer) {
+ if (info->timer->timer_type != info->timer_type) {
+ pr_debug("Adding/Replacing rule with same label and different timer type is not allowed\n");
+ mutex_unlock(&list_mutex);
+ return -EINVAL;
+ }
+
+ info->timer->refcnt++;
+ if (info->timer_type & XT_IDLETIMER_ALARM) {
+ /* calculate remaining expiry time */
+ ktime_t tout = alarm_expires_remaining(&info->timer->alarm);
+ struct timespec64 ktimespec = ktime_to_timespec64(tout);
+
+ if (ktimespec.tv_sec > 0) {
+ pr_debug("time_expiry_remaining %lld\n",
+ ktimespec.tv_sec);
+ alarm_start_relative(&info->timer->alarm, tout);
+ }
+ } else {
+ mod_timer(&info->timer->timer,
+ msecs_to_jiffies(info->timeout * 1000) + jiffies);
+ }
+ pr_debug("increased refcnt of timer %s to %u\n",
+ info->label, info->timer->refcnt);
+ } else {
+ ret = idletimer_tg_create_v1(info);
+ if (ret < 0) {
+ pr_debug("failed to create timer\n");
+ mutex_unlock(&list_mutex);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&list_mutex);
+ return 0;
+}
+
static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
{
const struct idletimer_tg_info *info = par->targinfo;
@@ -247,7 +423,38 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
mutex_unlock(&list_mutex);
}
-static struct xt_target idletimer_tg __read_mostly = {
+static void idletimer_tg_destroy_v1(const struct xt_tgdtor_param *par)
+{
+ const struct idletimer_tg_info_v1 *info = par->targinfo;
+
+ pr_debug("destroy targinfo %s\n", info->label);
+
+ mutex_lock(&list_mutex);
+
+ if (--info->timer->refcnt == 0) {
+ pr_debug("deleting timer %s\n", info->label);
+
+ list_del(&info->timer->entry);
+ if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
+ alarm_cancel(&info->timer->alarm);
+ } else {
+ del_timer_sync(&info->timer->timer);
+ }
+ cancel_work_sync(&info->timer->work);
+ sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+ kfree(info->timer->attr.attr.name);
+ kfree(info->timer);
+ } else {
+ pr_debug("decreased refcnt of timer %s to %u\n",
+ info->label, info->timer->refcnt);
+ }
+
+ mutex_unlock(&list_mutex);
+}
+
+static struct xt_target idletimer_tg[] __read_mostly = {
+ {
.name = "IDLETIMER",
.family = NFPROTO_UNSPEC,
.target = idletimer_tg_target,
@@ -256,6 +463,20 @@ static struct xt_target idletimer_tg __read_mostly = {
.checkentry = idletimer_tg_checkentry,
.destroy = idletimer_tg_destroy,
.me = THIS_MODULE,
+ },
+ {
+ .name = "IDLETIMER",
+ .family = NFPROTO_UNSPEC,
+ .revision = 1,
+ .target = idletimer_tg_target_v1,
+ .targetsize = sizeof(struct idletimer_tg_info_v1),
+ .usersize = offsetof(struct idletimer_tg_info_v1, timer),
+ .checkentry = idletimer_tg_checkentry_v1,
+ .destroy = idletimer_tg_destroy_v1,
+ .me = THIS_MODULE,
+ },
};
static struct class *idletimer_tg_class;
@@ -283,7 +504,8 @@ static int __init idletimer_tg_init(void)
idletimer_tg_kobj = &idletimer_tg_device->kobj;
- err = xt_register_target(&idletimer_tg);
+ err = xt_register_targets(idletimer_tg, ARRAY_SIZE(idletimer_tg));
+
if (err < 0) {
pr_debug("couldn't register xt target\n");
goto out_dev;
@@ -300,7 +522,7 @@ out:
static void __exit idletimer_tg_exit(void)
{
- xt_unregister_target(&idletimer_tg);
+ xt_unregister_targets(idletimer_tg, ARRAY_SIZE(idletimer_tg));
device_destroy(idletimer_tg_class, MKDEV(0, 0));
class_destroy(idletimer_tg_class);
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index 2317721f3ecb..75625d13e976 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -21,8 +21,6 @@ MODULE_DESCRIPTION("Xtables: packet security mark modification");
MODULE_ALIAS("ipt_SECMARK");
MODULE_ALIAS("ip6t_SECMARK");
-#define PFX "SECMARK: "
-
static u8 mode;
static unsigned int
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 8c835ad63729..9c5cfd74a0ee 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -132,7 +132,7 @@ struct xt_hashlimit_htable {
const char *name;
struct net *net;
- struct hlist_head hash[0]; /* hashtable itself */
+ struct hlist_head hash[]; /* hashtable itself */
};
static int
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 225a7ab6d79a..19bef176145e 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -71,7 +71,7 @@ struct recent_entry {
u_int8_t ttl;
u_int8_t index;
u_int16_t nstamps;
- unsigned long stamps[0];
+ unsigned long stamps[];
};
struct recent_table {
@@ -82,7 +82,7 @@ struct recent_table {
unsigned int entries;
u8 nstamps_max_mask;
struct list_head lru_list;
- struct list_head iphash[0];
+ struct list_head iphash[];
};
struct recent_net {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2f234791b879..5ded01ca8b20 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -71,7 +71,7 @@
struct listeners {
struct rcu_head rcu;
- unsigned long masks[0];
+ unsigned long masks[];
};
/* state bits */
@@ -2574,6 +2574,7 @@ static void *__netlink_seq_next(struct seq_file *seq)
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
+ __acquires(RCU)
{
struct nl_seq_iter *iter = seq->private;
void *obj = SEQ_START_TOKEN;
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 58d5373c513c..7b1a74f74aad 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1230,6 +1230,7 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_PROC_FS
static void *nr_info_start(struct seq_file *seq, loff_t *pos)
+ __acquires(&nr_list_lock)
{
spin_lock_bh(&nr_list_lock);
return seq_hlist_start_head(&nr_list, *pos);
@@ -1241,6 +1242,7 @@ static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void nr_info_stop(struct seq_file *seq, void *v)
+ __releases(&nr_list_lock)
{
spin_unlock_bh(&nr_list_lock);
}
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index d41335bad1f8..79f12d8c7b86 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -838,6 +838,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
#ifdef CONFIG_PROC_FS
static void *nr_node_start(struct seq_file *seq, loff_t *pos)
+ __acquires(&nr_node_list_lock)
{
spin_lock_bh(&nr_node_list_lock);
return seq_hlist_start_head(&nr_node_list, *pos);
@@ -849,6 +850,7 @@ static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void nr_node_stop(struct seq_file *seq, void *v)
+ __releases(&nr_node_list_lock)
{
spin_unlock_bh(&nr_node_list_lock);
}
@@ -893,6 +895,7 @@ const struct seq_operations nr_node_seqops = {
};
static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
+ __acquires(&nr_neigh_list_lock)
{
spin_lock_bh(&nr_neigh_list_lock);
return seq_hlist_start_head(&nr_neigh_list, *pos);
@@ -904,6 +907,7 @@ static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void nr_neigh_stop(struct seq_file *seq, void *v)
+ __releases(&nr_neigh_list_lock)
{
spin_unlock_bh(&nr_neigh_list_lock);
}
diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
index 65aaa9d7c813..304b1a9bb18a 100644
--- a/net/nfc/digital_dep.c
+++ b/net/nfc/digital_dep.c
@@ -71,7 +71,7 @@ struct digital_atr_req {
u8 bs;
u8 br;
u8 pp;
- u8 gb[0];
+ u8 gb[];
} __packed;
struct digital_atr_res {
@@ -83,7 +83,7 @@ struct digital_atr_res {
u8 br;
u8 to;
u8 pp;
- u8 gb[0];
+ u8 gb[];
} __packed;
struct digital_psl_req {
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 7fbfe2adfffa..fc0efd8833c8 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -964,6 +964,25 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}
+static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
+ struct sw_flow_key *key,
+ const struct nlattr *attr, bool last)
+{
+	/* The first action is always 'OVS_DEC_TTL_ATTR_ACTION'. */
+ struct nlattr *dec_ttl_arg = nla_data(attr);
+ int rem = nla_len(attr);
+
+ if (nla_len(dec_ttl_arg)) {
+ struct nlattr *actions = nla_next(dec_ttl_arg, &rem);
+
+ if (actions)
+ return clone_execute(dp, skb, key, 0, actions, rem,
+ last, false);
+ }
+ consume_skb(skb);
+ return 0;
+}
+
/* When 'last' is true, sample() should always consume the 'skb'.
* Otherwise, sample() should keep 'skb' intact regardless what
* actions are executed within sample().
@@ -1180,6 +1199,45 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
nla_len(actions), last, clone_flow_key);
}
+static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
+{
+ int err;
+
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *nh;
+
+ err = skb_ensure_writable(skb, skb_network_offset(skb) +
+ sizeof(*nh));
+ if (unlikely(err))
+ return err;
+
+ nh = ipv6_hdr(skb);
+
+ if (nh->hop_limit <= 1)
+ return -EHOSTUNREACH;
+
+ key->ip.ttl = --nh->hop_limit;
+ } else {
+ struct iphdr *nh;
+ u8 old_ttl;
+
+ err = skb_ensure_writable(skb, skb_network_offset(skb) +
+ sizeof(*nh));
+ if (unlikely(err))
+ return err;
+
+ nh = ip_hdr(skb);
+ if (nh->ttl <= 1)
+ return -EHOSTUNREACH;
+
+ old_ttl = nh->ttl--;
+ csum_replace2(&nh->check, htons(old_ttl << 8),
+ htons(nh->ttl << 8));
+ key->ip.ttl = nh->ttl;
+ }
+ return 0;
+}
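The IPv4 branch updates the header checksum incrementally: TTL is the high byte of the 16-bit word it shares with the protocol field, hence the << 8 in the old/new values fed to csum_replace2(). The equivalent but slower alternative would be a full recompute, as ip_send_check() does:

/* Full recompute, equivalent to the incremental update above */
nh->ttl--;
nh->check = 0;
nh->check = ip_fast_csum((unsigned char *)nh, nh->ihl);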
+
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key,
@@ -1365,6 +1423,15 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
break;
}
+
+ case OVS_ACTION_ATTR_DEC_TTL:
+ err = execute_dec_ttl(skb, key);
+ if (err == -EHOSTUNREACH) {
+ err = dec_ttl_exception_handler(dp, skb, key,
+ a, true);
+ return err;
+ }
+ break;
}
if (unlikely(err)) {
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 07a7dd185995..d8ae541d22a8 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -305,7 +305,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
struct sk_buff *segs, *nskb;
int err;
- BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
+ BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_GSO_CB_OFFSET);
segs = __skb_gso_segment(skb, NETIF_F_SG, false);
if (IS_ERR(segs))
return PTR_ERR(segs);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 288122eec7c8..79252d4887ff 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -80,6 +80,7 @@ static bool actions_may_change_flow(const struct nlattr *actions)
case OVS_ACTION_ATTR_METER:
case OVS_ACTION_ATTR_CHECK_PKT_LEN:
case OVS_ACTION_ATTR_ADD_MPLS:
+ case OVS_ACTION_ATTR_DEC_TTL:
default:
return true;
}
@@ -2495,6 +2496,39 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
return 0;
}
+static int validate_and_copy_dec_ttl(struct net *net,
+ const struct nlattr *attr,
+ const struct sw_flow_key *key,
+ struct sw_flow_actions **sfa,
+ __be16 eth_type, __be16 vlan_tci,
+ u32 mpls_label_count, bool log)
+{
+ int start, err;
+ u32 nested = true;
+
+ if (!nla_len(attr))
+ return ovs_nla_add_action(sfa, OVS_ACTION_ATTR_DEC_TTL,
+ NULL, 0, log);
+
+ start = add_nested_action_start(sfa, OVS_ACTION_ATTR_DEC_TTL, log);
+ if (start < 0)
+ return start;
+
+ err = ovs_nla_add_action(sfa, OVS_DEC_TTL_ATTR_ACTION, &nested,
+ sizeof(nested), log);
+
+ if (err)
+ return err;
+
+ err = __ovs_nla_copy_actions(net, attr, key, sfa, eth_type,
+ vlan_tci, mpls_label_count, log);
+ if (err)
+ return err;
+
+ add_nested_action_end(*sfa, start);
+ return 0;
+}
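Together with dec_ttl_exception_handler() earlier in this series of hunks, this implies the following nested layout (shown for orientation only):

/*
 * OVS_ACTION_ATTR_DEC_TTL
 *   OVS_DEC_TTL_ATTR_ACTION	u32 marker, always the first attribute
 *   <copied user actions>	executed only when the TTL would expire
 */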
+
static int validate_and_copy_clone(struct net *net,
const struct nlattr *attr,
const struct sw_flow_key *key,
@@ -3009,6 +3043,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
[OVS_ACTION_ATTR_CLONE] = (u32)-1,
[OVS_ACTION_ATTR_CHECK_PKT_LEN] = (u32)-1,
[OVS_ACTION_ATTR_ADD_MPLS] = sizeof(struct ovs_action_add_mpls),
+ [OVS_ACTION_ATTR_DEC_TTL] = (u32)-1,
};
const struct ovs_action_push_vlan *vlan;
int type = nla_type(a);
@@ -3269,6 +3304,15 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
break;
}
+ case OVS_ACTION_ATTR_DEC_TTL:
+ err = validate_and_copy_dec_ttl(net, a, key, sfa,
+ eth_type, vlan_tci,
+ mpls_label_count, log);
+ if (err)
+ return err;
+ skip_copy = true;
+ break;
+
default:
OVS_NLERR(log, "Unknown Action type %d", type);
return -EINVAL;
@@ -3440,6 +3484,26 @@ out:
return err;
}
+static int dec_ttl_action_to_attr(const struct nlattr *attr,
+ struct sk_buff *skb)
+{
+ int err = 0, rem = nla_len(attr);
+ struct nlattr *start;
+
+ start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_DEC_TTL);
+
+ if (!start)
+ return -EMSGSIZE;
+
+ err = ovs_nla_put_actions(nla_data(attr), rem, skb);
+ if (err)
+ nla_nest_cancel(skb, start);
+ else
+ nla_nest_end(skb, start);
+
+ return err;
+}
+
static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
{
const struct nlattr *ovs_key = nla_data(a);
@@ -3540,6 +3604,12 @@ int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
return err;
break;
+ case OVS_ACTION_ATTR_DEC_TTL:
+ err = dec_ttl_action_to_attr(a, skb);
+ if (err)
+ return err;
+ break;
+
default:
if (nla_put(skb, type, nla_len(a), nla_data(a)))
return -EMSGSIZE;
diff --git a/net/qrtr/Makefile b/net/qrtr/Makefile
index 1c6d6c120fb7..32d4e923925d 100644
--- a/net/qrtr/Makefile
+++ b/net/qrtr/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_QRTR) := qrtr.o
+obj-$(CONFIG_QRTR) := qrtr.o ns.o
obj-$(CONFIG_QRTR_SMD) += qrtr-smd.o
qrtr-smd-y := smd.o
diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
new file mode 100644
index 000000000000..e7d0fe3f4330
--- /dev/null
+++ b/net/qrtr/ns.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, Linaro Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/qrtr.h>
+#include <linux/workqueue.h>
+#include <net/sock.h>
+
+#include "qrtr.h"
+
+static RADIX_TREE(nodes, GFP_KERNEL);
+
+static struct {
+ struct socket *sock;
+ struct sockaddr_qrtr bcast_sq;
+ struct list_head lookups;
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+ int local_node;
+} qrtr_ns;
+
+static const char * const qrtr_ctrl_pkt_strings[] = {
+ [QRTR_TYPE_HELLO] = "hello",
+ [QRTR_TYPE_BYE] = "bye",
+ [QRTR_TYPE_NEW_SERVER] = "new-server",
+ [QRTR_TYPE_DEL_SERVER] = "del-server",
+ [QRTR_TYPE_DEL_CLIENT] = "del-client",
+ [QRTR_TYPE_RESUME_TX] = "resume-tx",
+ [QRTR_TYPE_EXIT] = "exit",
+ [QRTR_TYPE_PING] = "ping",
+ [QRTR_TYPE_NEW_LOOKUP] = "new-lookup",
+ [QRTR_TYPE_DEL_LOOKUP] = "del-lookup",
+};
+
+struct qrtr_server_filter {
+ unsigned int service;
+ unsigned int instance;
+ unsigned int ifilter;
+};
+
+struct qrtr_lookup {
+ unsigned int service;
+ unsigned int instance;
+
+ struct sockaddr_qrtr sq;
+ struct list_head li;
+};
+
+struct qrtr_server {
+ unsigned int service;
+ unsigned int instance;
+
+ unsigned int node;
+ unsigned int port;
+
+ struct list_head qli;
+};
+
+struct qrtr_node {
+ unsigned int id;
+ struct radix_tree_root servers;
+};
+
+static struct qrtr_node *node_get(unsigned int node_id)
+{
+ struct qrtr_node *node;
+
+ node = radix_tree_lookup(&nodes, node_id);
+ if (node)
+ return node;
+
+	/* If node didn't exist, allocate and insert it into the tree */
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return NULL;
+
+ node->id = node_id;
+
+ radix_tree_insert(&nodes, node_id, node);
+
+ return node;
+}
+
+static int server_match(const struct qrtr_server *srv,
+ const struct qrtr_server_filter *f)
+{
+ unsigned int ifilter = f->ifilter;
+
+ if (f->service != 0 && srv->service != f->service)
+ return 0;
+ if (!ifilter && f->instance)
+ ifilter = ~0;
+
+ return (srv->instance & ifilter) == f->instance;
+}
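The filter semantics above: a zero ifilter combined with a nonzero instance is widened to an exact-match mask, while an explicit mask gives wildcard matching. For example:

/* Illustration of the two filter modes */
struct qrtr_server_filter exact = { .service = 15, .instance = 1 };
struct qrtr_server_filter loose = { .service = 15, .instance = 1,
				    .ifilter = 0xff };
/* exact: only instance == 1 passes; loose: (instance & 0xff) == 1 */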
+
+static int service_announce_new(struct sockaddr_qrtr *dest,
+ struct qrtr_server *srv)
+{
+ struct qrtr_ctrl_pkt pkt;
+ struct msghdr msg = { };
+ struct kvec iv;
+
+ trace_printk("advertising new server [%d:%x]@[%d:%d]\n",
+ srv->service, srv->instance, srv->node, srv->port);
+
+ iv.iov_base = &pkt;
+ iv.iov_len = sizeof(pkt);
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER);
+ pkt.server.service = cpu_to_le32(srv->service);
+ pkt.server.instance = cpu_to_le32(srv->instance);
+ pkt.server.node = cpu_to_le32(srv->node);
+ pkt.server.port = cpu_to_le32(srv->port);
+
+ msg.msg_name = (struct sockaddr *)dest;
+ msg.msg_namelen = sizeof(*dest);
+
+ return kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
+}
+
+static int service_announce_del(struct sockaddr_qrtr *dest,
+ struct qrtr_server *srv)
+{
+ struct qrtr_ctrl_pkt pkt;
+ struct msghdr msg = { };
+ struct kvec iv;
+ int ret;
+
+ trace_printk("advertising removal of server [%d:%x]@[%d:%d]\n",
+ srv->service, srv->instance, srv->node, srv->port);
+
+ iv.iov_base = &pkt;
+ iv.iov_len = sizeof(pkt);
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_DEL_SERVER);
+ pkt.server.service = cpu_to_le32(srv->service);
+ pkt.server.instance = cpu_to_le32(srv->instance);
+ pkt.server.node = cpu_to_le32(srv->node);
+ pkt.server.port = cpu_to_le32(srv->port);
+
+ msg.msg_name = (struct sockaddr *)dest;
+ msg.msg_namelen = sizeof(*dest);
+
+ ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0)
+ pr_err("failed to announce del service\n");
+
+ return ret;
+}
+
+static void lookup_notify(struct sockaddr_qrtr *to, struct qrtr_server *srv,
+ bool new)
+{
+ struct qrtr_ctrl_pkt pkt;
+ struct msghdr msg = { };
+ struct kvec iv;
+ int ret;
+
+ iv.iov_base = &pkt;
+ iv.iov_len = sizeof(pkt);
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = new ? cpu_to_le32(QRTR_TYPE_NEW_SERVER) :
+ cpu_to_le32(QRTR_TYPE_DEL_SERVER);
+ if (srv) {
+ pkt.server.service = cpu_to_le32(srv->service);
+ pkt.server.instance = cpu_to_le32(srv->instance);
+ pkt.server.node = cpu_to_le32(srv->node);
+ pkt.server.port = cpu_to_le32(srv->port);
+ }
+
+ msg.msg_name = (struct sockaddr *)to;
+ msg.msg_namelen = sizeof(*to);
+
+ ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0)
+ pr_err("failed to send lookup notification\n");
+}
+
+static int announce_servers(struct sockaddr_qrtr *sq)
+{
+ struct radix_tree_iter iter;
+ struct qrtr_server *srv;
+ struct qrtr_node *node;
+ void __rcu **slot;
+ int ret;
+
+ node = node_get(qrtr_ns.local_node);
+ if (!node)
+ return 0;
+
+ /* Announce the list of servers registered in this node */
+ radix_tree_for_each_slot(slot, &node->servers, &iter, 0) {
+ srv = radix_tree_deref_slot(slot);
+
+ ret = service_announce_new(sq, srv);
+ if (ret < 0) {
+ pr_err("failed to announce new service\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct qrtr_server *server_add(unsigned int service,
+ unsigned int instance,
+ unsigned int node_id,
+ unsigned int port)
+{
+ struct qrtr_server *srv;
+ struct qrtr_server *old;
+ struct qrtr_node *node;
+
+ if (!service || !port)
+ return NULL;
+
+ srv = kzalloc(sizeof(*srv), GFP_KERNEL);
+ if (!srv)
+ return NULL;
+
+ srv->service = service;
+ srv->instance = instance;
+ srv->node = node_id;
+ srv->port = port;
+
+ node = node_get(node_id);
+ if (!node)
+ goto err;
+
+ /* Delete the old server on the same port */
+ old = radix_tree_lookup(&node->servers, port);
+ if (old) {
+ radix_tree_delete(&node->servers, port);
+ kfree(old);
+ }
+
+ radix_tree_insert(&node->servers, port, srv);
+
+ trace_printk("add server [%d:%x]@[%d:%d]\n", srv->service,
+ srv->instance, srv->node, srv->port);
+
+ return srv;
+
+err:
+ kfree(srv);
+ return NULL;
+}
+
+static int server_del(struct qrtr_node *node, unsigned int port)
+{
+ struct qrtr_lookup *lookup;
+ struct qrtr_server *srv;
+ struct list_head *li;
+
+ srv = radix_tree_lookup(&node->servers, port);
+ if (!srv)
+ return -ENOENT;
+
+ radix_tree_delete(&node->servers, port);
+
+ /* Broadcast the removal of local servers */
+ if (srv->node == qrtr_ns.local_node)
+ service_announce_del(&qrtr_ns.bcast_sq, srv);
+
+ /* Announce the service's disappearance to observers */
+ list_for_each(li, &qrtr_ns.lookups) {
+ lookup = container_of(li, struct qrtr_lookup, li);
+ if (lookup->service && lookup->service != srv->service)
+ continue;
+ if (lookup->instance && lookup->instance != srv->instance)
+ continue;
+
+ lookup_notify(&lookup->sq, srv, false);
+ }
+
+ kfree(srv);
+
+ return 0;
+}
+
+static int say_hello(struct sockaddr_qrtr *dest)
+{
+ struct qrtr_ctrl_pkt pkt;
+ struct msghdr msg = { };
+ struct kvec iv;
+ int ret;
+
+ iv.iov_base = &pkt;
+ iv.iov_len = sizeof(pkt);
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_HELLO);
+
+ msg.msg_name = (struct sockaddr *)dest;
+ msg.msg_namelen = sizeof(*dest);
+
+ ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0)
+ pr_err("failed to send hello msg\n");
+
+ return ret;
+}
+
+/* Announce the list of servers registered on the local node */
+static int ctrl_cmd_hello(struct sockaddr_qrtr *sq)
+{
+ int ret;
+
+ ret = say_hello(sq);
+ if (ret < 0)
+ return ret;
+
+ return announce_servers(sq);
+}
+
+static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
+{
+ struct qrtr_node *local_node;
+ struct radix_tree_iter iter;
+ struct qrtr_ctrl_pkt pkt;
+ struct qrtr_server *srv;
+ struct sockaddr_qrtr sq;
+ struct msghdr msg = { };
+ struct qrtr_node *node;
+ void __rcu **slot;
+ struct kvec iv;
+ int ret;
+
+ iv.iov_base = &pkt;
+ iv.iov_len = sizeof(pkt);
+
+ node = node_get(from->sq_node);
+ if (!node)
+ return 0;
+
+ /* Advertise removal of this client to all servers of remote node */
+ radix_tree_for_each_slot(slot, &node->servers, &iter, 0) {
+ srv = radix_tree_deref_slot(slot);
+ server_del(node, srv->port);
+ }
+
+ /* Advertise the removal of this client to all local servers */
+ local_node = node_get(qrtr_ns.local_node);
+ if (!local_node)
+ return 0;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_BYE);
+ pkt.client.node = cpu_to_le32(from->sq_node);
+
+ radix_tree_for_each_slot(slot, &local_node->servers, &iter, 0) {
+ srv = radix_tree_deref_slot(slot);
+
+ sq.sq_family = AF_QIPCRTR;
+ sq.sq_node = srv->node;
+ sq.sq_port = srv->port;
+
+ msg.msg_name = (struct sockaddr *)&sq;
+ msg.msg_namelen = sizeof(sq);
+
+ ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0) {
+ pr_err("failed to send bye cmd\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
+ unsigned int node_id, unsigned int port)
+{
+ struct qrtr_node *local_node;
+ struct radix_tree_iter iter;
+ struct qrtr_lookup *lookup;
+ struct qrtr_ctrl_pkt pkt;
+ struct msghdr msg = { };
+ struct qrtr_server *srv;
+ struct sockaddr_qrtr sq;
+ struct qrtr_node *node;
+ struct list_head *tmp;
+ struct list_head *li;
+ void __rcu **slot;
+ struct kvec iv;
+ int ret;
+
+ iv.iov_base = &pkt;
+ iv.iov_len = sizeof(pkt);
+
+ /* Don't accept spoofed messages */
+ if (from->sq_node != node_id)
+ return -EINVAL;
+
+ /* Local DEL_CLIENT messages comes from the port being closed */
+ if (from->sq_node == qrtr_ns.local_node && from->sq_port != port)
+ return -EINVAL;
+
+ /* Remove any lookups by this client */
+ list_for_each_safe(li, tmp, &qrtr_ns.lookups) {
+ lookup = container_of(li, struct qrtr_lookup, li);
+ if (lookup->sq.sq_node != node_id)
+ continue;
+ if (lookup->sq.sq_port != port)
+ continue;
+
+ list_del(&lookup->li);
+ kfree(lookup);
+ }
+
+ /* Remove the server belonging to this port */
+ node = node_get(node_id);
+ if (node)
+ server_del(node, port);
+
+ /* Advertise the removal of this client to all local servers */
+ local_node = node_get(qrtr_ns.local_node);
+ if (!local_node)
+ return 0;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
+ pkt.client.node = cpu_to_le32(node_id);
+ pkt.client.port = cpu_to_le32(port);
+
+ radix_tree_for_each_slot(slot, &local_node->servers, &iter, 0) {
+ srv = radix_tree_deref_slot(slot);
+
+ sq.sq_family = AF_QIPCRTR;
+ sq.sq_node = srv->node;
+ sq.sq_port = srv->port;
+
+ msg.msg_name = (struct sockaddr *)&sq;
+ msg.msg_namelen = sizeof(sq);
+
+ ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
+ if (ret < 0) {
+ pr_err("failed to send del client cmd\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ctrl_cmd_new_server(struct sockaddr_qrtr *from,
+ unsigned int service, unsigned int instance,
+ unsigned int node_id, unsigned int port)
+{
+ struct qrtr_lookup *lookup;
+ struct qrtr_server *srv;
+ struct list_head *li;
+ int ret = 0;
+
+ /* Ignore specified node and port for local servers */
+ if (from->sq_node == qrtr_ns.local_node) {
+ node_id = from->sq_node;
+ port = from->sq_port;
+ }
+
+ /* Don't accept spoofed messages */
+ if (from->sq_node != node_id)
+ return -EINVAL;
+
+ srv = server_add(service, instance, node_id, port);
+ if (!srv)
+ return -EINVAL;
+
+ if (srv->node == qrtr_ns.local_node) {
+ ret = service_announce_new(&qrtr_ns.bcast_sq, srv);
+ if (ret < 0) {
+ pr_err("failed to announce new service\n");
+ return ret;
+ }
+ }
+
+ /* Notify any potential lookups about the new server */
+ list_for_each(li, &qrtr_ns.lookups) {
+ lookup = container_of(li, struct qrtr_lookup, li);
+ if (lookup->service && lookup->service != service)
+ continue;
+ if (lookup->instance && lookup->instance != instance)
+ continue;
+
+ lookup_notify(&lookup->sq, srv, true);
+ }
+
+ return ret;
+}
+
+static int ctrl_cmd_del_server(struct sockaddr_qrtr *from,
+ unsigned int service, unsigned int instance,
+ unsigned int node_id, unsigned int port)
+{
+ struct qrtr_node *node;
+
+	/* Ignore specified node and port for local servers */
+ if (from->sq_node == qrtr_ns.local_node) {
+ node_id = from->sq_node;
+ port = from->sq_port;
+ }
+
+ /* Don't accept spoofed messages */
+ if (from->sq_node != node_id)
+ return -EINVAL;
+
+ /* Local servers may only unregister themselves */
+ if (from->sq_node == qrtr_ns.local_node && from->sq_port != port)
+ return -EINVAL;
+
+ node = node_get(node_id);
+ if (!node)
+ return -ENOENT;
+
+ return server_del(node, port);
+}
+
+static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
+ unsigned int service, unsigned int instance)
+{
+ struct radix_tree_iter node_iter;
+ struct qrtr_server_filter filter;
+ struct radix_tree_iter srv_iter;
+ struct qrtr_lookup *lookup;
+ struct qrtr_node *node;
+ void __rcu **node_slot;
+ void __rcu **srv_slot;
+
+ /* Accept only local observers */
+ if (from->sq_node != qrtr_ns.local_node)
+ return -EINVAL;
+
+ lookup = kzalloc(sizeof(*lookup), GFP_KERNEL);
+ if (!lookup)
+ return -ENOMEM;
+
+ lookup->sq = *from;
+ lookup->service = service;
+ lookup->instance = instance;
+ list_add_tail(&lookup->li, &qrtr_ns.lookups);
+
+ memset(&filter, 0, sizeof(filter));
+ filter.service = service;
+ filter.instance = instance;
+
+ radix_tree_for_each_slot(node_slot, &nodes, &node_iter, 0) {
+ node = radix_tree_deref_slot(node_slot);
+
+ radix_tree_for_each_slot(srv_slot, &node->servers,
+ &srv_iter, 0) {
+ struct qrtr_server *srv;
+
+ srv = radix_tree_deref_slot(srv_slot);
+ if (!server_match(srv, &filter))
+ continue;
+
+ lookup_notify(from, srv, true);
+ }
+ }
+
+ /* Empty notification, to indicate end of listing */
+ lookup_notify(from, NULL, true);
+
+ return 0;
+}
+
+static void ctrl_cmd_del_lookup(struct sockaddr_qrtr *from,
+ unsigned int service, unsigned int instance)
+{
+ struct qrtr_lookup *lookup;
+ struct list_head *tmp;
+ struct list_head *li;
+
+ list_for_each_safe(li, tmp, &qrtr_ns.lookups) {
+ lookup = container_of(li, struct qrtr_lookup, li);
+ if (lookup->sq.sq_node != from->sq_node)
+ continue;
+ if (lookup->sq.sq_port != from->sq_port)
+ continue;
+ if (lookup->service != service)
+ continue;
+ if (lookup->instance && lookup->instance != instance)
+ continue;
+
+ list_del(&lookup->li);
+ kfree(lookup);
+ }
+}
+
+static void qrtr_ns_worker(struct work_struct *work)
+{
+ const struct qrtr_ctrl_pkt *pkt;
+ size_t recv_buf_size = 4096;
+ struct sockaddr_qrtr sq;
+ struct msghdr msg = { };
+ unsigned int cmd;
+ ssize_t msglen;
+ void *recv_buf;
+ struct kvec iv;
+ int ret;
+
+ msg.msg_name = (struct sockaddr *)&sq;
+ msg.msg_namelen = sizeof(sq);
+
+ recv_buf = kzalloc(recv_buf_size, GFP_KERNEL);
+ if (!recv_buf)
+ return;
+
+ for (;;) {
+ iv.iov_base = recv_buf;
+ iv.iov_len = recv_buf_size;
+
+ msglen = kernel_recvmsg(qrtr_ns.sock, &msg, &iv, 1,
+ iv.iov_len, MSG_DONTWAIT);
+
+ if (msglen == -EAGAIN)
+ break;
+
+ if (msglen < 0) {
+ pr_err("error receiving packet: %zd\n", msglen);
+ break;
+ }
+
+ pkt = recv_buf;
+ cmd = le32_to_cpu(pkt->cmd);
+ if (cmd < ARRAY_SIZE(qrtr_ctrl_pkt_strings) &&
+ qrtr_ctrl_pkt_strings[cmd])
+ trace_printk("%s from %d:%d\n",
+ qrtr_ctrl_pkt_strings[cmd], sq.sq_node,
+ sq.sq_port);
+
+ ret = 0;
+ switch (cmd) {
+ case QRTR_TYPE_HELLO:
+ ret = ctrl_cmd_hello(&sq);
+ break;
+ case QRTR_TYPE_BYE:
+ ret = ctrl_cmd_bye(&sq);
+ break;
+ case QRTR_TYPE_DEL_CLIENT:
+ ret = ctrl_cmd_del_client(&sq,
+ le32_to_cpu(pkt->client.node),
+ le32_to_cpu(pkt->client.port));
+ break;
+ case QRTR_TYPE_NEW_SERVER:
+ ret = ctrl_cmd_new_server(&sq,
+ le32_to_cpu(pkt->server.service),
+ le32_to_cpu(pkt->server.instance),
+ le32_to_cpu(pkt->server.node),
+ le32_to_cpu(pkt->server.port));
+ break;
+ case QRTR_TYPE_DEL_SERVER:
+ ret = ctrl_cmd_del_server(&sq,
+ le32_to_cpu(pkt->server.service),
+ le32_to_cpu(pkt->server.instance),
+ le32_to_cpu(pkt->server.node),
+ le32_to_cpu(pkt->server.port));
+ break;
+ case QRTR_TYPE_EXIT:
+ case QRTR_TYPE_PING:
+ case QRTR_TYPE_RESUME_TX:
+ break;
+ case QRTR_TYPE_NEW_LOOKUP:
+ ret = ctrl_cmd_new_lookup(&sq,
+ le32_to_cpu(pkt->server.service),
+ le32_to_cpu(pkt->server.instance));
+ break;
+ case QRTR_TYPE_DEL_LOOKUP:
+ ctrl_cmd_del_lookup(&sq,
+ le32_to_cpu(pkt->server.service),
+ le32_to_cpu(pkt->server.instance));
+ break;
+ }
+
+ if (ret < 0)
+ pr_err("failed while handling packet from %d:%d",
+ sq.sq_node, sq.sq_port);
+ }
+
+ kfree(recv_buf);
+}
+
+static void qrtr_ns_data_ready(struct sock *sk)
+{
+ queue_work(qrtr_ns.workqueue, &qrtr_ns.work);
+}
+
+void qrtr_ns_init(void)
+{
+ struct sockaddr_qrtr sq;
+ int ret;
+
+ INIT_LIST_HEAD(&qrtr_ns.lookups);
+ INIT_WORK(&qrtr_ns.work, qrtr_ns_worker);
+
+ ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM,
+ PF_QIPCRTR, &qrtr_ns.sock);
+ if (ret < 0)
+ return;
+
+ ret = kernel_getsockname(qrtr_ns.sock, (struct sockaddr *)&sq);
+ if (ret < 0) {
+ pr_err("failed to get socket name\n");
+ goto err_sock;
+ }
+
+ qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready;
+
+ sq.sq_port = QRTR_PORT_CTRL;
+ qrtr_ns.local_node = sq.sq_node;
+
+ ret = kernel_bind(qrtr_ns.sock, (struct sockaddr *)&sq, sizeof(sq));
+ if (ret < 0) {
+ pr_err("failed to bind to socket\n");
+ goto err_sock;
+ }
+
+ qrtr_ns.bcast_sq.sq_family = AF_QIPCRTR;
+ qrtr_ns.bcast_sq.sq_node = QRTR_NODE_BCAST;
+ qrtr_ns.bcast_sq.sq_port = QRTR_PORT_CTRL;
+
+ qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1);
+ if (!qrtr_ns.workqueue)
+ goto err_sock;
+
+ ret = say_hello(&qrtr_ns.bcast_sq);
+ if (ret < 0)
+ goto err_wq;
+
+ return;
+
+err_wq:
+ destroy_workqueue(qrtr_ns.workqueue);
+err_sock:
+ sock_release(qrtr_ns.sock);
+}
+EXPORT_SYMBOL_GPL(qrtr_ns_init);
+
+void qrtr_ns_remove(void)
+{
+ cancel_work_sync(&qrtr_ns.work);
+ destroy_workqueue(qrtr_ns.workqueue);
+ sock_release(qrtr_ns.sock);
+}
+EXPORT_SYMBOL_GPL(qrtr_ns_remove);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm IPC Router Nameservice");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 5a8e42ad1504..e22092e4a783 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -7,7 +7,6 @@
#include <linux/netlink.h>
#include <linux/qrtr.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
-#include <linux/numa.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
@@ -96,7 +95,7 @@ static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
return container_of(sk, struct qrtr_sock, sk);
}
-static unsigned int qrtr_local_nid = NUMA_NO_NODE;
+static unsigned int qrtr_local_nid = 1;
/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_ATOMIC);
@@ -1241,38 +1240,6 @@ static int qrtr_create(struct net *net, struct socket *sock,
return 0;
}
-static const struct nla_policy qrtr_policy[IFA_MAX + 1] = {
- [IFA_LOCAL] = { .type = NLA_U32 },
-};
-
-static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack)
-{
- struct nlattr *tb[IFA_MAX + 1];
- struct ifaddrmsg *ifm;
- int rc;
-
- if (!netlink_capable(skb, CAP_NET_ADMIN))
- return -EPERM;
-
- if (!netlink_capable(skb, CAP_SYS_ADMIN))
- return -EPERM;
-
- ASSERT_RTNL();
-
- rc = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
- qrtr_policy, extack);
- if (rc < 0)
- return rc;
-
- ifm = nlmsg_data(nlh);
- if (!tb[IFA_LOCAL])
- return -EINVAL;
-
- qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);
- return 0;
-}
-
static const struct net_proto_family qrtr_family = {
.owner = THIS_MODULE,
.family = AF_QIPCRTR,
@@ -1293,11 +1260,7 @@ static int __init qrtr_proto_init(void)
return rc;
}
- rc = rtnl_register_module(THIS_MODULE, PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, 0);
- if (rc) {
- sock_unregister(qrtr_family.family);
- proto_unregister(&qrtr_proto);
- }
+ qrtr_ns_init();
return rc;
}
@@ -1305,7 +1268,7 @@ postcore_initcall(qrtr_proto_init);
static void __exit qrtr_proto_fini(void)
{
- rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
+ qrtr_ns_remove();
sock_unregister(qrtr_family.family);
proto_unregister(&qrtr_proto);
}
diff --git a/net/qrtr/qrtr.h b/net/qrtr/qrtr.h
index b81e6953c04b..dc2b67f17927 100644
--- a/net/qrtr/qrtr.h
+++ b/net/qrtr/qrtr.h
@@ -29,4 +29,8 @@ void qrtr_endpoint_unregister(struct qrtr_endpoint *ep);
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len);
+void qrtr_ns_init(void);
+
+void qrtr_ns_remove(void);
+
#endif
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index edde0e519438..bfbefb7bff9d 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -972,7 +972,7 @@ config NET_ACT_TUNNEL_KEY
config NET_ACT_CT
tristate "connection tracking tc action"
- depends on NET_CLS_ACT && NF_CONNTRACK && NF_NAT
+ depends on NET_CLS_ACT && NF_CONNTRACK && NF_NAT && NF_FLOW_TABLE
help
Say Y here to allow sending packets to the conntrack module.
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 8c466a712cda..df4560909157 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -185,6 +185,7 @@ static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
return nla_total_size(0) /* action number nested */
+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
+ cookie_len /* TCA_ACT_COOKIE */
+ + nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
+ nla_total_size(0) /* TCA_ACT_STATS nested */
+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
/* TCA_STATS_BASIC */
@@ -788,13 +789,20 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
}
rcu_read_unlock();
- if (a->tcfa_flags) {
- struct nla_bitfield32 flags = { a->tcfa_flags,
- a->tcfa_flags, };
+ if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
+ nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
+ a->hw_stats, TCA_ACT_HW_STATS_ANY))
+ goto nla_put_failure;
- if (nla_put(skb, TCA_ACT_FLAGS, sizeof(flags), &flags))
- goto nla_put_failure;
- }
+ if (a->used_hw_stats_valid &&
+ nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
+ a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
+ goto nla_put_failure;
+
+ if (a->tcfa_flags &&
+ nla_put_bitfield32(skb, TCA_ACT_FLAGS,
+ a->tcfa_flags, a->tcfa_flags))
+ goto nla_put_failure;
nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (nest == NULL)
@@ -854,7 +862,23 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
return c;
}
+static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
+{
+ struct nla_bitfield32 hw_stats_bf;
+
+ /* If the user did not pass the attr, that means they do
+ * not care about the type. Return "any" in that case,
+ * which means all supported types are set.
+ */
+ if (!hw_stats_attr)
+ return TCA_ACT_HW_STATS_ANY;
+ hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
+ return hw_stats_bf.value;
+}
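
TCA_ACT_HW_STATS is an NLA_BITFIELD32: userspace passes a {value, selector} pair, the kernel keeps only the value bits, and an absent attribute means "any". A self-contained sketch of the request semantics (the HW_STATS_* constants mirror the TCA_ACT_HW_STATS_* UAPI names; their bit values are an assumption here):

#include <stdint.h>
#include <stdio.h>

/* Mirrors struct nla_bitfield32 from linux/netlink.h. */
struct nla_bitfield32 { uint32_t value; uint32_t selector; };

/* Assumed UAPI encoding: immediate = bit 0, delayed = bit 1. */
#define HW_STATS_IMMEDIATE (1U << 0)
#define HW_STATS_DELAYED   (1U << 1)
#define HW_STATS_ANY       (HW_STATS_IMMEDIATE | HW_STATS_DELAYED)

int main(void)
{
        /* Request delayed counters only; selector marks the valid bits. */
        struct nla_bitfield32 req = { HW_STATS_DELAYED, HW_STATS_ANY };
        uint32_t hw_stats = HW_STATS_ANY;       /* default: attr absent */

        if (req.selector)                       /* attr present */
                hw_stats = req.value;
        printf("hw_stats request: %#x\n", hw_stats);
        return 0;
}
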
+
static const u32 tca_act_flags_allowed = TCA_ACT_FLAGS_NO_PERCPU_STATS;
+static const u32 tca_act_hw_stats_allowed = TCA_ACT_HW_STATS_ANY;
+
static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
[TCA_ACT_KIND] = { .type = NLA_STRING },
[TCA_ACT_INDEX] = { .type = NLA_U32 },
@@ -863,6 +887,8 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
[TCA_ACT_OPTIONS] = { .type = NLA_NESTED },
[TCA_ACT_FLAGS] = { .type = NLA_BITFIELD32,
.validation_data = &tca_act_flags_allowed },
+ [TCA_ACT_HW_STATS] = { .type = NLA_BITFIELD32,
+ .validation_data = &tca_act_hw_stats_allowed },
};
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
@@ -872,6 +898,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
struct netlink_ext_ack *extack)
{
struct nla_bitfield32 flags = { 0, 0 };
+ u8 hw_stats = TCA_ACT_HW_STATS_ANY;
struct tc_action *a;
struct tc_action_ops *a_o;
struct tc_cookie *cookie = NULL;
@@ -903,6 +930,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
goto err_out;
}
}
+ hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
if (tb[TCA_ACT_FLAGS])
flags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
} else {
@@ -953,6 +981,9 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
if (!name && tb[TCA_ACT_COOKIE])
tcf_set_action_cookie(&a->act_cookie, cookie);
+ if (!name)
+ a->hw_stats = hw_stats;
+
/* module count goes up only when brand new policy is created
* if it exists and is only bound to in a_o->init() then
* ACT_P_CREATED is not returned (a zero is).
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 46f47e58b3be..54d5652cfe6c 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -12,6 +12,7 @@
#include <linux/bpf.h>
#include <net/netlink.h>
+#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
@@ -53,6 +54,8 @@ static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
bpf_compute_data_pointers(skb);
filter_res = BPF_PROG_RUN(filter, skb);
}
+ if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
+ skb_orphan(skb);
rcu_read_unlock();
/* A BPF program may overwrite the default action opcode.
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 41114b463161..1a766393be62 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -15,6 +15,7 @@
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
@@ -24,6 +25,7 @@
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>
+#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
@@ -31,6 +33,523 @@
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>
+static struct workqueue_struct *act_ct_wq;
+static struct rhashtable zones_ht;
+static DEFINE_MUTEX(zones_mutex);
+
+struct tcf_ct_flow_table {
+ struct rhash_head node; /* In zones tables */
+
+ struct rcu_work rwork;
+ struct nf_flowtable nf_ft;
+ refcount_t ref;
+ u16 zone;
+
+ bool dying;
+};
+
+static const struct rhashtable_params zones_params = {
+ .head_offset = offsetof(struct tcf_ct_flow_table, node),
+ .key_offset = offsetof(struct tcf_ct_flow_table, zone),
+ .key_len = sizeof_field(struct tcf_ct_flow_table, zone),
+ .automatic_shrinking = true,
+};
+
+static struct flow_action_entry *
+tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
+{
+ int i = flow_action->num_entries++;
+
+ return &flow_action->entries[i];
+}
+
+static void tcf_ct_add_mangle_action(struct flow_action *action,
+ enum flow_action_mangle_base htype,
+ u32 offset,
+ u32 mask,
+ u32 val)
+{
+ struct flow_action_entry *entry;
+
+ entry = tcf_ct_flow_table_flow_action_get_next(action);
+ entry->id = FLOW_ACTION_MANGLE;
+ entry->mangle.htype = htype;
+ entry->mangle.mask = ~mask;
+ entry->mangle.offset = offset;
+ entry->mangle.val = val;
+}
+
+/* The following NAT helper functions check if the inverted reverse tuple
+ * (target) is different from the current dir tuple - meaning NAT for ports
+ * and/or IP is needed - and add the relevant mangle actions.
+ */
+static void
+tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple target,
+ struct flow_action *action)
+{
+ if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
+ offsetof(struct iphdr, saddr),
+ 0xFFFFFFFF,
+ be32_to_cpu(target.src.u3.ip));
+ if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
+ offsetof(struct iphdr, daddr),
+ 0xFFFFFFFF,
+ be32_to_cpu(target.dst.u3.ip));
+}
+
+static void
+tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
+ union nf_inet_addr *addr,
+ u32 offset)
+{
+ int i;
+
+ for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
+ i * sizeof(u32) + offset,
+ 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
+}
+
+static void
+tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple target,
+ struct flow_action *action)
+{
+ if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
+ tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
+ offsetof(struct ipv6hdr,
+ saddr));
+ if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
+ tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
+ offsetof(struct ipv6hdr,
+ daddr));
+}
+
+static void
+tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple target,
+ struct flow_action *action)
+{
+ __be16 target_src = target.src.u.tcp.port;
+ __be16 target_dst = target.dst.u.tcp.port;
+
+ if (target_src != tuple->src.u.tcp.port)
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
+ offsetof(struct tcphdr, source),
+ 0xFFFF, be16_to_cpu(target_src));
+ if (target_dst != tuple->dst.u.tcp.port)
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
+ offsetof(struct tcphdr, dest),
+ 0xFFFF, be16_to_cpu(target_dst));
+}
+
+static void
+tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple target,
+ struct flow_action *action)
+{
+ __be16 target_src = target.src.u.udp.port;
+ __be16 target_dst = target.dst.u.udp.port;
+
+ if (target_src != tuple->src.u.udp.port)
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
+ offsetof(struct udphdr, source),
+ 0xFFFF, be16_to_cpu(target_src));
+ if (target_dst != tuple->dst.u.udp.port)
+ tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
+ offsetof(struct udphdr, dest),
+ 0xFFFF, be16_to_cpu(target_dst));
+}
+
+static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ struct flow_action *action)
+{
+ struct nf_conn_labels *ct_labels;
+ struct flow_action_entry *entry;
+ enum ip_conntrack_info ctinfo;
+ u32 *act_ct_labels;
+
+ entry = tcf_ct_flow_table_flow_action_get_next(action);
+ entry->id = FLOW_ACTION_CT_METADATA;
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
+ entry->ct_metadata.mark = ct->mark;
+#endif
+ ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
+ IP_CT_ESTABLISHED_REPLY;
+ /* aligns with the CT reference taken by nf_ct_set() on the skb */
+ entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
+
+ act_ct_labels = entry->ct_metadata.labels;
+ ct_labels = nf_ct_labels_find(ct);
+ if (ct_labels)
+ memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
+ else
+ memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
+}
+
+static int tcf_ct_flow_table_add_action_nat(struct net *net,
+ struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ struct flow_action *action)
+{
+ const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
+ struct nf_conntrack_tuple target;
+
+ nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
+
+ switch (tuple->src.l3num) {
+ case NFPROTO_IPV4:
+ tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
+ action);
+ break;
+ case NFPROTO_IPV6:
+ tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
+ action);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (nf_ct_protonum(ct)) {
+ case IPPROTO_TCP:
+ tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
+ break;
+ case IPPROTO_UDP:
+ tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
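
The helpers above diff the current-direction tuple against the inverted reply tuple (target) and emit a mangle action only for each field NAT actually rewrote. A standalone illustration of that diff-then-mangle logic with simplified tuples (types and addresses here are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tuple { uint32_t saddr, daddr; uint16_t sport, dport; };

static void mangle(const char *field, uint32_t val)
{
        printf("FLOW_ACTION_MANGLE %s -> %#x\n", field, val);
}

int main(void)
{
        /* Original-direction tuple vs the inverted reply tuple (target). */
        struct tuple cur    = { 0x0a000001, 0x0a000002, 1234, 80 };
        struct tuple target = { 0x0a000001, 0x0a000002, 1234, 8080 };

        if (memcmp(&target.saddr, &cur.saddr, sizeof(target.saddr)))
                mangle("ipv4 saddr", target.saddr);
        if (memcmp(&target.daddr, &cur.daddr, sizeof(target.daddr)))
                mangle("ipv4 daddr", target.daddr);
        if (target.sport != cur.sport)
                mangle("tcp sport", target.sport);
        if (target.dport != cur.dport)
                mangle("tcp dport", target.dport);
        return 0;       /* prints one mangle: the rewritten dport */
}
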
+
+static int tcf_ct_flow_table_fill_actions(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir tdir,
+ struct nf_flow_rule *flow_rule)
+{
+ struct flow_action *action = &flow_rule->rule->action;
+ int num_entries = action->num_entries;
+ struct nf_conn *ct = flow->ct;
+ enum ip_conntrack_dir dir;
+ int i, err;
+
+ switch (tdir) {
+ case FLOW_OFFLOAD_DIR_ORIGINAL:
+ dir = IP_CT_DIR_ORIGINAL;
+ break;
+ case FLOW_OFFLOAD_DIR_REPLY:
+ dir = IP_CT_DIR_REPLY;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
+ if (err)
+ goto err_nat;
+
+ tcf_ct_flow_table_add_action_meta(ct, dir, action);
+ return 0;
+
+err_nat:
+ /* Clear filled actions */
+ for (i = num_entries; i < action->num_entries; i++)
+ memset(&action->entries[i], 0, sizeof(action->entries[i]));
+ action->num_entries = num_entries;
+
+ return err;
+}
+
+static struct nf_flowtable_type flowtable_ct = {
+ .action = tcf_ct_flow_table_fill_actions,
+ .owner = THIS_MODULE,
+};
+
+static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
+{
+ struct tcf_ct_flow_table *ct_ft;
+ int err = -ENOMEM;
+
+ mutex_lock(&zones_mutex);
+ ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
+ if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
+ goto out_unlock;
+
+ ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
+ if (!ct_ft)
+ goto err_alloc;
+ refcount_set(&ct_ft->ref, 1);
+
+ ct_ft->zone = params->zone;
+ err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
+ if (err)
+ goto err_insert;
+
+ ct_ft->nf_ft.type = &flowtable_ct;
+ ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD;
+ err = nf_flow_table_init(&ct_ft->nf_ft);
+ if (err)
+ goto err_init;
+
+ __module_get(THIS_MODULE);
+out_unlock:
+ params->ct_ft = ct_ft;
+ params->nf_ft = &ct_ft->nf_ft;
+ mutex_unlock(&zones_mutex);
+
+ return 0;
+
+err_init:
+ rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
+err_insert:
+ kfree(ct_ft);
+err_alloc:
+ mutex_unlock(&zones_mutex);
+ return err;
+}
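
tcf_ct_flow_table_get() is a get-or-create keyed by zone: the mutex serializes creators, while refcount_inc_not_zero() avoids resurrecting a table whose last reference is concurrently being dropped (the put runs outside this lock until the final teardown). A userspace analog of the pattern with a pthread mutex and C11 atomics (names hypothetical):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct zone_table {
        atomic_int ref;
        int zone;
        struct zone_table *next;        /* stand-in for the rhashtable */
};

static pthread_mutex_t zones_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct zone_table *zones;

/* Like refcount_inc_not_zero(): fail if the object is already dying. */
static int inc_not_zero(atomic_int *ref)
{
        int old = atomic_load(ref);

        while (old != 0)
                if (atomic_compare_exchange_weak(ref, &old, old + 1))
                        return 1;
        return 0;
}

static struct zone_table *zone_table_get(int zone)
{
        struct zone_table *t;

        pthread_mutex_lock(&zones_mutex);
        for (t = zones; t; t = t->next)
                if (t->zone == zone && inc_not_zero(&t->ref))
                        goto out;
        t = calloc(1, sizeof(*t));      /* not found: create with ref = 1 */
        if (t) {
                atomic_store(&t->ref, 1);
                t->zone = zone;
                t->next = zones;
                zones = t;
        }
out:
        pthread_mutex_unlock(&zones_mutex);
        return t;
}
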
+
+static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
+{
+ struct tcf_ct_flow_table *ct_ft;
+
+ ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
+ rwork);
+ nf_flow_table_free(&ct_ft->nf_ft);
+ kfree(ct_ft);
+
+ module_put(THIS_MODULE);
+}
+
+static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
+{
+ struct tcf_ct_flow_table *ct_ft = params->ct_ft;
+
+ if (refcount_dec_and_test(&params->ct_ft->ref)) {
+ rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
+ INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
+ queue_rcu_work(act_ct_wq, &ct_ft->rwork);
+ }
+}
+
+static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
+ struct nf_conn *ct,
+ bool tcp)
+{
+ struct flow_offload *entry;
+ int err;
+
+ if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
+ return;
+
+ entry = flow_offload_alloc(ct);
+ if (!entry) {
+ WARN_ON_ONCE(1);
+ goto err_alloc;
+ }
+
+ if (tcp) {
+ ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ }
+
+ err = flow_offload_add(&ct_ft->nf_ft, entry);
+ if (err)
+ goto err_add;
+
+ return;
+
+err_add:
+ flow_offload_free(entry);
+err_alloc:
+ clear_bit(IPS_OFFLOAD_BIT, &ct->status);
+}
+
+static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ bool tcp = false;
+
+ if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
+ return;
+
+ switch (nf_ct_protonum(ct)) {
+ case IPPROTO_TCP:
+ tcp = true;
+ if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+ return;
+ break;
+ case IPPROTO_UDP:
+ break;
+ default:
+ return;
+ }
+
+ if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
+ ct->status & IPS_SEQ_ADJUST)
+ return;
+
+ tcf_ct_flow_table_add(ct_ft, ct, tcp);
+}
+
+static bool
+tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
+ struct flow_offload_tuple *tuple,
+ struct tcphdr **tcph)
+{
+ struct flow_ports *ports;
+ unsigned int thoff;
+ struct iphdr *iph;
+
+ if (!pskb_network_may_pull(skb, sizeof(*iph)))
+ return false;
+
+ iph = ip_hdr(skb);
+ thoff = iph->ihl * 4;
+
+ if (ip_is_fragment(iph) ||
+ unlikely(thoff != sizeof(struct iphdr)))
+ return false;
+
+ if (iph->protocol != IPPROTO_TCP &&
+ iph->protocol != IPPROTO_UDP)
+ return false;
+
+ if (iph->ttl <= 1)
+ return false;
+
+ if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
+ thoff + sizeof(struct tcphdr) :
+ thoff + sizeof(*ports)))
+ return false;
+
+ iph = ip_hdr(skb);
+ if (iph->protocol == IPPROTO_TCP)
+ *tcph = (void *)(skb_network_header(skb) + thoff);
+
+ ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+ tuple->src_v4.s_addr = iph->saddr;
+ tuple->dst_v4.s_addr = iph->daddr;
+ tuple->src_port = ports->source;
+ tuple->dst_port = ports->dest;
+ tuple->l3proto = AF_INET;
+ tuple->l4proto = iph->protocol;
+
+ return true;
+}
+
+static bool
+tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
+ struct flow_offload_tuple *tuple,
+ struct tcphdr **tcph)
+{
+ struct flow_ports *ports;
+ struct ipv6hdr *ip6h;
+ unsigned int thoff;
+
+ if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
+ return false;
+
+ ip6h = ipv6_hdr(skb);
+
+ if (ip6h->nexthdr != IPPROTO_TCP &&
+ ip6h->nexthdr != IPPROTO_UDP)
+ return false;
+
+ if (ip6h->hop_limit <= 1)
+ return false;
+
+ thoff = sizeof(*ip6h);
+ if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
+ thoff + sizeof(struct tcphdr) :
+ thoff + sizeof(*ports)))
+ return false;
+
+ ip6h = ipv6_hdr(skb);
+ if (ip6h->nexthdr == IPPROTO_TCP)
+ *tcph = (void *)(skb_network_header(skb) + thoff);
+
+ ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+ tuple->src_v6 = ip6h->saddr;
+ tuple->dst_v6 = ip6h->daddr;
+ tuple->src_port = ports->source;
+ tuple->dst_port = ports->dest;
+ tuple->l3proto = AF_INET6;
+ tuple->l4proto = ip6h->nexthdr;
+
+ return true;
+}
+
+static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
+ struct sk_buff *skb,
+ u8 family)
+{
+ struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
+ struct flow_offload_tuple_rhash *tuplehash;
+ struct flow_offload_tuple tuple = {};
+ enum ip_conntrack_info ctinfo;
+ struct tcphdr *tcph = NULL;
+ struct flow_offload *flow;
+ struct nf_conn *ct;
+ u8 dir;
+
+ /* Previously seen or loopback */
+ ct = nf_ct_get(skb, &ctinfo);
+ if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
+ return false;
+
+ switch (family) {
+ case NFPROTO_IPV4:
+ if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
+ return false;
+ break;
+ case NFPROTO_IPV6:
+ if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ tuplehash = flow_offload_lookup(nf_ft, &tuple);
+ if (!tuplehash)
+ return false;
+
+ dir = tuplehash->tuple.dir;
+ flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+ ct = flow->ct;
+
+ if (tcph && (unlikely(tcph->fin || tcph->rst))) {
+ flow_offload_teardown(flow);
+ return false;
+ }
+
+ ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
+ IP_CT_ESTABLISHED_REPLY;
+
+ flow_offload_refresh(nf_ft, flow);
+ nf_conntrack_get(&ct->ct_general);
+ nf_ct_set(skb, ct, ctinfo);
+
+ return true;
+}
+
+static int tcf_ct_flow_tables_init(void)
+{
+ return rhashtable_init(&zones_ht, &zones_params);
+}
+
+static void tcf_ct_flow_tables_uninit(void)
+{
+ rhashtable_destroy(&zones_ht);
+}
+
static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;
@@ -207,6 +726,8 @@ static void tcf_ct_params_free(struct rcu_head *head)
struct tcf_ct_params *params = container_of(head,
struct tcf_ct_params, rcu);
+ tcf_ct_flow_table_put(params);
+
if (params->tmpl)
nf_conntrack_put(&params->tmpl->ct_general);
kfree(params);
@@ -387,6 +908,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
struct nf_hook_state state;
int nh_ofs, err, retval;
struct tcf_ct_params *p;
+ bool skip_add = false;
struct nf_conn *ct;
u8 family;
@@ -436,6 +958,11 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
*/
cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
if (!cached) {
+ if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
+ skip_add = true;
+ goto do_nat;
+ }
+
/* Associate skb with specified zone. */
if (tmpl) {
ct = nf_ct_get(skb, &ctinfo);
@@ -453,6 +980,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
goto out_push;
}
+do_nat:
ct = nf_ct_get(skb, &ctinfo);
if (!ct)
goto out_push;
@@ -470,6 +998,8 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
* even if the connection is already confirmed.
*/
nf_conntrack_confirm(skb);
+ } else if (!skip_add) {
+ tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
}
out_push:
@@ -730,6 +1260,10 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
if (err)
goto cleanup;
+ err = tcf_ct_flow_table_get(params);
+ if (err)
+ goto cleanup;
+
spin_lock_bh(&c->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
params = rcu_replace_pointer(c->params, params,
@@ -974,13 +1508,46 @@ static struct pernet_operations ct_net_ops = {
static int __init ct_init_module(void)
{
- return tcf_register_action(&act_ct_ops, &ct_net_ops);
+ int err;
+
+ act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
+ if (!act_ct_wq)
+ return -ENOMEM;
+
+ err = tcf_ct_flow_tables_init();
+ if (err)
+ goto err_tbl_init;
+
+ err = tcf_register_action(&act_ct_ops, &ct_net_ops);
+ if (err)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ tcf_ct_flow_tables_uninit();
+err_tbl_init:
+ destroy_workqueue(act_ct_wq);
+ return err;
}
static void __exit ct_cleanup_module(void)
{
tcf_unregister_action(&act_ct_ops, &ct_net_ops);
+ tcf_ct_flow_tables_uninit();
+ destroy_workqueue(act_ct_wq);
+}
+
+void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie)
+{
+ enum ip_conntrack_info ctinfo = cookie & NFCT_INFOMASK;
+ struct nf_conn *ct;
+
+ ct = (struct nf_conn *)(cookie & NFCT_PTRMASK);
+ nf_conntrack_get(&ct->ct_general);
+ nf_ct_set(skb, ct, ctinfo);
}
+EXPORT_SYMBOL_GPL(tcf_ct_flow_table_restore_skb);
module_init(ct_init_module);
module_exit(ct_cleanup_module);
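
tcf_ct_flow_table_restore_skb() undoes the packing done in tcf_ct_flow_table_add_action_meta(), where the ct_metadata cookie is built as (unsigned long)ct | ctinfo. A self-contained demo of that pointer-tagging trick (the masks assume the kernel's NFCT_INFOMASK of 7, i.e. nf_conn pointers aligned to at least 8 bytes):

#include <stdint.h>
#include <stdio.h>

#define INFOMASK 7UL            /* low 3 bits carry ctinfo (assumed) */
#define PTRMASK  (~INFOMASK)

int main(void)
{
        static long conn;               /* stand-in for struct nf_conn */
        unsigned long ctinfo = 3;       /* IP_CT_ESTABLISHED_REPLY */
        unsigned long cookie = (unsigned long)&conn | ctinfo;

        void *ct = (void *)(cookie & PTRMASK);
        unsigned long info = cookie & INFOMASK;

        printf("ct=%p info=%lu\n", ct, info);
        return !(ct == (void *)&conn && info == ctinfo);
}
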
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 3ad718576304..d41d6200d9de 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -409,6 +409,16 @@ done:
return p->tcf_action;
}
+static void tcf_pedit_stats_update(struct tc_action *a, u64 bytes, u32 packets,
+ u64 lastuse, bool hw)
+{
+ struct tcf_pedit *d = to_pedit(a);
+ struct tcf_t *tm = &d->tcf_tm;
+
+ tcf_action_update_stats(a, bytes, packets, false, hw);
+ tm->lastuse = max_t(u64, tm->lastuse, lastuse);
+}
+
static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
@@ -485,6 +495,7 @@ static struct tc_action_ops act_pedit_ops = {
.id = TCA_ID_PEDIT,
.owner = THIS_MODULE,
.act = tcf_pedit_act,
+ .stats_update = tcf_pedit_stats_update,
.dump = tcf_pedit_dump,
.cleanup = tcf_pedit_cleanup,
.init = tcf_pedit_init,
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index ce948c1e24dc..5e2df590bb58 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -267,14 +267,12 @@ tcf_sample_get_group(const struct tc_action *a,
struct tcf_sample *s = to_sample(a);
struct psample_group *group;
- spin_lock_bh(&s->tcf_lock);
group = rcu_dereference_protected(s->psample_group,
lockdep_is_held(&s->tcf_lock));
if (group) {
psample_group_take(group);
*destructor = tcf_psample_group_put;
}
- spin_unlock_bh(&s->tcf_lock);
return group;
}
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index e857424c387c..b125b2be4467 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -73,6 +73,16 @@ err:
return TC_ACT_SHOT;
}
+static void tcf_skbedit_stats_update(struct tc_action *a, u64 bytes,
+ u32 packets, u64 lastuse, bool hw)
+{
+ struct tcf_skbedit *d = to_skbedit(a);
+ struct tcf_t *tm = &d->tcf_tm;
+
+ tcf_action_update_stats(a, bytes, packets, false, hw);
+ tm->lastuse = max_t(u64, tm->lastuse, lastuse);
+}
+
static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
[TCA_SKBEDIT_PARMS] = { .len = sizeof(struct tc_skbedit) },
[TCA_SKBEDIT_PRIORITY] = { .len = sizeof(u32) },
@@ -323,6 +333,7 @@ static struct tc_action_ops act_skbedit_ops = {
.id = TCA_ID_SKBEDIT,
.owner = THIS_MODULE,
.act = tcf_skbedit_act,
+ .stats_update = tcf_skbedit_stats_update,
.dump = tcf_skbedit_dump,
.init = tcf_skbedit_init,
.cleanup = tcf_skbedit_cleanup,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index c2cdd0fc2e70..f6a3b969ead0 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -22,6 +22,7 @@
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>
+#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
@@ -354,7 +355,7 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
if (!chain)
return NULL;
- list_add_tail(&chain->list, &block->chain_list);
+ list_add_tail_rcu(&chain->list, &block->chain_list);
mutex_init(&chain->filter_chain_lock);
chain->block = block;
chain->index = chain_index;
@@ -394,7 +395,7 @@ static bool tcf_chain_detach(struct tcf_chain *chain)
ASSERT_BLOCK_LOCKED(block);
- list_del(&chain->list);
+ list_del_rcu(&chain->list);
if (!chain->index)
block->chain0.chain = NULL;
@@ -453,6 +454,20 @@ static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
return NULL;
}
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
+ u32 chain_index)
+{
+ struct tcf_chain *chain;
+
+ list_for_each_entry_rcu(chain, &block->chain_list, list) {
+ if (chain->index == chain_index)
+ return chain;
+ }
+ return NULL;
+}
+#endif
+
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
u32 seq, u16 flags, int event, bool unicast);
@@ -693,7 +708,7 @@ static void tc_indr_block_call(struct tcf_block *block,
};
INIT_LIST_HEAD(&bo.cb_list);
- flow_indr_block_call(dev, &bo, command);
+ flow_indr_block_call(dev, &bo, command, TC_SETUP_BLOCK);
tcf_block_setup(block, &bo);
}
@@ -1559,12 +1574,15 @@ static int tcf_block_setup(struct tcf_block *block,
* to this qdisc, (optionally) tests for protocol and asks
* specific classifiers.
*/
-int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res, bool compat_mode)
+static inline int __tcf_classify(struct sk_buff *skb,
+ const struct tcf_proto *tp,
+ const struct tcf_proto *orig_tp,
+ struct tcf_result *res,
+ bool compat_mode,
+ u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
const int max_reclassify_loop = 4;
- const struct tcf_proto *orig_tp = tp;
const struct tcf_proto *first_tp;
int limit = 0;
@@ -1582,21 +1600,11 @@ reclassify:
#ifdef CONFIG_NET_CLS_ACT
if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
first_tp = orig_tp;
+ *last_executed_chain = first_tp->chain->index;
goto reset;
} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
first_tp = res->goto_tp;
-
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- {
- struct tc_skb_ext *ext;
-
- ext = skb_ext_add(skb, TC_SKB_EXT);
- if (WARN_ON_ONCE(!ext))
- return TC_ACT_SHOT;
-
- ext->chain = err & TC_ACT_EXT_VAL_MASK;
- }
-#endif
+ *last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
goto reset;
}
#endif
@@ -1619,8 +1627,64 @@ reset:
goto reclassify;
#endif
}
+
+int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res, bool compat_mode)
+{
+ u32 last_executed_chain = 0;
+
+ return __tcf_classify(skb, tp, tp, res, compat_mode,
+ &last_executed_chain);
+}
EXPORT_SYMBOL(tcf_classify);
+int tcf_classify_ingress(struct sk_buff *skb,
+ const struct tcf_block *ingress_block,
+ const struct tcf_proto *tp,
+ struct tcf_result *res, bool compat_mode)
+{
+#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ u32 last_executed_chain = 0;
+
+ return __tcf_classify(skb, tp, tp, res, compat_mode,
+ &last_executed_chain);
+#else
+ u32 last_executed_chain = tp ? tp->chain->index : 0;
+ const struct tcf_proto *orig_tp = tp;
+ struct tc_skb_ext *ext;
+ int ret;
+
+ ext = skb_ext_find(skb, TC_SKB_EXT);
+
+ if (ext && ext->chain) {
+ struct tcf_chain *fchain;
+
+ fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
+ if (!fchain)
+ return TC_ACT_SHOT;
+
+ /* Consume, so cloned/redirect skbs won't inherit ext */
+ skb_ext_del(skb, TC_SKB_EXT);
+
+ tp = rcu_dereference_bh(fchain->filter_chain);
+ }
+
+ ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
+ &last_executed_chain);
+
+ /* If we missed on some chain */
+ if (ret == TC_ACT_UNSPEC && last_executed_chain) {
+ ext = skb_ext_add(skb, TC_SKB_EXT);
+ if (WARN_ON_ONCE(!ext))
+ return TC_ACT_SHOT;
+ ext->chain = last_executed_chain;
+ }
+
+ return ret;
+#endif
+}
+EXPORT_SYMBOL(tcf_classify_ingress);
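
The miss path above is what lets a partially offloaded, multi-chain ruleset resume in software: a datapath that executed chains up to N without reaching a final verdict records N in the TC_SKB_EXT extension, and the next classification pass restarts from that chain instead of chain 0. A condensed sketch of the round trip (the driver side is hypothetical):

/* Driver RX (hypothetical): hardware walked chains 0..2, missed on 3. */
struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT);
if (ext)
        ext->chain = 3;
netif_receive_skb(skb);

/* tcf_classify_ingress() then:
 *  1. skb_ext_find() returns chain 3; tcf_chain_lookup_rcu() resolves it
 *  2. skb_ext_del() so clones/redirects do not inherit the extension
 *  3. classification starts from that chain's filter list
 *  4. on TC_ACT_UNSPEC with a nonzero last_executed_chain, the extension
 *     is re-added so the next hop can resume in turn
 */
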
+
struct tcf_chain_info {
struct tcf_proto __rcu **pprev;
struct tcf_proto __rcu *next;
@@ -3382,14 +3446,40 @@ int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);
+static int tcf_act_get_cookie(struct flow_action_entry *entry,
+ const struct tc_action *act)
+{
+ struct tc_cookie *cookie;
+ int err = 0;
+
+ rcu_read_lock();
+ cookie = rcu_dereference(act->act_cookie);
+ if (cookie) {
+ entry->cookie = flow_action_cookie_create(cookie->data,
+ cookie->len,
+ GFP_ATOMIC);
+ if (!entry->cookie)
+ err = -ENOMEM;
+ }
+ rcu_read_unlock();
+ return err;
+}
+
+static void tcf_act_put_cookie(struct flow_action_entry *entry)
+{
+ flow_action_cookie_destroy(entry->cookie);
+}
+
void tc_cleanup_flow_action(struct flow_action *flow_action)
{
struct flow_action_entry *entry;
int i;
- flow_action_for_each(i, entry, flow_action)
+ flow_action_for_each(i, entry, flow_action) {
+ tcf_act_put_cookie(entry);
if (entry->destructor)
entry->destructor(entry->destructor_priv);
+ }
}
EXPORT_SYMBOL(tc_cleanup_flow_action);
@@ -3433,22 +3523,30 @@ static void tcf_sample_get_group(struct flow_action_entry *entry,
}
int tc_setup_flow_action(struct flow_action *flow_action,
- const struct tcf_exts *exts, bool rtnl_held)
+ const struct tcf_exts *exts)
{
- const struct tc_action *act;
+ struct tc_action *act;
int i, j, k, err = 0;
+ BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
+ BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
+ BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
+
if (!exts)
return 0;
- if (!rtnl_held)
- rtnl_lock();
-
j = 0;
tcf_exts_for_each_action(i, act, exts) {
struct flow_action_entry *entry;
entry = &flow_action->entries[j];
+ spin_lock_bh(&act->tcfa_lock);
+ err = tcf_act_get_cookie(entry, act);
+ if (err)
+ goto err_out_locked;
+
+ entry->hw_stats = act->hw_stats;
+
if (is_tcf_gact_ok(act)) {
entry->id = FLOW_ACTION_ACCEPT;
} else if (is_tcf_gact_shot(act)) {
@@ -3489,13 +3587,13 @@ int tc_setup_flow_action(struct flow_action *flow_action,
break;
default:
err = -EOPNOTSUPP;
- goto err_out;
+ goto err_out_locked;
}
} else if (is_tcf_tunnel_set(act)) {
entry->id = FLOW_ACTION_TUNNEL_ENCAP;
err = tcf_tunnel_encap_get_tunnel(entry, act);
if (err)
- goto err_out;
+ goto err_out_locked;
} else if (is_tcf_tunnel_release(act)) {
entry->id = FLOW_ACTION_TUNNEL_DECAP;
} else if (is_tcf_pedit(act)) {
@@ -3509,12 +3607,13 @@ int tc_setup_flow_action(struct flow_action *flow_action,
break;
default:
err = -EOPNOTSUPP;
- goto err_out;
+ goto err_out_locked;
}
entry->mangle.htype = tcf_pedit_htype(act, k);
entry->mangle.mask = tcf_pedit_mask(act, k);
entry->mangle.val = tcf_pedit_val(act, k);
entry->mangle.offset = tcf_pedit_offset(act, k);
+ entry->hw_stats = act->hw_stats;
entry = &flow_action->entries[++j];
}
} else if (is_tcf_csum(act)) {
@@ -3538,6 +3637,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
entry->id = FLOW_ACTION_CT;
entry->ct.action = tcf_ct_action(act);
entry->ct.zone = tcf_ct_zone(act);
+ entry->ct.flow_table = tcf_ct_ft(act);
} else if (is_tcf_mpls(act)) {
switch (tcf_mpls_action(act)) {
case TCA_MPLS_ACT_PUSH:
@@ -3560,28 +3660,32 @@ int tc_setup_flow_action(struct flow_action *flow_action,
entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
break;
default:
- goto err_out;
+ goto err_out_locked;
}
} else if (is_tcf_skbedit_ptype(act)) {
entry->id = FLOW_ACTION_PTYPE;
entry->ptype = tcf_skbedit_ptype(act);
+ } else if (is_tcf_skbedit_priority(act)) {
+ entry->id = FLOW_ACTION_PRIORITY;
+ entry->priority = tcf_skbedit_priority(act);
} else {
err = -EOPNOTSUPP;
- goto err_out;
+ goto err_out_locked;
}
+ spin_unlock_bh(&act->tcfa_lock);
if (!is_tcf_pedit(act))
j++;
}
err_out:
- if (!rtnl_held)
- rtnl_unlock();
-
if (err)
tc_cleanup_flow_action(flow_action);
return err;
+err_out_locked:
+ spin_unlock_bh(&act->tcfa_lock);
+ goto err_out;
}
EXPORT_SYMBOL(tc_setup_flow_action);
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index d32d4233d337..74a0febcafb8 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -450,8 +450,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
cls_flower.rule->match.key = &f->mkey;
cls_flower.classid = f->res.classid;
- err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
- rtnl_held);
+ err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
if (err) {
kfree(cls_flower.rule);
if (skip_sw) {
@@ -493,7 +492,9 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
cls_flower.stats.pkts,
- cls_flower.stats.lastused);
+ cls_flower.stats.lastused,
+ cls_flower.stats.used_hw_stats,
+ cls_flower.stats.used_hw_stats_valid);
}
static void __fl_put(struct cls_fl_filter *f)
@@ -739,7 +740,8 @@ static void fl_set_key_val(struct nlattr **tb,
}
static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
- struct fl_flow_key *mask)
+ struct fl_flow_key *mask,
+ struct netlink_ext_ack *extack)
{
fl_set_key_val(tb, &key->tp_range.tp_min.dst,
TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
@@ -754,20 +756,30 @@ static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
- if ((mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
- htons(key->tp_range.tp_max.dst) <=
- htons(key->tp_range.tp_min.dst)) ||
- (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
- htons(key->tp_range.tp_max.src) <=
- htons(key->tp_range.tp_min.src)))
+ if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
+ htons(key->tp_range.tp_max.dst) <=
+ htons(key->tp_range.tp_min.dst)) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ tb[TCA_FLOWER_KEY_PORT_DST_MIN],
+ "Invalid destination port range (min must be strictly smaller than max)");
return -EINVAL;
+ }
+ if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
+ htons(key->tp_range.tp_max.src) <=
+ htons(key->tp_range.tp_min.src)) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
+ "Invalid source port range (min must be strictly smaller than max)");
+ return -EINVAL;
+ }
return 0;
}
static int fl_set_key_mpls(struct nlattr **tb,
struct flow_dissector_key_mpls *key_val,
- struct flow_dissector_key_mpls *key_mask)
+ struct flow_dissector_key_mpls *key_mask,
+ struct netlink_ext_ack *extack)
{
if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
@@ -776,24 +788,36 @@ static int fl_set_key_mpls(struct nlattr **tb,
if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
- if (bos & ~MPLS_BOS_MASK)
+ if (bos & ~MPLS_BOS_MASK) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ tb[TCA_FLOWER_KEY_MPLS_BOS],
+ "Bottom Of Stack (BOS) must be 0 or 1");
return -EINVAL;
+ }
key_val->mpls_bos = bos;
key_mask->mpls_bos = MPLS_BOS_MASK;
}
if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
- if (tc & ~MPLS_TC_MASK)
+ if (tc & ~MPLS_TC_MASK) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ tb[TCA_FLOWER_KEY_MPLS_TC],
+ "Traffic Class (TC) must be between 0 and 7");
return -EINVAL;
+ }
key_val->mpls_tc = tc;
key_mask->mpls_tc = MPLS_TC_MASK;
}
if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
- if (label & ~MPLS_LABEL_MASK)
+ if (label & ~MPLS_LABEL_MASK) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ tb[TCA_FLOWER_KEY_MPLS_LABEL],
+ "Label must be between 0 and 1048575");
return -EINVAL;
+ }
key_val->mpls_label = label;
key_mask->mpls_label = MPLS_LABEL_MASK;
}
@@ -834,14 +858,16 @@ static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
}
}
-static int fl_set_key_flags(struct nlattr **tb,
- u32 *flags_key, u32 *flags_mask)
+static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
+ u32 *flags_mask, struct netlink_ext_ack *extack)
{
u32 key, mask;
/* mask is mandatory for flags */
- if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
+ if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
+ NL_SET_ERR_MSG(extack, "Missing flags mask");
return -EINVAL;
+ }
key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
@@ -1365,7 +1391,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
sizeof(key->icmp.code));
} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
- ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
+ ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
if (ret)
return ret;
} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
@@ -1390,7 +1416,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
if (key->basic.ip_proto == IPPROTO_TCP ||
key->basic.ip_proto == IPPROTO_UDP ||
key->basic.ip_proto == IPPROTO_SCTP) {
- ret = fl_set_key_port_range(tb, key, mask);
+ ret = fl_set_key_port_range(tb, key, mask, extack);
if (ret)
return ret;
}
@@ -1452,7 +1478,8 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
return ret;
if (tb[TCA_FLOWER_KEY_FLAGS])
- ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
+ ret = fl_set_key_flags(tb, &key->control.flags,
+ &mask->control.flags, extack);
return ret;
}
@@ -2001,8 +2028,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
cls_flower.rule->match.mask = &f->mask->key;
cls_flower.rule->match.key = &f->mkey;
- err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
- true);
+ err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
if (err) {
kfree(cls_flower.rule);
if (tc_skip_sw(f->flags)) {
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 610a0b728161..8d39dbcf1746 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -97,7 +97,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
cls_mall.command = TC_CLSMATCHALL_REPLACE;
cls_mall.cookie = cookie;
- err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts, true);
+ err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
if (err) {
kfree(cls_mall.rule);
mall_destroy_hw_filter(tp, head, cookie, NULL);
@@ -302,7 +302,7 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
cls_mall.cookie = (unsigned long)head;
- err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts, true);
+ err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
if (err) {
kfree(cls_mall.rule);
if (add && tc_skip_sw(head->flags)) {
@@ -338,7 +338,9 @@ static void mall_stats_hw_filter(struct tcf_proto *tp,
tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);
tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
- cls_mall.stats.pkts, cls_mall.stats.lastused);
+ cls_mall.stats.pkts, cls_mall.stats.lastused,
+ cls_mall.stats.used_hw_stats,
+ cls_mall.stats.used_hw_stats_valid);
}
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
diff --git a/net/sched/em_ipt.c b/net/sched/em_ipt.c
index 9fff6480acc6..eecfe072c508 100644
--- a/net/sched/em_ipt.c
+++ b/net/sched/em_ipt.c
@@ -22,7 +22,7 @@ struct em_ipt_match {
const struct xt_match *match;
u32 hook;
u8 nfproto;
- u8 match_data[0] __aligned(8);
+ u8 match_data[] __aligned(8);
};
struct em_ipt_xt_match {
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index 88c7ce42df7e..2c1192a2ee5e 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -16,7 +16,7 @@
struct nbyte_data {
struct tcf_em_nbyte hdr;
- char pattern[0];
+ char pattern[];
};
static int em_nbyte_change(struct net *net, void *data, int data_len,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 50794125bf02..0d99df1e764d 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -618,21 +618,28 @@ void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
}
EXPORT_SYMBOL(qdisc_watchdog_init);
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
+void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
+ u64 delta_ns)
{
if (test_bit(__QDISC_STATE_DEACTIVATED,
&qdisc_root_sleeping(wd->qdisc)->state))
return;
- if (wd->last_expires == expires)
- return;
+ if (hrtimer_is_queued(&wd->timer)) {
+ /* If timer is already set in [expires, expires + delta_ns],
+ * do not reprogram it.
+ */
+ if (wd->last_expires - expires <= delta_ns)
+ return;
+ }
wd->last_expires = expires;
- hrtimer_start(&wd->timer,
- ns_to_ktime(expires),
- HRTIMER_MODE_ABS_PINNED);
+ hrtimer_start_range_ns(&wd->timer,
+ ns_to_ktime(expires),
+ delta_ns,
+ HRTIMER_MODE_ABS_PINNED);
}
-EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
+EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);
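
The reprogram check is a single unsigned comparison: wd->last_expires - expires <= delta_ns holds exactly when expires <= last_expires <= expires + delta_ns, because an already-armed expiry earlier than the requested one wraps around to a huge value. A runnable demonstration of the trick:

#include <stdint.h>
#include <stdio.h>

/* True when cur (the armed expiry) lies in [want, want + slack]. */
static int in_window(uint64_t cur, uint64_t want, uint64_t slack)
{
        return cur - want <= slack;     /* unsigned wrap does the range test */
}

int main(void)
{
        printf("%d\n", in_window(1005, 1000, 10));      /* 1: keep timer */
        printf("%d\n", in_window(995, 1000, 10));       /* 0: fires too early */
        printf("%d\n", in_window(1011, 1000, 10));      /* 0: fires too late */
        return 0;
}
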
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index f4f9b8cdbffb..ee12ca9f55b4 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -58,7 +58,7 @@ struct atm_flow_data {
struct atm_flow_data *excess; /* flow for excess traffic;
NULL to set CLP instead */
int hdr_len;
- unsigned char hdr[0]; /* header data; MUST BE LAST */
+ unsigned char hdr[]; /* header data; MUST BE LAST */
};
struct atm_qdisc_data {
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 37c8aa75d70c..a579a4131d22 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
/* 1 band FIFO pseudo-"scheduler" */
@@ -51,8 +52,49 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_CN;
}
-static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
- struct netlink_ext_ack *extack)
+static void fifo_offload_init(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct tc_fifo_qopt_offload qopt;
+
+ if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+ return;
+
+ qopt.command = TC_FIFO_REPLACE;
+ qopt.handle = sch->handle;
+ qopt.parent = sch->parent;
+ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
+}
+
+static void fifo_offload_destroy(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct tc_fifo_qopt_offload qopt;
+
+ if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+ return;
+
+ qopt.command = TC_FIFO_DESTROY;
+ qopt.handle = sch->handle;
+ qopt.parent = sch->parent;
+ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
+}
+
+static int fifo_offload_dump(struct Qdisc *sch)
+{
+ struct tc_fifo_qopt_offload qopt;
+
+ qopt.command = TC_FIFO_STATS;
+ qopt.handle = sch->handle;
+ qopt.parent = sch->parent;
+ qopt.stats.bstats = &sch->bstats;
+ qopt.stats.qstats = &sch->qstats;
+
+ return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_FIFO, &qopt);
+}
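
On the driver side these commands arrive through ndo_setup_tc with type TC_SETUP_QDISC_FIFO. A minimal sketch of the handler a driver would add (the mydrv_* name is hypothetical; the qopt fields match those used above):

static int mydrv_setup_tc_fifo(struct net_device *dev,
                               struct tc_fifo_qopt_offload *qopt)
{
        switch (qopt->command) {
        case TC_FIFO_REPLACE:
                /* bind qopt->handle under qopt->parent in hardware */
                return 0;
        case TC_FIFO_DESTROY:
                /* tear down the hardware queue */
                return 0;
        case TC_FIFO_STATS:
                /* fill qopt->stats.bstats and qopt->stats.qstats */
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
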
+
+static int __fifo_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
bool bypass;
bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
@@ -82,10 +124,35 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
sch->flags |= TCQ_F_CAN_BYPASS;
else
sch->flags &= ~TCQ_F_CAN_BYPASS;
+
return 0;
}
-static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
+static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = __fifo_init(sch, opt, extack);
+ if (err)
+ return err;
+
+ fifo_offload_init(sch);
+ return 0;
+}
+
+static int fifo_hd_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ return __fifo_init(sch, opt, extack);
+}
+
+static void fifo_destroy(struct Qdisc *sch)
+{
+ fifo_offload_destroy(sch);
+}
+
+static int __fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct tc_fifo_qopt opt = { .limit = sch->limit };
@@ -97,6 +164,22 @@ nla_put_failure:
return -1;
}
+static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ int err;
+
+ err = fifo_offload_dump(sch);
+ if (err)
+ return err;
+
+ return __fifo_dump(sch, skb);
+}
+
+static int fifo_hd_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ return __fifo_dump(sch, skb);
+}
+
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
.id = "pfifo",
.priv_size = 0,
@@ -104,6 +187,7 @@ struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
.dequeue = qdisc_dequeue_head,
.peek = qdisc_peek_head,
.init = fifo_init,
+ .destroy = fifo_destroy,
.reset = qdisc_reset_queue,
.change = fifo_init,
.dump = fifo_dump,
@@ -118,6 +202,7 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
.dequeue = qdisc_dequeue_head,
.peek = qdisc_peek_head,
.init = fifo_init,
+ .destroy = fifo_destroy,
.reset = qdisc_reset_queue,
.change = fifo_init,
.dump = fifo_dump,
@@ -131,10 +216,10 @@ struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
.enqueue = pfifo_tail_enqueue,
.dequeue = qdisc_dequeue_head,
.peek = qdisc_peek_head,
- .init = fifo_init,
+ .init = fifo_hd_init,
.reset = qdisc_reset_queue,
- .change = fifo_init,
- .dump = fifo_dump,
+ .change = fifo_hd_init,
+ .dump = fifo_hd_dump,
.owner = THIS_MODULE,
};
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 371ad84def3b..4c060134c736 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -121,6 +121,8 @@ struct fq_sched_data {
u64 stat_flows_plimit;
u64 stat_pkts_too_long;
u64 stat_allocation_errors;
+
+ u32 timer_slack; /* hrtimer slack in ns */
struct qdisc_watchdog watchdog;
};
@@ -504,8 +506,9 @@ begin:
head = &q->old_flows;
if (!head->first) {
if (q->time_next_delayed_flow != ~0ULL)
- qdisc_watchdog_schedule_ns(&q->watchdog,
- q->time_next_delayed_flow);
+ qdisc_watchdog_schedule_range_ns(&q->watchdog,
+ q->time_next_delayed_flow,
+ q->timer_slack);
return NULL;
}
}
@@ -735,6 +738,8 @@ static int fq_resize(struct Qdisc *sch, u32 log)
}
static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
+ [TCA_FQ_UNSPEC] = { .strict_start_type = TCA_FQ_TIMER_SLACK },
+
[TCA_FQ_PLIMIT] = { .type = NLA_U32 },
[TCA_FQ_FLOW_PLIMIT] = { .type = NLA_U32 },
[TCA_FQ_QUANTUM] = { .type = NLA_U32 },
@@ -747,6 +752,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
[TCA_FQ_ORPHAN_MASK] = { .type = NLA_U32 },
[TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
[TCA_FQ_CE_THRESHOLD] = { .type = NLA_U32 },
+ [TCA_FQ_TIMER_SLACK] = { .type = NLA_U32 },
};
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
@@ -833,6 +839,9 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
q->ce_threshold = (u64)NSEC_PER_USEC *
nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);
+ if (tb[TCA_FQ_TIMER_SLACK])
+ q->timer_slack = nla_get_u32(tb[TCA_FQ_TIMER_SLACK]);
+
if (!err) {
sch_tree_unlock(sch);
err = fq_resize(sch, fq_log);
@@ -884,6 +893,8 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
q->orphan_mask = 1024 - 1;
q->low_rate_threshold = 550000 / 8;
+ q->timer_slack = 10 * NSEC_PER_USEC; /* 10 usec of hrtimer slack */
+
/* Default ce_threshold of 4294 seconds */
q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;
@@ -924,7 +935,8 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
q->low_rate_threshold) ||
nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
- nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
+ nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log) ||
+ nla_put_u32(skb, TCA_FQ_TIMER_SLACK, q->timer_slack))
goto nla_put_failure;
return nla_nest_end(skb, opts);
@@ -947,7 +959,8 @@ static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
st.flows_plimit = q->stat_flows_plimit;
st.pkts_too_long = q->stat_pkts_too_long;
st.allocation_errors = q->stat_allocation_errors;
- st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
+ st.time_next_delayed_flow = q->time_next_delayed_flow + q->timer_slack -
+ ktime_get_ns();
st.flows = q->flows;
st.inactive_flows = q->inactive_flows;
st.throttled_flows = q->throttled_flows;
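
The [TCA_FQ_UNSPEC] entry with .strict_start_type above is how a leniently-parsed legacy policy opts its new attributes into strict netlink validation: every attribute type from TCA_FQ_TIMER_SLACK onward is validated strictly, while older attributes keep the legacy behaviour. A fragment sketching the idiom with hypothetical MY_* names:

/* Attributes >= MY_NEW_ATTR get strict validation; older attributes
 * keep lenient legacy parsing (sketch, not a complete policy).
 */
static const struct nla_policy my_policy[MY_MAX + 1] = {
        [MY_UNSPEC]   = { .strict_start_type = MY_NEW_ATTR },
        [MY_OLD_ATTR] = { .type = NLA_U32 },
        [MY_NEW_ATTR] = { .type = NLA_U32 },
};
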
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 214657eb3dfd..a9da8776bf5b 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -189,7 +189,6 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
out:
q->stats.dropped++;
sel_flow->vars.accu_prob = 0;
- sel_flow->vars.accu_prob_overflows = 0;
__qdisc_drop(skb, to_free);
qdisc_qstats_drop(sch);
return NET_XMIT_CN;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6c9595f1048a..2efd5b61acef 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1391,6 +1391,14 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
}
EXPORT_SYMBOL(mini_qdisc_pair_swap);
+void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
+ struct tcf_block *block)
+{
+ miniqp->miniq1.block = block;
+ miniqp->miniq2.block = block;
+}
+EXPORT_SYMBOL(mini_qdisc_pair_block_init);
+
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc __rcu **p_miniq)
{
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index bf56aa519797..84838128b9c5 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -78,6 +78,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
{
struct ingress_sched_data *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
+ int err;
net_inc_ingress_queue();
@@ -87,7 +88,13 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
q->block_info.chain_head_change = clsact_chain_head_change;
q->block_info.chain_head_change_priv = &q->miniqp;
- return tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
+ err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
+ if (err)
+ return err;
+
+ mini_qdisc_pair_block_init(&q->miniqp, q->block);
+
+ return 0;
}
static void ingress_destroy(struct Qdisc *sch)
@@ -226,6 +233,8 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
if (err)
return err;
+ mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block);
+
mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress);
q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 42e557d48e4e..84f82771cdf5 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -66,7 +66,7 @@
struct disttable {
u32 size;
- s16 table[0];
+ s16 table[];
};
struct netem_sched_data {
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 915bcdb59a9f..c65077f0c0f3 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -31,7 +31,7 @@ struct pie_sched_data {
};
bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
- struct pie_vars *vars, u32 qlen, u32 packet_size)
+ struct pie_vars *vars, u32 backlog, u32 packet_size)
{
u64 rnd;
u64 local_prob = vars->prob;
@@ -51,7 +51,7 @@ bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
/* If we have fewer than 2 mtu-sized packets, disable pie_drop_early,
* similar to min_th in RED
*/
- if (qlen < 2 * mtu)
+ if (backlog < 2 * mtu)
return false;
/* If bytemode is turned on, use packet size to compute new
@@ -62,27 +62,19 @@ bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
else
local_prob = vars->prob;
- if (local_prob == 0) {
+ if (local_prob == 0)
vars->accu_prob = 0;
- vars->accu_prob_overflows = 0;
- }
-
- if (local_prob > MAX_PROB - vars->accu_prob)
- vars->accu_prob_overflows++;
-
- vars->accu_prob += local_prob;
+ else
+ vars->accu_prob += local_prob;
- if (vars->accu_prob_overflows == 0 &&
- vars->accu_prob < (MAX_PROB / 100) * 85)
+ if (vars->accu_prob < (MAX_PROB / 100) * 85)
return false;
- if (vars->accu_prob_overflows == 8 &&
- vars->accu_prob >= MAX_PROB / 2)
+ if (vars->accu_prob >= (MAX_PROB / 2) * 17)
return true;
prandom_bytes(&rnd, 8);
- if (rnd < local_prob) {
+ if ((rnd >> BITS_PER_BYTE) < local_prob) {
vars->accu_prob = 0;
- vars->accu_prob_overflows = 0;
return true;
}
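
With the separate overflow counter gone, accu_prob is compared directly against roughly 8.5 * MAX_PROB (written (MAX_PROB / 2) * 17), and the 64-bit random draw is shifted down by BITS_PER_BYTE so it lands in the same fixed-point range as local_prob. Both only work if MAX_PROB leaves 8 bits of headroom in a u64; a small demo under that assumption (MAX_PROB = U64_MAX >> 8, as the rest of this series defines it):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8
#define MAX_PROB (UINT64_MAX >> BITS_PER_BYTE)  /* assumed new scale */

int main(void)
{
        /* ~8.5 * MAX_PROB still fits in a u64: no overflow counter needed. */
        uint64_t hard_mark = (MAX_PROB / 2) * 17;

        /* A full 64-bit random draw, scaled into [0, MAX_PROB]. */
        uint64_t rnd = 0xdeadbeefcafef00dULL >> BITS_PER_BYTE;

        printf("MAX_PROB  = %" PRIu64 "\n", MAX_PROB);
        printf("hard mark = %" PRIu64 "\n", hard_mark);
        printf("scaled rnd fits: %d\n", rnd <= MAX_PROB);
        return 0;
}
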
@@ -129,7 +121,6 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
out:
q->stats.dropped++;
q->vars.accu_prob = 0;
- q->vars.accu_prob_overflows = 0;
return qdisc_drop(skb, sch, to_free);
}
@@ -215,7 +206,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
}
void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
- struct pie_vars *vars, u32 qlen)
+ struct pie_vars *vars, u32 backlog)
{
psched_time_t now = psched_get_time();
u32 dtime = 0;
@@ -231,7 +222,7 @@ void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
vars->dq_tstamp = now;
- if (qlen == 0)
+ if (backlog == 0)
vars->qdelay = 0;
if (dtime == 0)
@@ -244,7 +235,7 @@ void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
* we have enough packets to calculate the drain rate. Save
* current time as dq_tstamp and start measurement cycle.
*/
- if (qlen >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {
+ if (backlog >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {
vars->dq_tstamp = psched_get_time();
vars->dq_count = 0;
}
@@ -283,7 +274,7 @@ void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
* dq_count to 0 to re-enter the if block when the next
* packet is dequeued
*/
- if (qlen < QUEUE_THRESHOLD) {
+ if (backlog < QUEUE_THRESHOLD) {
vars->dq_count = DQCOUNT_INVALID;
} else {
vars->dq_count = 0;
@@ -307,7 +298,7 @@ burst_allowance_reduction:
EXPORT_SYMBOL_GPL(pie_process_dequeue);
void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
- u32 qlen)
+ u32 backlog)
{
psched_time_t qdelay = 0; /* in pschedtime */
psched_time_t qdelay_old = 0; /* in pschedtime */
@@ -322,7 +313,7 @@ void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
vars->qdelay_old = vars->qdelay;
if (vars->avg_dq_rate > 0)
- qdelay = (qlen << PIE_SCALE) / vars->avg_dq_rate;
+ qdelay = (backlog << PIE_SCALE) / vars->avg_dq_rate;
else
qdelay = 0;
} else {
@@ -330,10 +321,10 @@ void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
qdelay_old = vars->qdelay_old;
}
- /* If qdelay is zero and qlen is not, it means qlen is very small,
+ /* If qdelay is zero and backlog is not, it means backlog is very small,
* so we do not update probability in this round.
*/
- if (qdelay == 0 && qlen != 0)
+ if (qdelay == 0 && backlog != 0)
update_prob = false;
/* In the algorithm, alpha and beta are between 0 and 2 with typical
@@ -363,8 +354,8 @@ void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
}
/* alpha and beta should be between 0 and 32, in multiples of 1/16 */
- delta += alpha * (u64)(qdelay - params->target);
- delta += beta * (u64)(qdelay - qdelay_old);
+ delta += alpha * (qdelay - params->target);
+ delta += beta * (qdelay - qdelay_old);
oldprob = vars->prob;
@@ -409,7 +400,7 @@ void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
vars->prob -= vars->prob / 64;
vars->qdelay = qdelay;
- vars->qlen_old = qlen;
+ vars->backlog_old = backlog;
/* We restart the measurement cycle if the following conditions are met
* 1. If the delay has been low for 2 consecutive Tupdate periods
@@ -502,7 +493,7 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct pie_sched_data *q = qdisc_priv(sch);
struct tc_pie_xstats st = {
- .prob = q->vars.prob,
+ .prob = q->vars.prob << BITS_PER_BYTE,
.delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
NSEC_PER_USEC,
.packets_in = q->stats.packets_in,
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 1695421333e3..c7de47c942e3 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -35,7 +35,11 @@
struct red_sched_data {
u32 limit; /* HARD maximal queue length */
+
unsigned char flags;
+ /* Non-flags in tc_red_qopt.flags. */
+ unsigned char userbits;
+
struct timer_list adapt_timer;
struct Qdisc *sch;
struct red_parms parms;
@@ -44,6 +48,8 @@ struct red_sched_data {
struct Qdisc *qdisc;
};
+static const u32 red_supported_flags = TC_RED_HISTORIC_FLAGS | TC_RED_NODROP;
+
static inline int red_use_ecn(struct red_sched_data *q)
{
return q->flags & TC_RED_ECN;
@@ -54,6 +60,11 @@ static inline int red_use_harddrop(struct red_sched_data *q)
return q->flags & TC_RED_HARDDROP;
}
+static int red_use_nodrop(struct red_sched_data *q)
+{
+ return q->flags & TC_RED_NODROP;
+}
+
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
@@ -74,23 +85,36 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
case RED_PROB_MARK:
qdisc_qstats_overlimit(sch);
- if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
+ if (!red_use_ecn(q)) {
q->stats.prob_drop++;
goto congestion_drop;
}
- q->stats.prob_mark++;
+ if (INET_ECN_set_ce(skb)) {
+ q->stats.prob_mark++;
+ } else if (!red_use_nodrop(q)) {
+ q->stats.prob_drop++;
+ goto congestion_drop;
+ }
+
+ /* Non-ECT packet in ECN nodrop mode: queue it. */
break;
case RED_HARD_MARK:
qdisc_qstats_overlimit(sch);
- if (red_use_harddrop(q) || !red_use_ecn(q) ||
- !INET_ECN_set_ce(skb)) {
+ if (red_use_harddrop(q) || !red_use_ecn(q)) {
+ q->stats.forced_drop++;
+ goto congestion_drop;
+ }
+
+ if (INET_ECN_set_ce(skb)) {
+ q->stats.forced_mark++;
+ } else if (!red_use_nodrop(q)) {
q->stats.forced_drop++;
goto congestion_drop;
}
- q->stats.forced_mark++;
+ /* Non-ECT packet in ECN nodrop mode: queue it. */
break;
}
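
The rewritten enqueue path separates the three outcomes that were previously folded into one condition: without ECN the packet drops as in classic RED, an ECT packet gets CE-marked, and a non-ECT packet now survives when the new nodrop flag is set. A minimal decision-table sketch, with plain booleans standing in for the skb and qdisc state:

#include <stdbool.h>
#include <stdio.h>

enum red_action { RED_ENQUEUE_MARKED, RED_ENQUEUE_UNMARKED, RED_DROP };

static enum red_action red_prob_action(bool use_ecn, bool use_nodrop,
                                       bool pkt_is_ect)
{
        if (!use_ecn)
                return RED_DROP;              /* classic RED behavior */
        if (pkt_is_ect)
                return RED_ENQUEUE_MARKED;    /* INET_ECN_set_ce() succeeded */
        if (!use_nodrop)
                return RED_DROP;              /* non-ECT, nodrop disabled */
        return RED_ENQUEUE_UNMARKED;          /* non-ECT in ECN nodrop mode */
}

int main(void)
{
        /* non-ECT packet, ECN + nodrop enabled: queued unmarked (1) */
        printf("%d\n", red_prob_action(true, true, false));
        return 0;
}
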
@@ -165,6 +189,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
opt.set.limit = q->limit;
opt.set.is_ecn = red_use_ecn(q);
opt.set.is_harddrop = red_use_harddrop(q);
+ opt.set.is_nodrop = red_use_nodrop(q);
opt.set.qstats = &sch->qstats;
} else {
opt.command = TC_RED_DESTROY;
@@ -183,9 +208,12 @@ static void red_destroy(struct Qdisc *sch)
}
static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
+ [TCA_RED_UNSPEC] = { .strict_start_type = TCA_RED_FLAGS },
[TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) },
[TCA_RED_STAB] = { .len = RED_STAB_SIZE },
[TCA_RED_MAX_P] = { .type = NLA_U32 },
+ [TCA_RED_FLAGS] = { .type = NLA_BITFIELD32,
+ .validation_data = &red_supported_flags },
};
static int red_change(struct Qdisc *sch, struct nlattr *opt,
@@ -194,7 +222,10 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
struct Qdisc *old_child = NULL, *child = NULL;
struct red_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_RED_MAX + 1];
+ struct nla_bitfield32 flags_bf;
struct tc_red_qopt *ctl;
+ unsigned char userbits;
+ unsigned char flags;
int err;
u32 max_P;
@@ -216,6 +247,12 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
return -EINVAL;
+ err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
+ tb[TCA_RED_FLAGS], red_supported_flags,
+ &flags_bf, &userbits, extack);
+ if (err)
+ return err;
+
if (ctl->limit > 0) {
child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
extack);
@@ -227,7 +264,14 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
}
sch_tree_lock(sch);
- q->flags = ctl->flags;
+
+ flags = (q->flags & ~flags_bf.selector) | flags_bf.value;
+ err = red_validate_flags(flags, extack);
+ if (err)
+ goto unlock_out;
+
+ q->flags = flags;
+ q->userbits = userbits;
q->limit = ctl->limit;
if (child) {
qdisc_tree_flush_backlog(q->qdisc);
@@ -256,6 +300,12 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
if (old_child)
qdisc_put(old_child);
return 0;
+
+unlock_out:
+ sch_tree_unlock(sch);
+ if (child)
+ qdisc_put(child);
+ return err;
}
static inline void red_adaptative_timer(struct timer_list *t)
@@ -302,7 +352,8 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
struct nlattr *opts = NULL;
struct tc_red_qopt opt = {
.limit = q->limit,
- .flags = q->flags,
+ .flags = (q->flags & TC_RED_HISTORIC_FLAGS) |
+ q->userbits,
.qth_min = q->parms.qth_min >> q->parms.Wlog,
.qth_max = q->parms.qth_max >> q->parms.Wlog,
.Wlog = q->parms.Wlog,
@@ -319,7 +370,9 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
if (opts == NULL)
goto nla_put_failure;
if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
- nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
+ nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
+ nla_put_bitfield32(skb, TCA_RED_FLAGS,
+ q->flags, red_supported_flags))
goto nla_put_failure;
return nla_nest_end(skb, opts);
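
The new TCA_RED_FLAGS attribute is an NLA_BITFIELD32, so userspace updates only the flag bits named in its selector while everything else is preserved, and bits outside the supported set are rejected up front. A small sketch of that merge, with illustrative flag constants rather than the uapi values:

#include <stdint.h>
#include <stdio.h>

#define FLAG_ECN      0x1
#define FLAG_HARDDROP 0x2
#define FLAG_NODROP   0x4
#define SUPPORTED     (FLAG_ECN | FLAG_HARDDROP | FLAG_NODROP)

struct bitfield32 { uint32_t value, selector; };

static int merge_flags(uint32_t *flags, struct bitfield32 bf)
{
        if (bf.selector & ~SUPPORTED)
                return -1;      /* the kernel rejects this in policy validation */
        *flags = (*flags & ~bf.selector) | (bf.value & bf.selector);
        return 0;
}

int main(void)
{
        uint32_t flags = FLAG_ECN;
        struct bitfield32 bf = { .value = FLAG_NODROP,
                                 .selector = FLAG_NODROP | FLAG_HARDDROP };

        /* sets NODROP, clears HARDDROP, leaves ECN untouched */
        if (!merge_flags(&flags, bf))
                printf("flags = 0x%x\n", flags);
        return 0;
}
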
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 1069d7af3672..493fc01e5d2b 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -428,11 +428,12 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
}
-static int sctp_diag_dump_one(struct sk_buff *in_skb,
- const struct nlmsghdr *nlh,
+static int sctp_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
+ struct sk_buff *in_skb = cb->skb;
struct net *net = sock_net(in_skb->sk);
+ const struct nlmsghdr *nlh = cb->nlh;
union sctp_addr laddr, paddr;
struct sctp_comm_param commp = {
.skb = in_skb,
@@ -466,7 +467,7 @@ static int sctp_diag_dump_one(struct sk_buff *in_skb,
}
static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r, struct nlattr *bc)
+ const struct inet_diag_req_v2 *r)
{
u32 idiag_states = r->idiag_states;
struct net *net = sock_net(skb->sk);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index efaaefc3bb1c..55d4fc6f371d 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -548,6 +548,7 @@ out:
/* Common cleanup code for icmp/icmpv6 error handler. */
void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
+ __releases(&((__sk)->sk_lock.slock))
{
bh_unlock_sock(sk);
sctp_transport_put(t);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index bc734cfaa29e..c87af430107a 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -228,7 +228,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
{
struct sctp_association *asoc = t->asoc;
struct dst_entry *dst = NULL;
- struct flowi6 *fl6 = &fl->u.ip6;
+ struct flowi _fl;
+ struct flowi6 *fl6 = &_fl.u.ip6;
struct sctp_bind_addr *bp;
struct ipv6_pinfo *np = inet6_sk(sk);
struct sctp_sockaddr_entry *laddr;
@@ -238,7 +239,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
enum sctp_scope scope;
__u8 matchlen = 0;
- memset(fl6, 0, sizeof(struct flowi6));
+ memset(&_fl, 0, sizeof(_fl));
fl6->daddr = daddr->v6.sin6_addr;
fl6->fl6_dport = daddr->v6.sin6_port;
fl6->flowi6_proto = IPPROTO_SCTP;
@@ -276,8 +277,11 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
rcu_read_unlock();
dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
- if (!asoc || saddr)
+ if (!asoc || saddr) {
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
goto out;
+ }
bp = &asoc->base.bind_addr;
scope = sctp_scope(daddr);
@@ -300,6 +304,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
if ((laddr->a.sa.sa_family == AF_INET6) &&
(sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) {
rcu_read_unlock();
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
goto out;
}
}
@@ -338,6 +344,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
if (!IS_ERR_OR_NULL(dst))
dst_release(dst);
dst = bdst;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
break;
}
@@ -351,6 +359,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
dst_release(dst);
dst = bdst;
matchlen = bmatchlen;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
}
rcu_read_unlock();
@@ -359,14 +369,12 @@ out:
struct rt6_info *rt;
rt = (struct rt6_info *)dst;
- t->dst = dst;
t->dst_cookie = rt6_get_cookie(rt);
pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
&rt->rt6i_dst.addr, rt->rt6i_dst.plen,
- &fl6->saddr);
+ &fl->u.ip6.saddr);
} else {
t->dst = NULL;
-
pr_debug("no route\n");
}
}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 78af2fcf90cc..092d1afdee0d 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -409,7 +409,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
{
struct sctp_association *asoc = t->asoc;
struct rtable *rt;
- struct flowi4 *fl4 = &fl->u.ip4;
+ struct flowi _fl;
+ struct flowi4 *fl4 = &_fl.u.ip4;
struct sctp_bind_addr *bp;
struct sctp_sockaddr_entry *laddr;
struct dst_entry *dst = NULL;
@@ -419,7 +420,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
if (t->dscp & SCTP_DSCP_SET_MASK)
tos = t->dscp & SCTP_DSCP_VAL_MASK;
- memset(fl4, 0x0, sizeof(struct flowi4));
+ memset(&_fl, 0x0, sizeof(_fl));
fl4->daddr = daddr->v4.sin_addr.s_addr;
fl4->fl4_dport = daddr->v4.sin_port;
fl4->flowi4_proto = IPPROTO_SCTP;
@@ -438,8 +439,11 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
&fl4->saddr);
rt = ip_route_output_key(sock_net(sk), fl4);
- if (!IS_ERR(rt))
+ if (!IS_ERR(rt)) {
dst = &rt->dst;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+ }
/* If there is no association or if a source address is passed, no
* more validation is required.
@@ -502,27 +506,33 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
false);
if (!odev || odev->ifindex != fl4->flowi4_oif) {
- if (!dst)
+ if (!dst) {
dst = &rt->dst;
- else
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+ } else {
dst_release(&rt->dst);
+ }
continue;
}
dst_release(dst);
dst = &rt->dst;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
break;
}
out_unlock:
rcu_read_unlock();
out:
- t->dst = dst;
- if (dst)
+ if (dst) {
pr_debug("rt_dst:%pI4, rt_src:%pI4\n",
- &fl4->daddr, &fl4->saddr);
- else
+ &fl->u.ip4.daddr, &fl->u.ip4.saddr);
+ } else {
+ t->dst = NULL;
pr_debug("no route\n");
+ }
}
/* For v4, the source address is cached in the route entry(dst). So no need
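
Both the v4 and v6 lookup paths now build the flow key in an on-stack scratch copy and publish it, together with t->dst, only once a route has actually been accepted, so concurrent readers never see a fresh flow paired with a stale destination. A simplified commit-on-success sketch, with toy stand-ins for struct flowi and the route lookup:

#include <stdio.h>
#include <string.h>

struct flow { unsigned int daddr, saddr; };
struct route { int valid; };

static struct route *lookup_route(const struct flow *fl)
{
        static struct route r = { .valid = 1 };
        return fl->daddr ? &r : NULL;   /* pretend lookup */
}

static void get_dst(struct flow *out_fl, struct route **out_dst,
                    unsigned int daddr)
{
        struct flow scratch;            /* plays the role of _fl above */
        struct route *rt;

        memset(&scratch, 0, sizeof(scratch));
        scratch.daddr = daddr;

        rt = lookup_route(&scratch);
        if (rt) {
                *out_dst = rt;                             /* t->dst = dst; */
                memcpy(out_fl, &scratch, sizeof(scratch)); /* commit flow key */
        } else {
                *out_dst = NULL;                           /* no route */
        }
}

int main(void)
{
        struct flow fl = { 0 };
        struct route *dst;

        get_dst(&fl, &dst, 0x0a000001);
        printf("route %s, daddr 0x%x\n", dst ? "ok" : "none", fl.daddr);
        return 0;
}
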
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1b56fc440606..827a9903ee28 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -147,29 +147,44 @@ static void sctp_clear_owner_w(struct sctp_chunk *chunk)
skb_orphan(chunk->skb);
}
+#define traverse_and_process() \
+do { \
+ msg = chunk->msg; \
+ if (msg == prev_msg) \
+ continue; \
+ list_for_each_entry(c, &msg->chunks, frag_list) { \
+ if ((clear && asoc->base.sk == c->skb->sk) || \
+ (!clear && asoc->base.sk != c->skb->sk)) \
+ cb(c); \
+ } \
+ prev_msg = msg; \
+} while (0)
+
static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+ bool clear,
void (*cb)(struct sctp_chunk *))
{
+ struct sctp_datamsg *msg, *prev_msg = NULL;
struct sctp_outq *q = &asoc->outqueue;
+ struct sctp_chunk *chunk, *c;
struct sctp_transport *t;
- struct sctp_chunk *chunk;
list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
list_for_each_entry(chunk, &t->transmitted, transmitted_list)
- cb(chunk);
+ traverse_and_process();
list_for_each_entry(chunk, &q->retransmit, transmitted_list)
- cb(chunk);
+ traverse_and_process();
list_for_each_entry(chunk, &q->sacked, transmitted_list)
- cb(chunk);
+ traverse_and_process();
list_for_each_entry(chunk, &q->abandoned, transmitted_list)
- cb(chunk);
+ traverse_and_process();
list_for_each_entry(chunk, &q->out_chunk_list, list)
- cb(chunk);
+ traverse_and_process();
}
static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
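
traverse_and_process() exploits the fact that chunks belonging to the same datamsg sit adjacently on each list: remembering the previously seen message pointer is enough to run the callback over each message's chunk list exactly once. The idea in isolation, with plain arrays in place of the kernel list walks:

#include <stdio.h>

struct msg { int id; };
struct chunk { struct msg *msg; };

static void process_msg(struct msg *m)
{
        printf("processing msg %d\n", m->id);
}

int main(void)
{
        struct msg m1 = { 1 }, m2 = { 2 };
        struct chunk chunks[] = { { &m1 }, { &m1 }, { &m2 }, { &m2 } };
        struct msg *prev = NULL;

        for (unsigned i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++) {
                if (chunks[i].msg == prev)
                        continue;       /* same msg as previous chunk: skip */
                process_msg(chunks[i].msg);
                prev = chunks[i].msg;
        }
        return 0;
}
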
@@ -5333,14 +5348,14 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
/* use callback to avoid exporting the core structure */
-void sctp_transport_walk_start(struct rhashtable_iter *iter)
+void sctp_transport_walk_start(struct rhashtable_iter *iter) __acquires(RCU)
{
rhltable_walk_enter(&sctp_transport_hashtable, iter);
rhashtable_walk_start(iter);
}
-void sctp_transport_walk_stop(struct rhashtable_iter *iter)
+void sctp_transport_walk_stop(struct rhashtable_iter *iter) __releases(RCU)
{
rhashtable_walk_stop(iter);
rhashtable_walk_exit(iter);
@@ -9574,9 +9589,9 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
* paths won't try to lock it and then oldsk.
*/
lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
- sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
+ sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w);
sctp_assoc_migrate(assoc, newsk);
- sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
+ sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 86cccc24e52e..ea0068f0173c 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -349,7 +349,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
smc->conn.lgr->sync_err = 1;
- smc_lgr_terminate(smc->conn.lgr, true);
+ smc_lgr_terminate_sched(smc->conn.lgr);
}
}
@@ -372,7 +372,8 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
dclc.hdr.version = SMC_CLC_V1;
dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
- if (smc->conn.lgr && !smc->conn.lgr->is_smcd)
+ if ((!smc->conn.lgr || !smc->conn.lgr->is_smcd) &&
+ smc_ib_is_valid_local_systemid())
memcpy(dclc.id_for_peer, local_systemid,
sizeof(local_systemid));
dclc.peer_diagnosis = htonl(peer_diag_info);
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 5b085efa3bce..824c5211b027 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -46,6 +46,7 @@ static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);
static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
struct smc_buf_desc *buf_desc);
+static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
@@ -241,7 +242,7 @@ static void smc_lgr_terminate_work(struct work_struct *work)
struct smc_link_group *lgr = container_of(work, struct smc_link_group,
terminate_work);
- smc_lgr_terminate(lgr, true);
+ __smc_lgr_terminate(lgr, true);
}
/* create a new SMC link group */
@@ -588,15 +589,15 @@ static void smc_lgr_cleanup(struct smc_link_group *lgr)
} else {
struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
- wake_up(&lnk->wr_reg_wait);
- if (lnk->state != SMC_LNK_INACTIVE) {
- smc_link_send_delete(lnk, false);
+ if (lnk->state != SMC_LNK_INACTIVE)
smc_llc_link_inactive(lnk);
- }
}
}
-/* terminate link group */
+/* terminate link group
+ * @soft: true if link group shutdown can take its time
+ * false if immediate link group shutdown is required
+ */
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
struct smc_connection *conn;
@@ -634,25 +635,20 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
smc_lgr_free(lgr);
}
-/* unlink and terminate link group
- * @soft: true if link group shutdown can take its time
- * false if immediate link group shutdown is required
- */
-void smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
+/* unlink link group and schedule termination */
+void smc_lgr_terminate_sched(struct smc_link_group *lgr)
{
spinlock_t *lgr_lock;
smc_lgr_list_head(lgr, &lgr_lock);
spin_lock_bh(lgr_lock);
- if (lgr->terminating) {
+ if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
spin_unlock_bh(lgr_lock);
return; /* lgr already terminating */
}
- if (!soft)
- lgr->freeing = 1;
list_del_init(&lgr->list);
spin_unlock_bh(lgr_lock);
- __smc_lgr_terminate(lgr, soft);
+ schedule_work(&lgr->terminate_work);
}
/* Called when IB port is terminated */
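
smc_lgr_terminate_sched() now only unlinks the group and defers the actual teardown to the terminate_work item, with re-entry and already-freed groups filtered out under the list lock. A sketch of that idempotent deferral, where schedule_work() is modeled as a direct call:

#include <stdbool.h>
#include <stdio.h>

struct lgr {
        bool on_list;
        bool terminating;
        bool freeing;
};

static void lgr_terminate_work(struct lgr *lgr)
{
        lgr->terminating = true;
        printf("tearing down link group\n");
}

static void lgr_terminate_sched(struct lgr *lgr)
{
        /* spin_lock_bh(lgr_lock) in the kernel */
        if (!lgr->on_list || lgr->terminating || lgr->freeing)
                return;                 /* already gone or in progress */
        lgr->on_list = false;           /* list_del_init(&lgr->list) */
        /* spin_unlock_bh(lgr_lock) */

        lgr_terminate_work(lgr);        /* schedule_work() in the kernel */
}

int main(void)
{
        struct lgr g = { .on_list = true };

        lgr_terminate_sched(&g);
        lgr_terminate_sched(&g);        /* second call is a no-op */
        return 0;
}
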
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 234ae25f0025..8041db20c753 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -285,19 +285,13 @@ static inline struct smc_connection *smc_lgr_find_conn(
return res;
}
-static inline void smc_lgr_terminate_sched(struct smc_link_group *lgr)
-{
- if (!lgr->terminating && !lgr->freeing)
- schedule_work(&lgr->terminate_work);
-}
-
struct smc_sock;
struct smc_clc_msg_accept_confirm;
struct smc_clc_msg_local;
void smc_lgr_forget(struct smc_link_group *lgr);
void smc_lgr_cleanup_early(struct smc_connection *conn);
-void smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
+void smc_lgr_terminate_sched(struct smc_link_group *lgr);
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
unsigned short vlan);
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 05b825b3cfa4..04b6fefb8bce 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -37,11 +37,7 @@ struct smc_ib_devices smc_ib_devices = { /* smc-registered ib devices */
.list = LIST_HEAD_INIT(smc_ib_devices.list),
};
-#define SMC_LOCAL_SYSTEMID_RESET "%%%%%%%"
-
-u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET; /* unique system
- * identifier
- */
+u8 local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */
static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
@@ -168,6 +164,15 @@ static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
{
memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
sizeof(smcibdev->mac[ibport - 1]));
+}
+
+bool smc_ib_is_valid_local_systemid(void)
+{
+ return !is_zero_ether_addr(&local_systemid[2]);
+}
+
+static void smc_ib_init_local_systemid(void)
+{
get_random_bytes(&local_systemid[0], 2);
}
@@ -224,8 +229,7 @@ static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
rc = smc_ib_fill_mac(smcibdev, ibport);
if (rc)
goto out;
- if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
- sizeof(local_systemid)) &&
+ if (!smc_ib_is_valid_local_systemid() &&
smc_ib_port_active(smcibdev, ibport))
/* create unique system identifier */
smc_ib_define_local_systemid(smcibdev, ibport);
@@ -257,6 +261,7 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
struct ib_event *ibevent)
{
struct smc_ib_device *smcibdev;
+ bool schedule = false;
u8 port_idx;
smcibdev = container_of(handler, struct smc_ib_device, event_handler);
@@ -266,22 +271,35 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
/* terminate all ports on device */
for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
set_bit(port_idx, &smcibdev->port_event_mask);
- set_bit(port_idx, smcibdev->ports_going_away);
+ if (!test_and_set_bit(port_idx,
+ smcibdev->ports_going_away))
+ schedule = true;
}
- schedule_work(&smcibdev->port_event_work);
+ if (schedule)
+ schedule_work(&smcibdev->port_event_work);
break;
- case IB_EVENT_PORT_ERR:
case IB_EVENT_PORT_ACTIVE:
- case IB_EVENT_GID_CHANGE:
port_idx = ibevent->element.port_num - 1;
- if (port_idx < SMC_MAX_PORTS) {
- set_bit(port_idx, &smcibdev->port_event_mask);
- if (ibevent->event == IB_EVENT_PORT_ERR)
- set_bit(port_idx, smcibdev->ports_going_away);
- else if (ibevent->event == IB_EVENT_PORT_ACTIVE)
- clear_bit(port_idx, smcibdev->ports_going_away);
+ if (port_idx >= SMC_MAX_PORTS)
+ break;
+ set_bit(port_idx, &smcibdev->port_event_mask);
+ if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
schedule_work(&smcibdev->port_event_work);
- }
+ break;
+ case IB_EVENT_PORT_ERR:
+ port_idx = ibevent->element.port_num - 1;
+ if (port_idx >= SMC_MAX_PORTS)
+ break;
+ set_bit(port_idx, &smcibdev->port_event_mask);
+ if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
+ schedule_work(&smcibdev->port_event_work);
+ break;
+ case IB_EVENT_GID_CHANGE:
+ port_idx = ibevent->element.port_num - 1;
+ if (port_idx >= SMC_MAX_PORTS)
+ break;
+ set_bit(port_idx, &smcibdev->port_event_mask);
+ schedule_work(&smcibdev->port_event_work);
break;
default:
break;
@@ -316,11 +334,11 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
case IB_EVENT_QP_FATAL:
case IB_EVENT_QP_ACCESS_ERR:
port_idx = ibevent->element.qp->port - 1;
- if (port_idx < SMC_MAX_PORTS) {
- set_bit(port_idx, &smcibdev->port_event_mask);
- set_bit(port_idx, smcibdev->ports_going_away);
+ if (port_idx >= SMC_MAX_PORTS)
+ break;
+ set_bit(port_idx, &smcibdev->port_event_mask);
+ if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
schedule_work(&smcibdev->port_event_work);
- }
break;
default:
break;
@@ -594,6 +612,7 @@ static struct ib_client smc_ib_client = {
int __init smc_ib_register_client(void)
{
+ smc_ib_init_local_systemid();
return ib_register_client(&smc_ib_client);
}
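
The reworked event handler schedules the port event worker only on actual state transitions: test_and_set_bit() reports whether the "going away" bit was already set, so repeated error events no longer queue redundant work. The same pattern with C11 atomics standing in for the kernel bitops:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong ports_going_away;

static void schedule_port_work(unsigned port)
{
        printf("scheduling work for port %u\n", port);
}

static void on_port_error(unsigned port)
{
        unsigned long bit = 1UL << port;

        /* test_and_set_bit(): fetch_or yields the bit's previous value */
        if (!(atomic_fetch_or(&ports_going_away, bit) & bit))
                schedule_port_work(port);
}

int main(void)
{
        on_port_error(0);       /* schedules */
        on_port_error(0);       /* duplicate event: no new work */
        on_port_error(1);       /* schedules */
        return 0;
}
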
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 255db87547d3..5c2b115d36da 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -84,4 +84,5 @@ void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
enum dma_data_direction data_direction);
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
unsigned short vlan_id, u8 gid[], u8 *sgid_index);
+bool smc_ib_is_valid_local_systemid(void);
#endif
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index a9f6431dd69a..0e52aab53d97 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -614,7 +614,7 @@ static void smc_llc_testlink_work(struct work_struct *work)
rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
SMC_LLC_WAIT_TIME);
if (rc <= 0) {
- smc_lgr_terminate(smc_get_lgr(link), true);
+ smc_lgr_terminate_sched(smc_get_lgr(link));
return;
}
next_interval = link->llc_testlink_time;
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 0d42e7716b91..9f1ade86d70e 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -284,7 +284,7 @@ static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
if (rc)
- smc_lgr_terminate(lgr, true);
+ smc_lgr_terminate_sched(lgr);
return rc;
}
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 3a1d428c1336..f25604d68337 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -29,7 +29,7 @@ struct switchdev_deferred_item {
struct list_head list;
struct net_device *dev;
switchdev_deferred_func_t *func;
- unsigned long data[0];
+ unsigned long data[];
};
static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
@@ -475,6 +475,9 @@ static int __switchdev_handle_port_obj_add(struct net_device *dev,
* necessary to go through this helper.
*/
netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ if (netif_is_bridge_master(lower_dev))
+ continue;
+
err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
check_cb, add_cb);
if (err && err != -EOPNOTSUPP)
@@ -526,6 +529,9 @@ static int __switchdev_handle_port_obj_del(struct net_device *dev,
* necessary to go through this helper.
*/
netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ if (netif_is_bridge_master(lower_dev))
+ continue;
+
err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
check_cb, del_cb);
if (err && err != -EOPNOTSUPP)
@@ -576,6 +582,9 @@ static int __switchdev_handle_port_attr_set(struct net_device *dev,
* necessary to go through this helper.
*/
netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ if (netif_is_bridge_master(lower_dev))
+ continue;
+
err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
check_cb, set_cb);
if (err && err != -EOPNOTSUPP)
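
The switchdev helpers recurse through lower devices to find the switch ports backing a bridge, so a bridge master that is itself a lower device (for example under a LAG) must be skipped, or the object would be replayed into a foreign bridge. A toy traversal showing the guard:

#include <stdbool.h>
#include <stdio.h>

struct dev {
        const char *name;
        bool is_bridge_master;
        struct dev *lower[4];
        int n_lower;
};

static void handle_obj_add(struct dev *d)
{
        printf("applying obj to %s\n", d->name);

        for (int i = 0; i < d->n_lower; i++) {
                if (d->lower[i]->is_bridge_master)
                        continue;       /* do not cross into another bridge */
                handle_obj_add(d->lower[i]);
        }
}

int main(void)
{
        struct dev port = { .name = "swp1" };
        struct dev nested_br = { .name = "br1", .is_bridge_master = true };
        struct dev lag = { .name = "bond0",
                           .lower = { &port, &nested_br }, .n_lower = 2 };

        handle_obj_add(&lag);   /* visits bond0 and swp1, skips br1 */
        return 0;
}
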
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 58708b4c7719..6dce2abf436e 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -322,9 +322,13 @@ static void mon_assign_roles(struct tipc_monitor *mon, struct tipc_peer *head)
void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id)
{
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
- struct tipc_peer *self = get_self(net, bearer_id);
+ struct tipc_peer *self;
struct tipc_peer *peer, *prev, *head;
+ if (!mon)
+ return;
+
+ self = get_self(net, bearer_id);
write_lock_bh(&mon->lock);
peer = get_peer(mon, addr);
if (!peer)
@@ -407,11 +411,15 @@ exit:
void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id)
{
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
- struct tipc_peer *self = get_self(net, bearer_id);
+ struct tipc_peer *self;
struct tipc_peer *peer, *head;
struct tipc_mon_domain *dom;
int applied;
+ if (!mon)
+ return;
+
+ self = get_self(net, bearer_id);
write_lock_bh(&mon->lock);
peer = get_peer(mon, addr);
if (!peer) {
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 0d515d20b056..4d0e0bdd997b 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -736,9 +736,6 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
msg_set_destport(msg, dport);
*err = TIPC_OK;
- if (!skb_cloned(skb))
- return true;
-
return true;
}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 6d466ebdb64f..871feadbbc19 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -394,6 +394,11 @@ static inline u32 msg_connected(struct tipc_msg *m)
return msg_type(m) == TIPC_CONN_MSG;
}
+static inline u32 msg_direct(struct tipc_msg *m)
+{
+ return msg_type(m) == TIPC_DIRECT_MSG;
+}
+
static inline u32 msg_errcode(struct tipc_msg *m)
{
return msg_bits(m, 1, 25, 0xf);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 0c88778c88b5..10292c942384 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1586,7 +1586,8 @@ static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
case TIPC_MEDIUM_IMPORTANCE:
case TIPC_HIGH_IMPORTANCE:
case TIPC_CRITICAL_IMPORTANCE:
- if (msg_connected(hdr) || msg_named(hdr)) {
+ if (msg_connected(hdr) || msg_named(hdr) ||
+ msg_direct(hdr)) {
tipc_loopback_trace(peer_net, list);
spin_lock_init(&list->lock);
tipc_sk_rcv(peer_net, list);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 693e8902161e..87466607097f 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1461,7 +1461,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
}
__skb_queue_head_init(&pkts);
- mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
+ mtu = tipc_node_get_mtu(net, dnode, tsk->portid, true);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 1c5574e2e058..a562ebaaa33c 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -366,7 +366,7 @@ static int tls_do_allocation(struct sock *sk,
if (!offload_ctx->open_record) {
if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
sk->sk_allocation))) {
- sk->sk_prot->enter_memory_pressure(sk);
+ READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
sk_stream_moderate_sndbuf(sk);
return -ENOMEM;
}
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 94774c0e5ff3..156efce50dbd 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -63,13 +63,14 @@ static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
- struct proto *base);
+ const struct proto *base);
void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
- sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
+ WRITE_ONCE(sk->sk_prot,
+ &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
}
int wait_on_pending_writer(struct sock *sk, long *timeo)
@@ -312,7 +313,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
write_lock_bh(&sk->sk_callback_lock);
if (free_ctx)
rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
- sk->sk_prot = ctx->sk_proto;
+ WRITE_ONCE(sk->sk_prot, ctx->sk_proto);
if (sk->sk_write_space == tls_write_space)
sk->sk_write_space = ctx->sk_write_space;
write_unlock_bh(&sk->sk_callback_lock);
@@ -621,38 +622,39 @@ struct tls_context *tls_ctx_create(struct sock *sk)
mutex_init(&ctx->tx_lock);
rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
- ctx->sk_proto = sk->sk_prot;
+ ctx->sk_proto = READ_ONCE(sk->sk_prot);
return ctx;
}
static void tls_build_proto(struct sock *sk)
{
int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
+ const struct proto *prot = READ_ONCE(sk->sk_prot);
/* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
if (ip_ver == TLSV6 &&
- unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
+ unlikely(prot != smp_load_acquire(&saved_tcpv6_prot))) {
mutex_lock(&tcpv6_prot_mutex);
- if (likely(sk->sk_prot != saved_tcpv6_prot)) {
- build_protos(tls_prots[TLSV6], sk->sk_prot);
- smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
+ if (likely(prot != saved_tcpv6_prot)) {
+ build_protos(tls_prots[TLSV6], prot);
+ smp_store_release(&saved_tcpv6_prot, prot);
}
mutex_unlock(&tcpv6_prot_mutex);
}
if (ip_ver == TLSV4 &&
- unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
+ unlikely(prot != smp_load_acquire(&saved_tcpv4_prot))) {
mutex_lock(&tcpv4_prot_mutex);
- if (likely(sk->sk_prot != saved_tcpv4_prot)) {
- build_protos(tls_prots[TLSV4], sk->sk_prot);
- smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
+ if (likely(prot != saved_tcpv4_prot)) {
+ build_protos(tls_prots[TLSV4], prot);
+ smp_store_release(&saved_tcpv4_prot, prot);
}
mutex_unlock(&tcpv4_prot_mutex);
}
}
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
- struct proto *base)
+ const struct proto *base)
{
prot[TLS_BASE][TLS_BASE] = *base;
prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
@@ -742,7 +744,8 @@ static void tls_update(struct sock *sk, struct proto *p,
ctx->sk_write_space = write_space;
ctx->sk_proto = p;
} else {
- sk->sk_prot = p;
+ /* Pairs with lockless read in sk_clone_lock(). */
+ WRITE_ONCE(sk->sk_prot, p);
sk->sk_write_space = write_space;
}
}
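
Since sk_clone_lock() reads sk_prot without the socket lock, every update now goes through WRITE_ONCE() and every lockless read through READ_ONCE(), guaranteeing the pointer is never torn. A sketch of the pairing, using C11 relaxed atomics in place of the kernel macros:

#include <stdatomic.h>
#include <stdio.h>

struct proto { const char *name; };

static struct proto tcp_proto = { "tcp" };
static struct proto tls_proto = { "tls" };

static _Atomic(struct proto *) sk_prot = &tcp_proto;

static void update_sk_prot(struct proto *p)
{
        /* WRITE_ONCE(sk->sk_prot, p) */
        atomic_store_explicit(&sk_prot, p, memory_order_relaxed);
}

static struct proto *read_sk_prot(void)
{
        /* READ_ONCE(sk->sk_prot), e.g. from sk_clone_lock() */
        return atomic_load_explicit(&sk_prot, memory_order_relaxed);
}

int main(void)
{
        update_sk_prot(&tls_proto);
        printf("sk_prot = %s\n", read_sk_prot()->name);
        return 0;
}
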
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 68debcb28fa4..3385a7a0b231 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -690,7 +690,8 @@ static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
if (sk) {
u = unix_sk(sock->sk);
- seq_printf(m, "scm_fds: %u\n", READ_ONCE(u->scm_stat.nr_fds));
+ seq_printf(m, "scm_fds: %u\n",
+ atomic_read(&u->scm_stat.nr_fds));
}
}
#else
@@ -1211,6 +1212,7 @@ out:
}
static long unix_wait_for_peer(struct sock *other, long timeo)
+ __releases(&unix_sk(other)->lock)
{
struct unix_sock *u = unix_sk(other);
int sched;
@@ -1601,10 +1603,8 @@ static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
struct scm_fp_list *fp = UNIXCB(skb).fp;
struct unix_sock *u = unix_sk(sk);
- lockdep_assert_held(&sk->sk_receive_queue.lock);
-
if (unlikely(fp && fp->count))
- u->scm_stat.nr_fds += fp->count;
+ atomic_add(fp->count, &u->scm_stat.nr_fds);
}
static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
@@ -1612,10 +1612,8 @@ static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
struct scm_fp_list *fp = UNIXCB(skb).fp;
struct unix_sock *u = unix_sk(sk);
- lockdep_assert_held(&sk->sk_receive_queue.lock);
-
if (unlikely(fp && fp->count))
- u->scm_stat.nr_fds -= fp->count;
+ atomic_sub(fp->count, &u->scm_stat.nr_fds);
}
/*
@@ -1804,10 +1802,8 @@ restart_locked:
if (sock_flag(other, SOCK_RCVTSTAMP))
__net_timestamp(skb);
maybe_add_creds(skb, sock, other);
- spin_lock(&other->sk_receive_queue.lock);
scm_stat_add(other, skb);
- __skb_queue_tail(&other->sk_receive_queue, skb);
- spin_unlock(&other->sk_receive_queue.lock);
+ skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
other->sk_data_ready(other);
sock_put(other);
@@ -1909,10 +1905,8 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
goto pipe_err_free;
maybe_add_creds(skb, sock, other);
- spin_lock(&other->sk_receive_queue.lock);
scm_stat_add(other, skb);
- __skb_queue_tail(&other->sk_receive_queue, skb);
- spin_unlock(&other->sk_receive_queue.lock);
+ skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
other->sk_data_ready(other);
sent += size;
@@ -2112,9 +2106,12 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
skip = sk_peek_offset(sk, flags);
skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
- scm_stat_del, &skip, &err, &last);
- if (skb)
+ &skip, &err, &last);
+ if (skb) {
+ if (!(flags & MSG_PEEK))
+ scm_stat_del(sk, skb);
break;
+ }
mutex_unlock(&u->iolock);
@@ -2408,9 +2405,7 @@ unlock:
sk_peek_offset_bwd(sk, chunk);
if (UNIXCB(skb).fp) {
- spin_lock(&sk->sk_receive_queue.lock);
scm_stat_del(sk, skb);
- spin_unlock(&sk->sk_receive_queue.lock);
unix_detach_fds(&scm, skb);
}
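
With nr_fds converted to atomic_t, the SCM file-descriptor accounting no longer needs the receive-queue lock, which in turn lets the senders go back to plain skb_queue_tail(). The counter discipline in miniature, with C11's atomic_int playing the role of atomic_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int nr_fds;

static void scm_stat_add(int fds_in_skb)
{
        if (fds_in_skb)
                atomic_fetch_add(&nr_fds, fds_in_skb);
}

static void scm_stat_del(int fds_in_skb)
{
        if (fds_in_skb)
                atomic_fetch_sub(&nr_fds, fds_in_skb);
}

int main(void)
{
        scm_stat_add(3);        /* sender queues an skb carrying 3 fds */
        scm_stat_del(3);        /* receiver dequeues it */
        printf("scm_fds: %d\n", atomic_load(&nr_fds));
        return 0;
}
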
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index f3c4bab2f737..709038a4783e 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -1151,6 +1151,7 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
virtio_transport_free_pkt(pkt);
break;
default:
+ (void)virtio_transport_reset_no_sock(t, pkt);
virtio_transport_free_pkt(pkt);
break;
}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3e25229a059d..341402b4f178 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -693,8 +693,14 @@ int wiphy_register(struct wiphy *wiphy)
~(BIT(NL80211_PREAMBLE_LEGACY) |
BIT(NL80211_PREAMBLE_HT) |
BIT(NL80211_PREAMBLE_VHT) |
+ BIT(NL80211_PREAMBLE_HE) |
BIT(NL80211_PREAMBLE_DMG))))
return -EINVAL;
+ if (WARN_ON((wiphy->pmsr_capa->ftm.trigger_based ||
+ wiphy->pmsr_capa->ftm.non_trigger_based) &&
+ !(wiphy->pmsr_capa->ftm.preambles &
+ BIT(NL80211_PREAMBLE_HE))))
+ return -EINVAL;
if (WARN_ON(wiphy->pmsr_capa->ftm.bandwidths &
~(BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
diff --git a/net/wireless/core.h b/net/wireless/core.h
index ed487e324571..bb897a803ffe 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -385,7 +385,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
struct net_device *dev);
int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
u16 frame_type, const u8 *match_data,
- int match_len);
+ int match_len, struct netlink_ext_ack *extack);
void cfg80211_mlme_unreg_wk(struct work_struct *wk);
void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid);
void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index f9462010575f..e4805a3bd310 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -4,6 +4,7 @@
*
* Copyright (c) 2009, Jouni Malinen <j@w1.fi>
* Copyright (c) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2019 Intel Corporation
*/
#include <linux/kernel.h>
@@ -470,7 +471,7 @@ void cfg80211_mlme_unreg_wk(struct work_struct *wk)
int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
u16 frame_type, const u8 *match_data,
- int match_len)
+ int match_len, struct netlink_ext_ack *extack)
{
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
@@ -481,15 +482,38 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
if (!wdev->wiphy->mgmt_stypes)
return -EOPNOTSUPP;
- if ((frame_type & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT)
+ if ((frame_type & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT) {
+ NL_SET_ERR_MSG(extack, "frame type not management");
return -EINVAL;
+ }
- if (frame_type & ~(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE))
+ if (frame_type & ~(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) {
+ NL_SET_ERR_MSG(extack, "Invalid frame type");
return -EINVAL;
+ }
mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
- if (!(wdev->wiphy->mgmt_stypes[wdev->iftype].rx & BIT(mgmt_type)))
+ if (!(wdev->wiphy->mgmt_stypes[wdev->iftype].rx & BIT(mgmt_type))) {
+ NL_SET_ERR_MSG(extack,
+ "Registration to specific type not supported");
+ return -EINVAL;
+ }
+
+ /*
+ * To support Pre Association Security Negotiation (PASN), registration
+ * for authentication frames should be supported. However, as some
+	 * versions of the user space daemons wrongly register for all types of
+	 * authentication frames (which might result in unexpected behavior),
+ * allow such registration if the request is for a specific
+ * authentication algorithm number.
+ */
+ if (wdev->iftype == NL80211_IFTYPE_STATION &&
+ (frame_type & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_AUTH &&
+ !(match_data && match_len >= 2)) {
+ NL_SET_ERR_MSG(extack,
+ "Authentication algorithm number required");
return -EINVAL;
+ }
nreg = kzalloc(sizeof(*reg) + match_len, GFP_KERNEL);
if (!nreg)
@@ -504,6 +528,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
continue;
if (memcmp(reg->match, match_data, mlen) == 0) {
+ NL_SET_ERR_MSG(extack, "Match already configured");
err = -EALREADY;
break;
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f0af23c1634a..5fa402144cda 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5,7 +5,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#include <linux/if.h>
@@ -276,6 +276,8 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = {
[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 },
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG },
[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG },
+ [NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED] = { .type = NLA_FLAG },
+ [NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED] = { .type = NLA_FLAG },
};
static const struct nla_policy
@@ -322,6 +324,29 @@ he_obss_pd_policy[NL80211_HE_OBSS_PD_ATTR_MAX + 1] = {
NLA_POLICY_RANGE(NLA_U8, 1, 20),
};
+static const struct nla_policy
+he_bss_color_policy[NL80211_HE_BSS_COLOR_ATTR_MAX + 1] = {
+ [NL80211_HE_BSS_COLOR_ATTR_COLOR] = NLA_POLICY_RANGE(NLA_U8, 1, 63),
+ [NL80211_HE_BSS_COLOR_ATTR_DISABLED] = { .type = NLA_FLAG },
+ [NL80211_HE_BSS_COLOR_ATTR_PARTIAL] = { .type = NLA_FLAG },
+};
+
+static const struct nla_policy
+nl80211_tid_config_attr_policy[NL80211_TID_CONFIG_ATTR_MAX + 1] = {
+ [NL80211_TID_CONFIG_ATTR_VIF_SUPP] = { .type = NLA_U64 },
+ [NL80211_TID_CONFIG_ATTR_PEER_SUPP] = { .type = NLA_U64 },
+ [NL80211_TID_CONFIG_ATTR_OVERRIDE] = { .type = NLA_FLAG },
+ [NL80211_TID_CONFIG_ATTR_TIDS] = NLA_POLICY_RANGE(NLA_U16, 1, 0xff),
+ [NL80211_TID_CONFIG_ATTR_NOACK] =
+ NLA_POLICY_MAX(NLA_U8, NL80211_TID_CONFIG_DISABLE),
+ [NL80211_TID_CONFIG_ATTR_RETRY_SHORT] = NLA_POLICY_MIN(NLA_U8, 1),
+ [NL80211_TID_CONFIG_ATTR_RETRY_LONG] = NLA_POLICY_MIN(NLA_U8, 1),
+ [NL80211_TID_CONFIG_ATTR_AMPDU_CTRL] =
+ NLA_POLICY_MAX(NLA_U8, NL80211_TID_CONFIG_DISABLE),
+ [NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL] =
+ NLA_POLICY_MAX(NLA_U8, NL80211_TID_CONFIG_DISABLE),
+};
+
const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD },
[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
@@ -362,7 +387,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
[NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
.len = WLAN_MAX_KEY_LEN },
- [NL80211_ATTR_KEY_IDX] = NLA_POLICY_MAX(NLA_U8, 5),
+ [NL80211_ATTR_KEY_IDX] = NLA_POLICY_MAX(NLA_U8, 7),
[NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 },
[NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG },
[NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
@@ -632,6 +657,12 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_TWT_RESPONDER] = { .type = NLA_FLAG },
[NL80211_ATTR_HE_OBSS_PD] = NLA_POLICY_NESTED(he_obss_pd_policy),
[NL80211_ATTR_VLAN_ID] = NLA_POLICY_RANGE(NLA_U16, 1, VLAN_N_VID - 2),
+ [NL80211_ATTR_HE_BSS_COLOR] = NLA_POLICY_NESTED(he_bss_color_policy),
+ [NL80211_ATTR_TID_CONFIG] =
+ NLA_POLICY_NESTED_ARRAY(nl80211_tid_config_attr_policy),
+ [NL80211_ATTR_CONTROL_PORT_NO_PREAUTH] = { .type = NLA_FLAG },
+ [NL80211_ATTR_PMK_LIFETIME] = NLA_POLICY_MIN(NLA_U32, 1),
+ [NL80211_ATTR_PMK_REAUTH_THRESHOLD] = NLA_POLICY_RANGE(NLA_U8, 1, 100),
};
/* policy for the key attributes */
@@ -971,6 +1002,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
if ((chan->flags & IEEE80211_CHAN_NO_10MHZ) &&
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_10MHZ))
goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_NO_HE) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HE))
+ goto nla_put_failure;
}
if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -1032,7 +1066,7 @@ struct key_parse {
struct key_params p;
int idx;
int type;
- bool def, defmgmt;
+ bool def, defmgmt, defbeacon;
bool def_uni, def_multi;
};
@@ -1048,12 +1082,13 @@ static int nl80211_parse_key_new(struct genl_info *info, struct nlattr *key,
k->def = !!tb[NL80211_KEY_DEFAULT];
k->defmgmt = !!tb[NL80211_KEY_DEFAULT_MGMT];
+ k->defbeacon = !!tb[NL80211_KEY_DEFAULT_BEACON];
if (k->def) {
k->def_uni = true;
k->def_multi = true;
}
- if (k->defmgmt)
+ if (k->defmgmt || k->defbeacon)
k->def_multi = true;
if (tb[NL80211_KEY_IDX])
@@ -1160,14 +1195,17 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k)
if (err)
return err;
- if (k->def && k->defmgmt) {
- GENL_SET_ERR_MSG(info, "key with def && defmgmt is invalid");
+ if ((k->def ? 1 : 0) + (k->defmgmt ? 1 : 0) +
+ (k->defbeacon ? 1 : 0) > 1) {
+ GENL_SET_ERR_MSG(info,
+ "key with multiple default flags is invalid");
return -EINVAL;
}
- if (k->defmgmt) {
+ if (k->defmgmt || k->defbeacon) {
if (k->def_uni || !k->def_multi) {
- GENL_SET_ERR_MSG(info, "defmgmt key must be mcast");
+ GENL_SET_ERR_MSG(info,
+ "defmgmt/defbeacon key must be mcast");
return -EINVAL;
}
}
@@ -1179,14 +1217,20 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k)
"defmgmt key idx not 4 or 5");
return -EINVAL;
}
+ } else if (k->defbeacon) {
+ if (k->idx < 6 || k->idx > 7) {
+ GENL_SET_ERR_MSG(info,
+ "defbeacon key idx not 6 or 7");
+ return -EINVAL;
+ }
} else if (k->def) {
if (k->idx < 0 || k->idx > 3) {
GENL_SET_ERR_MSG(info, "def key idx not 0-3");
return -EINVAL;
}
} else {
- if (k->idx < 0 || k->idx > 5) {
- GENL_SET_ERR_MSG(info, "key idx not 0-5");
+ if (k->idx < 0 || k->idx > 7) {
+ GENL_SET_ERR_MSG(info, "key idx not 0-7");
return -EINVAL;
}
}
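
After this change the key index space splits into three ranges: 0-3 for ordinary keys, 4-5 for management group keys, and the new 6-7 range for beacon-protection keys (gated on NL80211_EXT_FEATURE_BEACON_PROTECTION). A classification sketch; the enum is purely illustrative:

#include <stdio.h>

enum key_kind { KEY_REGULAR, KEY_MGMT, KEY_BEACON, KEY_INVALID };

static enum key_kind classify_key_idx(int idx)
{
        if (idx >= 0 && idx <= 3)
                return KEY_REGULAR;
        if (idx == 4 || idx == 5)
                return KEY_MGMT;
        if (idx == 6 || idx == 7)
                return KEY_BEACON;      /* needs beacon-protection support */
        return KEY_INVALID;
}

int main(void)
{
        for (int idx = 0; idx <= 8; idx++)
                printf("idx %d -> kind %d\n", idx, classify_key_idx(idx));
        return 0;
}
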
@@ -1845,6 +1889,12 @@ nl80211_send_pmsr_ftm_capa(const struct cfg80211_pmsr_capabilities *cap,
nla_put_u32(msg, NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST,
cap->ftm.max_ftms_per_burst))
return -ENOBUFS;
+ if (cap->ftm.trigger_based &&
+ nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED))
+ return -ENOBUFS;
+ if (cap->ftm.non_trigger_based &&
+ nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED))
+ return -ENOBUFS;
nla_nest_end(msg, ftm);
return 0;
@@ -1892,6 +1942,88 @@ static int nl80211_send_pmsr_capa(struct cfg80211_registered_device *rdev,
return 0;
}
+static int
+nl80211_put_iftype_akm_suites(struct cfg80211_registered_device *rdev,
+ struct sk_buff *msg)
+{
+ int i;
+ struct nlattr *nested, *nested_akms;
+ const struct wiphy_iftype_akm_suites *iftype_akms;
+
+ if (!rdev->wiphy.num_iftype_akm_suites ||
+ !rdev->wiphy.iftype_akm_suites)
+ return 0;
+
+ nested = nla_nest_start(msg, NL80211_ATTR_IFTYPE_AKM_SUITES);
+ if (!nested)
+ return -ENOBUFS;
+
+ for (i = 0; i < rdev->wiphy.num_iftype_akm_suites; i++) {
+ nested_akms = nla_nest_start(msg, i + 1);
+ if (!nested_akms)
+ return -ENOBUFS;
+
+ iftype_akms = &rdev->wiphy.iftype_akm_suites[i];
+
+ if (nl80211_put_iftypes(msg, NL80211_IFTYPE_AKM_ATTR_IFTYPES,
+ iftype_akms->iftypes_mask))
+ return -ENOBUFS;
+
+ if (nla_put(msg, NL80211_IFTYPE_AKM_ATTR_SUITES,
+ sizeof(u32) * iftype_akms->n_akm_suites,
+ iftype_akms->akm_suites)) {
+ return -ENOBUFS;
+ }
+ nla_nest_end(msg, nested_akms);
+ }
+
+ nla_nest_end(msg, nested);
+
+ return 0;
+}
+
+static int
+nl80211_put_tid_config_support(struct cfg80211_registered_device *rdev,
+ struct sk_buff *msg)
+{
+ struct nlattr *supp;
+
+ if (!rdev->wiphy.tid_config_support.vif &&
+ !rdev->wiphy.tid_config_support.peer)
+ return 0;
+
+ supp = nla_nest_start(msg, NL80211_ATTR_TID_CONFIG);
+ if (!supp)
+ return -ENOSPC;
+
+ if (rdev->wiphy.tid_config_support.vif &&
+ nla_put_u64_64bit(msg, NL80211_TID_CONFIG_ATTR_VIF_SUPP,
+ rdev->wiphy.tid_config_support.vif,
+ NL80211_TID_CONFIG_ATTR_PAD))
+ goto fail;
+
+ if (rdev->wiphy.tid_config_support.peer &&
+ nla_put_u64_64bit(msg, NL80211_TID_CONFIG_ATTR_PEER_SUPP,
+ rdev->wiphy.tid_config_support.peer,
+ NL80211_TID_CONFIG_ATTR_PAD))
+ goto fail;
+
+ /* for now we just use the same value ... makes more sense */
+ if (nla_put_u8(msg, NL80211_TID_CONFIG_ATTR_RETRY_SHORT,
+ rdev->wiphy.tid_config_support.max_retry))
+ goto fail;
+ if (nla_put_u8(msg, NL80211_TID_CONFIG_ATTR_RETRY_LONG,
+ rdev->wiphy.tid_config_support.max_retry))
+ goto fail;
+
+ nla_nest_end(msg, supp);
+
+ return 0;
+fail:
+ nla_nest_cancel(msg, supp);
+ return -ENOBUFS;
+}
+
struct nl80211_dump_wiphy_state {
s64 filter_wiphy;
long start;
@@ -2450,6 +2582,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
rdev->wiphy.akm_suites))
goto nla_put_failure;
+ if (nl80211_put_iftype_akm_suites(rdev, msg))
+ goto nla_put_failure;
+
+ if (nl80211_put_tid_config_support(rdev, msg))
+ goto nla_put_failure;
+
/* done */
state->split_start = 0;
break;
@@ -3481,7 +3619,7 @@ static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev,
enum nl80211_iftype iftype)
{
if (!use_4addr) {
- if (netdev && (netdev->priv_flags & IFF_BRIDGE_PORT))
+ if (netdev && netif_is_bridge_port(netdev))
return -EBUSY;
return 0;
}
@@ -3769,8 +3907,14 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
void *hdr;
struct sk_buff *msg;
- if (info->attrs[NL80211_ATTR_KEY_IDX])
+ if (info->attrs[NL80211_ATTR_KEY_IDX]) {
key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
+ if (key_idx > 5 &&
+ !wiphy_ext_feature_isset(
+ &rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION))
+ return -EINVAL;
+ }
if (info->attrs[NL80211_ATTR_MAC])
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
@@ -3846,7 +3990,7 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
/* Only support setting default key and
* Extended Key ID action NL80211_KEY_SET_TX.
*/
- if (!key.def && !key.defmgmt &&
+ if (!key.def && !key.defmgmt && !key.defbeacon &&
!(key.p.mode == NL80211_KEY_SET_TX))
return -EINVAL;
@@ -3893,6 +4037,24 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
#ifdef CONFIG_CFG80211_WEXT
dev->ieee80211_ptr->wext.default_mgmt_key = key.idx;
#endif
+ } else if (key.defbeacon) {
+ if (key.def_uni || !key.def_multi) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!rdev->ops->set_default_beacon_key) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = nl80211_key_allowed(dev->ieee80211_ptr);
+ if (err)
+ goto out;
+
+ err = rdev_set_default_beacon_key(rdev, dev, key.idx);
+ if (err)
+ goto out;
} else if (key.p.mode == NL80211_KEY_SET_TX &&
wiphy_ext_feature_isset(&rdev->wiphy,
NL80211_EXT_FEATURE_EXT_KEY_ID)) {
@@ -3930,8 +4092,10 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
- if (!key.p.key)
+ if (!key.p.key) {
+ GENL_SET_ERR_MSG(info, "no key");
return -EINVAL;
+ }
if (info->attrs[NL80211_ATTR_MAC])
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
@@ -3945,8 +4109,10 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
/* for now */
if (key.type != NL80211_KEYTYPE_PAIRWISE &&
- key.type != NL80211_KEYTYPE_GROUP)
+ key.type != NL80211_KEYTYPE_GROUP) {
+ GENL_SET_ERR_MSG(info, "key type not pairwise or group");
return -EINVAL;
+ }
if (key.type == NL80211_KEYTYPE_GROUP &&
info->attrs[NL80211_ATTR_VLAN_ID])
@@ -3957,15 +4123,22 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
if (cfg80211_validate_key_settings(rdev, &key.p, key.idx,
key.type == NL80211_KEYTYPE_PAIRWISE,
- mac_addr))
+ mac_addr)) {
+ GENL_SET_ERR_MSG(info, "key setting validation failed");
return -EINVAL;
+ }
wdev_lock(dev->ieee80211_ptr);
err = nl80211_key_allowed(dev->ieee80211_ptr);
- if (!err)
+ if (err)
+ GENL_SET_ERR_MSG(info, "key not allowed");
+ if (!err) {
err = rdev_add_key(rdev, dev, key.idx,
key.type == NL80211_KEYTYPE_PAIRWISE,
mac_addr, &key.p);
+ if (err)
+ GENL_SET_ERR_MSG(info, "key addition failed");
+ }
wdev_unlock(dev->ieee80211_ptr);
return err;
@@ -4518,6 +4691,30 @@ static int nl80211_parse_he_obss_pd(struct nlattr *attrs,
return 0;
}
+static int nl80211_parse_he_bss_color(struct nlattr *attrs,
+ struct cfg80211_he_bss_color *he_bss_color)
+{
+ struct nlattr *tb[NL80211_HE_BSS_COLOR_ATTR_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, NL80211_HE_BSS_COLOR_ATTR_MAX, attrs,
+ he_bss_color_policy, NULL);
+ if (err)
+ return err;
+
+ if (!tb[NL80211_HE_BSS_COLOR_ATTR_COLOR])
+ return -EINVAL;
+
+ he_bss_color->color =
+ nla_get_u8(tb[NL80211_HE_BSS_COLOR_ATTR_COLOR]);
+ he_bss_color->disabled =
+ nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_DISABLED]);
+ he_bss_color->partial =
+ nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_PARTIAL]);
+
+ return 0;
+}
+
static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params,
const u8 *rates)
{
@@ -4562,6 +4759,9 @@ static void nl80211_calculate_ap_params(struct cfg80211_ap_settings *params)
cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ies, ies_len);
if (cap && cap[1] >= sizeof(*params->he_cap) + 1)
params->he_cap = (void *)(cap + 3);
+ cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, ies, ies_len);
+ if (cap && cap[1] >= sizeof(*params->he_oper) + 1)
+ params->he_oper = (void *)(cap + 3);
}
static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
@@ -4809,6 +5009,14 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
+ if (info->attrs[NL80211_ATTR_HE_BSS_COLOR]) {
+ err = nl80211_parse_he_bss_color(
+ info->attrs[NL80211_ATTR_HE_BSS_COLOR],
+ &params.he_bss_color);
+ if (err)
+ return err;
+ }
+
nl80211_calculate_ap_params(&params);
if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])
@@ -6077,11 +6285,22 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_MAC])
params.mac = nla_data(info->attrs[NL80211_ATTR_MAC]);
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ switch (dev->ieee80211_ptr->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_P2P_GO:
+ /* always accept these */
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ /* conditionally accept */
+ if (wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_DEL_IBSS_STA))
+ break;
return -EINVAL;
+ default:
+ return -EINVAL;
+ }
if (!rdev->ops->del_station)
return -EOPNOTSUPP;
@@ -9112,6 +9331,9 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
return r;
settings->control_port_over_nl80211 = true;
+
+ if (info->attrs[NL80211_ATTR_CONTROL_PORT_NO_PREAUTH])
+ settings->control_port_no_preauth = true;
}
if (info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]) {
@@ -10300,6 +10522,15 @@ static int nl80211_setdel_pmksa(struct sk_buff *skb, struct genl_info *info)
pmksa.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]);
}
+ if (info->attrs[NL80211_ATTR_PMK_LIFETIME])
+ pmksa.pmk_lifetime =
+ nla_get_u32(info->attrs[NL80211_ATTR_PMK_LIFETIME]);
+
+ if (info->attrs[NL80211_ATTR_PMK_REAUTH_THRESHOLD])
+ pmksa.pmk_reauth_threshold =
+ nla_get_u8(
+ info->attrs[NL80211_ATTR_PMK_REAUTH_THRESHOLD]);
+
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
!(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP &&
@@ -10545,8 +10776,9 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
return -EOPNOTSUPP;
return cfg80211_mlme_register_mgmt(wdev, info->snd_portid, frame_type,
- nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
- nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
+ nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
+ nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]),
+ info->extack);
}
static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
@@ -13808,6 +14040,141 @@ static int nl80211_probe_mesh_link(struct sk_buff *skb, struct genl_info *info)
return rdev_probe_mesh_link(rdev, dev, dest, buf, len);
}
+static int parse_tid_conf(struct cfg80211_registered_device *rdev,
+ struct nlattr *attrs[], struct net_device *dev,
+ struct cfg80211_tid_cfg *tid_conf,
+ struct genl_info *info, const u8 *peer)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ u64 mask;
+ int err;
+
+ if (!attrs[NL80211_TID_CONFIG_ATTR_TIDS])
+ return -EINVAL;
+
+ tid_conf->config_override =
+ nla_get_flag(attrs[NL80211_TID_CONFIG_ATTR_OVERRIDE]);
+ tid_conf->tids = nla_get_u16(attrs[NL80211_TID_CONFIG_ATTR_TIDS]);
+
+ if (tid_conf->config_override) {
+ if (rdev->ops->reset_tid_config) {
+ err = rdev_reset_tid_config(rdev, dev, peer,
+ tid_conf->tids);
+			/* If a peer is specified, no other
+			 * configuration is allowed
+			 */
+ if (err || peer)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ if (attrs[NL80211_TID_CONFIG_ATTR_NOACK]) {
+ tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_NOACK);
+ tid_conf->noack =
+ nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_NOACK]);
+ }
+
+ if (attrs[NL80211_TID_CONFIG_ATTR_RETRY_SHORT]) {
+ tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_RETRY_SHORT);
+ tid_conf->retry_short =
+ nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_RETRY_SHORT]);
+
+ if (tid_conf->retry_short > rdev->wiphy.max_data_retry_count)
+ return -EINVAL;
+ }
+
+ if (attrs[NL80211_TID_CONFIG_ATTR_RETRY_LONG]) {
+ tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG);
+ tid_conf->retry_long =
+ nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_RETRY_LONG]);
+
+ if (tid_conf->retry_long > rdev->wiphy.max_data_retry_count)
+ return -EINVAL;
+ }
+
+ if (attrs[NL80211_TID_CONFIG_ATTR_AMPDU_CTRL]) {
+ tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
+ tid_conf->ampdu =
+ nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_AMPDU_CTRL]);
+ }
+
+ if (attrs[NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL]) {
+ tid_conf->mask |= BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL);
+ tid_conf->rtscts =
+ nla_get_u8(attrs[NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL]);
+ }
+
+ if (peer)
+ mask = rdev->wiphy.tid_config_support.peer;
+ else
+ mask = rdev->wiphy.tid_config_support.vif;
+
+ if (tid_conf->mask & ~mask) {
+ NL_SET_ERR_MSG(extack, "unsupported TID configuration");
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int nl80211_set_tid_config(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct nlattr *attrs[NL80211_TID_CONFIG_ATTR_MAX + 1];
+ struct net_device *dev = info->user_ptr[1];
+ struct cfg80211_tid_config *tid_config;
+ struct nlattr *tid;
+ int conf_idx = 0, rem_conf;
+ int ret = -EINVAL;
+ u32 num_conf = 0;
+
+ if (!info->attrs[NL80211_ATTR_TID_CONFIG])
+ return -EINVAL;
+
+ if (!rdev->ops->set_tid_config)
+ return -EOPNOTSUPP;
+
+ nla_for_each_nested(tid, info->attrs[NL80211_ATTR_TID_CONFIG],
+ rem_conf)
+ num_conf++;
+
+ tid_config = kzalloc(struct_size(tid_config, tid_conf, num_conf),
+ GFP_KERNEL);
+ if (!tid_config)
+ return -ENOMEM;
+
+ tid_config->n_tid_conf = num_conf;
+
+ if (info->attrs[NL80211_ATTR_MAC])
+ tid_config->peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ nla_for_each_nested(tid, info->attrs[NL80211_ATTR_TID_CONFIG],
+ rem_conf) {
+ ret = nla_parse_nested(attrs, NL80211_TID_CONFIG_ATTR_MAX,
+ tid, NULL, NULL);
+
+ if (ret)
+ goto bad_tid_conf;
+
+ ret = parse_tid_conf(rdev, attrs, dev,
+ &tid_config->tid_conf[conf_idx],
+ info, tid_config->peer);
+ if (ret)
+ goto bad_tid_conf;
+
+ conf_idx++;
+ }
+
+ ret = rdev_set_tid_config(rdev, dev, tid_config);
+
+bad_tid_conf:
+ kfree(tid_config);
+ return ret;
+}
+
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
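
parse_tid_conf() records every attribute present in the request as a bit in tid_conf->mask, then rejects the request if that mask exceeds what the driver advertised for the vif or the peer. The mask discipline in a standalone sketch, with made-up attribute IDs:

#include <stdint.h>
#include <stdio.h>

#define ATTR_NOACK       1
#define ATTR_RETRY_SHORT 2
#define ATTR_RETRY_LONG  3

struct tid_conf { uint64_t mask; };

static int validate_mask(const struct tid_conf *conf, uint64_t supported)
{
        return (conf->mask & ~supported) ? -1 : 0; /* -ENOTSUPP in the kernel */
}

int main(void)
{
        struct tid_conf conf = { 0 };
        uint64_t supported = (1ULL << ATTR_NOACK) |
                             (1ULL << ATTR_RETRY_SHORT);

        conf.mask |= 1ULL << ATTR_NOACK;
        conf.mask |= 1ULL << ATTR_RETRY_LONG;   /* driver did not advertise this */

        printf("%s\n", validate_mask(&conf, supported) ? "rejected" : "ok");
        return 0;
}
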
@@ -14762,6 +15129,13 @@ static const struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_SET_TID_CONFIG,
+ .doit = nl80211_set_tid_config,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_family nl80211_fam __ro_after_init = {
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
index c09fbf09549d..63dc8023447f 100644
--- a/net/wireless/pmsr.c
+++ b/net/wireless/pmsr.c
@@ -126,6 +126,38 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
"FTM: civic location request not supported");
}
+ out->ftm.trigger_based =
+ !!tb[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED];
+ if (out->ftm.trigger_based && !capa->ftm.trigger_based) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED],
+ "FTM: trigger based ranging is not supported");
+ return -EINVAL;
+ }
+
+ out->ftm.non_trigger_based =
+ !!tb[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED];
+ if (out->ftm.non_trigger_based && !capa->ftm.non_trigger_based) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED],
+ "FTM: trigger based ranging is not supported");
+ return -EINVAL;
+ }
+
+ if (out->ftm.trigger_based && out->ftm.non_trigger_based) {
+ NL_SET_ERR_MSG(info->extack,
+ "FTM: can't set both trigger based and non trigger based");
+ return -EINVAL;
+ }
+
+ if ((out->ftm.trigger_based || out->ftm.non_trigger_based) &&
+ out->ftm.preamble != NL80211_PREAMBLE_HE) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE],
+ "FTM: non EDCA based ranging must use HE preamble");
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index e0d34f796d0b..99462f0c4e08 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -136,6 +136,19 @@ rdev_set_default_mgmt_key(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline int
+rdev_set_default_beacon_key(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u8 key_index)
+{
+ int ret;
+
+ trace_rdev_set_default_beacon_key(&rdev->wiphy, netdev, key_index);
+ ret = rdev->ops->set_default_beacon_key(&rdev->wiphy, netdev,
+ key_index);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
static inline int rdev_start_ap(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct cfg80211_ap_settings *settings)
@@ -1313,4 +1326,28 @@ rdev_probe_mesh_link(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline int rdev_set_tid_config(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_tid_config *tid_conf)
+{
+ int ret;
+
+ trace_rdev_set_tid_config(&rdev->wiphy, dev, tid_conf);
+ ret = rdev->ops->set_tid_config(&rdev->wiphy, dev, tid_conf);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
+static inline int rdev_reset_tid_config(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *peer,
+ u8 tids)
+{
+ int ret;
+
+ trace_rdev_reset_tid_config(&rdev->wiphy, dev, peer, tids);
+ ret = rdev->ops->reset_tid_config(&rdev->wiphy, dev, peer, tids);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 1a8218f1bbe0..d476d4da0d09 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1569,6 +1569,8 @@ static u32 map_regdom_flags(u32 rd_flags)
channel_flags |= IEEE80211_CHAN_NO_80MHZ;
if (rd_flags & NL80211_RRF_NO_160MHZ)
channel_flags |= IEEE80211_CHAN_NO_160MHZ;
+ if (rd_flags & NL80211_RRF_NO_HE)
+ channel_flags |= IEEE80211_CHAN_NO_HE;
return channel_flags;
}
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 328402ab64a3..4000382aef48 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -556,9 +556,8 @@ cfg80211_find_sched_scan_req(struct cfg80211_registered_device *rdev, u64 reqid)
{
struct cfg80211_sched_scan_request *pos;
- WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
-
- list_for_each_entry_rcu(pos, &rdev->sched_scan_req_list, list) {
+ list_for_each_entry_rcu(pos, &rdev->sched_scan_req_list, list,
+ lockdep_rtnl_is_held()) {
if (pos->reqid == reqid)
return pos;
}
@@ -1434,8 +1433,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
}
rcu_assign_pointer(tmp.pub.ies, ies);
- signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
- wiphy->max_adj_channel_rssi_comp;
+ signal_valid = data->chan == channel;
res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid, ts);
if (!res)
return NULL;
@@ -1852,8 +1850,7 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS);
ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
- signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
- wiphy->max_adj_channel_rssi_comp;
+ signal_valid = data->chan == channel;
res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid,
jiffies);
if (!res)
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index d32a2ec4d96a..ac3e60aa1fc8 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -1111,9 +1111,16 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
* Delete all the keys ... pairwise keys can't really
* exist any more anyway, but default keys might.
*/
- if (rdev->ops->del_key)
- for (i = 0; i < 6; i++)
+ if (rdev->ops->del_key) {
+ int max_key_idx = 5;
+
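+		/* beacon protection (BIGTK) uses key indexes 6 and 7 */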
+ if (wiphy_ext_feature_isset(
+ wdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION))
+ max_key_idx = 7;
+ for (i = 0; i <= max_key_idx; i++)
rdev_del_key(rdev, dev, i, false, NULL);
+ }
rdev_set_qos_map(rdev, dev, NULL);
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 3ef1679b0e66..839df54cee21 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -510,6 +510,23 @@ TRACE_EVENT(rdev_set_default_mgmt_key,
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index)
);
+TRACE_EVENT(rdev_set_default_beacon_key,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 key_index),
+ TP_ARGS(wiphy, netdev, key_index),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(u8, key_index)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->key_index = key_index;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", key index: %u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->key_index)
+);
+
TRACE_EVENT(rdev_start_ap,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_ap_settings *settings),
@@ -3463,6 +3480,43 @@ TRACE_EVENT(rdev_probe_mesh_link,
WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(dest))
);
+TRACE_EVENT(rdev_set_tid_config,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_tid_config *tid_conf),
+ TP_ARGS(wiphy, netdev, tid_conf),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(peer)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(peer, tid_conf->peer);
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT,
+ WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
+);
+
+TRACE_EVENT(rdev_reset_tid_config,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ const u8 *peer, u8 tids),
+ TP_ARGS(wiphy, netdev, peer, tids),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(peer)
+ __field(u8, tids)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(peer, peer);
+ __entry->tids = tids;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", tids: 0x%x",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->tids)
+);
#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 8481e9ac33da..6590efbbcbb9 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -231,7 +231,12 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
struct key_params *params, int key_idx,
bool pairwise, const u8 *mac_addr)
{
- if (key_idx < 0 || key_idx > 5)
+ int max_key_idx = 5;
+
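+	/* key indexes 6 and 7 are reserved for beacon protection (BIGTK) */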
+ if (wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION))
+ max_key_idx = 7;
+ if (key_idx < 0 || key_idx > max_key_idx)
return -EINVAL;
if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
@@ -934,7 +939,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
return -EOPNOTSUPP;
/* if it's part of a bridge, reject changing type to station/ibss */
- if ((dev->priv_flags & IFF_BRIDGE_PORT) &&
+ if (netif_is_bridge_port(dev) &&
(ntype == NL80211_IFTYPE_ADHOC ||
ntype == NL80211_IFTYPE_STATION ||
ntype == NL80211_IFTYPE_P2P_CLIENT))
diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
index c82999941d3f..d48ad6d29197 100644
--- a/net/x25/x25_forward.c
+++ b/net/x25/x25_forward.c
@@ -131,13 +131,11 @@ out:
void x25_clear_forward_by_lci(unsigned int lci)
{
- struct x25_forward *fwd;
- struct list_head *entry, *tmp;
+ struct x25_forward *fwd, *tmp;
write_lock_bh(&x25_forward_list_lock);
- list_for_each_safe(entry, tmp, &x25_forward_list) {
- fwd = list_entry(entry, struct x25_forward, node);
+ list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
if (fwd->lci == lci) {
list_del(&fwd->node);
kfree(fwd);
@@ -149,13 +147,11 @@ void x25_clear_forward_by_lci(unsigned int lci)
void x25_clear_forward_by_dev(struct net_device *dev)
{
- struct x25_forward *fwd;
- struct list_head *entry, *tmp;
+ struct x25_forward *fwd, *tmp;
write_lock_bh(&x25_forward_list_lock);
- list_for_each_safe(entry, tmp, &x25_forward_list) {
- fwd = list_entry(entry, struct x25_forward, node);
+ list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
if ((fwd->dev1 == dev) || (fwd->dev2 == dev)){
list_del(&fwd->node);
kfree(fwd);
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 89a01ac4e079..b50bb5c76da5 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -19,13 +19,13 @@ struct xdp_ring {
/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
struct xdp_ring ptrs;
- struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
+ struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};
/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
struct xdp_ring ptrs;
- u64 desc[0] ____cacheline_aligned_in_smp;
+ u64 desc[] ____cacheline_aligned_in_smp;
};
struct xsk_queue {
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index f15d6a564b0e..037ea156d2f9 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -100,7 +100,7 @@ static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
flags |= nonblock ? MSG_DONTWAIT : 0;
- skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, NULL, &off, &err);
+ skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err);
if (!skb)
return err;
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index e2db468cf50e..6cc7f7f1dd68 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -46,6 +46,25 @@ static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
pskb_pull(skb, skb->mac_len + x->props.header_len);
}
+static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
+ unsigned int hsize)
+{
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ int phlen = 0;
+
+ if (xo->flags & XFRM_GSO_SEGMENT)
+ skb->transport_header = skb->network_header + hsize;
+
+ skb_reset_mac_len(skb);
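+	/* props.header_len accounts for the BEET pseudo header; exclude it
+	 * from the pull below
+	 */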
+ if (x->sel.family != AF_INET6) {
+ phlen = IPV4_BEET_PHMAXLEN;
+ if (x->outer_mode.family == AF_INET6)
+ phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
+ }
+
+ pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
+}
+
/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
@@ -66,9 +85,16 @@ static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
return __xfrm_transport_prep(x, skb,
sizeof(struct ipv6hdr));
break;
+ case XFRM_MODE_BEET:
+ if (x->outer_mode.family == AF_INET)
+ return __xfrm_mode_beet_prep(x, skb,
+ sizeof(struct iphdr));
+ if (x->outer_mode.family == AF_INET6)
+ return __xfrm_mode_beet_prep(x, skb,
+ sizeof(struct ipv6hdr));
+ break;
case XFRM_MODE_ROUTEOPTIMIZATION:
case XFRM_MODE_IN_TRIGGER:
- case XFRM_MODE_BEET:
break;
}
}
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index fafc7aba705f..2fd3d990d992 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -535,8 +535,8 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
{
struct sk_buff *segs, *nskb;
- BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
- BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET);
+ BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
+ BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_GSO_CB_OFFSET);
segs = skb_gso_segment(skb, 0);
kfree_skb(skb);
if (IS_ERR(segs))
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8a4af86a285e..297b2fdb3c29 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2615,7 +2615,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
xdst->xfrm_genid = xfrm[i]->genid;
dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
- dst1->flags |= DST_HOST;
dst1->lastuse = now;
dst1->input = dst_discard;
@@ -2901,7 +2900,7 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
dst_copy_metrics(dst1, dst);
dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
- dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
+ dst1->flags |= DST_XFRM_QUEUE;
dst1->lastuse = jiffies;
dst1->input = dst_discard;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 170d6e7f31d3..8be2d926acc2 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -612,7 +612,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
{
struct xfrm_state *x;
- x = kmem_cache_alloc(xfrm_state_cache, GFP_ATOMIC | __GFP_ZERO);
+ x = kmem_cache_zalloc(xfrm_state_cache, GFP_ATOMIC);
if (x) {
write_pnet(&x->xs_net, net);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 79b0fee6943b..424f6fe7ce38 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -64,11 +64,11 @@ fds_example-objs := fds_example.o
sockex1-objs := sockex1_user.o
sockex2-objs := sockex2_user.o
sockex3-objs := bpf_load.o sockex3_user.o
-tracex1-objs := bpf_load.o tracex1_user.o
+tracex1-objs := bpf_load.o tracex1_user.o $(TRACE_HELPERS)
tracex2-objs := bpf_load.o tracex2_user.o
tracex3-objs := bpf_load.o tracex3_user.o
tracex4-objs := bpf_load.o tracex4_user.o
-tracex5-objs := bpf_load.o tracex5_user.o
+tracex5-objs := bpf_load.o tracex5_user.o $(TRACE_HELPERS)
tracex6-objs := bpf_load.o tracex6_user.o
tracex7-objs := bpf_load.o tracex7_user.o
test_probe_write_user-objs := bpf_load.o test_probe_write_user_user.o
@@ -88,8 +88,8 @@ xdp2-objs := xdp1_user.o
xdp_router_ipv4-objs := xdp_router_ipv4_user.o
test_current_task_under_cgroup-objs := bpf_load.o $(CGROUP_HELPERS) \
test_current_task_under_cgroup_user.o
-trace_event-objs := bpf_load.o trace_event_user.o $(TRACE_HELPERS)
-sampleip-objs := bpf_load.o sampleip_user.o $(TRACE_HELPERS)
+trace_event-objs := trace_event_user.o $(TRACE_HELPERS)
+sampleip-objs := sampleip_user.o $(TRACE_HELPERS)
tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o
lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o
xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 4574b1939e49..c5ad528f046e 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -665,23 +665,3 @@ int load_bpf_file_fixup_map(const char *path, fixup_map_cb fixup_map)
{
return do_load_bpf_file(path, fixup_map);
}
-
-void read_trace_pipe(void)
-{
- int trace_fd;
-
- trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
- if (trace_fd < 0)
- return;
-
- while (1) {
- static char buf[4096];
- ssize_t sz;
-
- sz = read(trace_fd, buf, sizeof(buf) - 1);
- if (sz > 0) {
- buf[sz] = 0;
- puts(buf);
- }
- }
-}
diff --git a/samples/bpf/bpf_load.h b/samples/bpf/bpf_load.h
index 814894a12974..4fcd258c616f 100644
--- a/samples/bpf/bpf_load.h
+++ b/samples/bpf/bpf_load.h
@@ -53,6 +53,5 @@ extern int map_data_count;
int load_bpf_file(char *path);
int load_bpf_file_fixup_map(const char *path, fixup_map_cb fixup_map);
-void read_trace_pipe(void);
int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
#endif
diff --git a/samples/bpf/sampleip_user.c b/samples/bpf/sampleip_user.c
index b0f115f938bc..4372d2da2f9e 100644
--- a/samples/bpf/sampleip_user.c
+++ b/samples/bpf/sampleip_user.c
@@ -10,21 +10,23 @@
#include <errno.h>
#include <signal.h>
#include <string.h>
-#include <assert.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/bpf.h>
-#include <sys/ioctl.h>
+#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_load.h"
#include "perf-sys.h"
#include "trace_helpers.h"
+#define __must_check
+#include <linux/err.h>
+
#define DEFAULT_FREQ 99
#define DEFAULT_SECS 5
#define MAX_IPS 8192
#define PAGE_OFFSET 0xffff880000000000
+static int map_fd;
static int nr_cpus;
static void usage(void)
@@ -34,9 +36,10 @@ static void usage(void)
printf(" duration # sampling duration (seconds), default 5\n");
}
-static int sampling_start(int *pmu_fd, int freq)
+static int sampling_start(int freq, struct bpf_program *prog,
+ struct bpf_link *links[])
{
- int i;
+ int i, pmu_fd;
struct perf_event_attr pe_sample_attr = {
.type = PERF_TYPE_SOFTWARE,
@@ -47,26 +50,30 @@ static int sampling_start(int *pmu_fd, int freq)
};
for (i = 0; i < nr_cpus; i++) {
- pmu_fd[i] = sys_perf_event_open(&pe_sample_attr, -1 /* pid */, i,
+ pmu_fd = sys_perf_event_open(&pe_sample_attr, -1 /* pid */, i,
-1 /* group_fd */, 0 /* flags */);
- if (pmu_fd[i] < 0) {
+ if (pmu_fd < 0) {
fprintf(stderr, "ERROR: Initializing perf sampling\n");
return 1;
}
- assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF,
- prog_fd[0]) == 0);
- assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0) == 0);
+ links[i] = bpf_program__attach_perf_event(prog, pmu_fd);
+ if (IS_ERR(links[i])) {
+ fprintf(stderr, "ERROR: Attach perf event\n");
+ links[i] = NULL;
+ close(pmu_fd);
+ return 1;
+ }
}
return 0;
}
-static void sampling_end(int *pmu_fd)
+static void sampling_end(struct bpf_link *links[])
{
int i;
for (i = 0; i < nr_cpus; i++)
- close(pmu_fd[i]);
+ bpf_link__destroy(links[i]);
}
struct ipcount {
@@ -128,14 +135,17 @@ static void print_ip_map(int fd)
static void int_exit(int sig)
{
printf("\n");
- print_ip_map(map_fd[0]);
+ print_ip_map(map_fd);
exit(0);
}
int main(int argc, char **argv)
{
+ int opt, freq = DEFAULT_FREQ, secs = DEFAULT_SECS, error = 1;
+ struct bpf_object *obj = NULL;
+ struct bpf_program *prog;
+ struct bpf_link **links;
char filename[256];
- int *pmu_fd, opt, freq = DEFAULT_FREQ, secs = DEFAULT_SECS;
/* process arguments */
while ((opt = getopt(argc, argv, "F:h")) != -1) {
@@ -163,38 +173,58 @@ int main(int argc, char **argv)
}
/* create perf FDs for each CPU */
- nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
- pmu_fd = malloc(nr_cpus * sizeof(int));
- if (pmu_fd == NULL) {
- fprintf(stderr, "ERROR: malloc of pmu_fd\n");
- return 1;
+ nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ links = calloc(nr_cpus, sizeof(struct bpf_link *));
+ if (!links) {
+ fprintf(stderr, "ERROR: malloc of links\n");
+ goto cleanup;
}
- /* load BPF program */
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
- if (load_bpf_file(filename)) {
- fprintf(stderr, "ERROR: loading BPF program (errno %d):\n",
- errno);
- if (strcmp(bpf_log_buf, "") == 0)
- fprintf(stderr, "Try: ulimit -l unlimited\n");
- else
- fprintf(stderr, "%s", bpf_log_buf);
- return 1;
+ obj = bpf_object__open_file(filename, NULL);
+ if (IS_ERR(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ obj = NULL;
+ goto cleanup;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "do_sample");
+ if (!prog) {
+ fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
+ goto cleanup;
}
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd = bpf_object__find_map_fd_by_name(obj, "ip_map");
+ if (map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
signal(SIGINT, int_exit);
signal(SIGTERM, int_exit);
/* do sampling */
printf("Sampling at %d Hertz for %d seconds. Ctrl-C also ends.\n",
freq, secs);
- if (sampling_start(pmu_fd, freq) != 0)
- return 1;
+ if (sampling_start(freq, prog, links) != 0)
+ goto cleanup;
+
sleep(secs);
- sampling_end(pmu_fd);
- free(pmu_fd);
+ error = 0;
+cleanup:
+ sampling_end(links);
/* output sample counts */
- print_ip_map(map_fd[0]);
+ if (!error)
+ print_ip_map(map_fd);
- return 0;
+ free(links);
+ bpf_object__close(obj);
+ return error;
}
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
index 356171bc392b..b6cd358d0418 100644
--- a/samples/bpf/trace_event_user.c
+++ b/samples/bpf/trace_event_user.c
@@ -6,22 +6,25 @@
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
-#include <fcntl.h>
-#include <poll.h>
-#include <sys/ioctl.h>
#include <linux/perf_event.h>
#include <linux/bpf.h>
#include <signal.h>
-#include <assert.h>
#include <errno.h>
#include <sys/resource.h>
+#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_load.h"
#include "perf-sys.h"
#include "trace_helpers.h"
+#define __must_check
+#include <linux/err.h>
+
#define SAMPLE_FREQ 50
+static int pid;
+/* counts, stackmap */
+static int map_fd[2];
+struct bpf_program *prog;
static bool sys_read_seen, sys_write_seen;
static void print_ksym(__u64 addr)
@@ -91,10 +94,10 @@ static void print_stack(struct key_t *key, __u64 count)
}
}
-static void int_exit(int sig)
+static void err_exit(int err)
{
- kill(0, SIGKILL);
- exit(0);
+ kill(pid, SIGKILL);
+ exit(err);
}
static void print_stacks(void)
@@ -102,7 +105,7 @@ static void print_stacks(void)
struct key_t key = {}, next_key;
__u64 value;
__u32 stackid = 0, next_id;
- int fd = map_fd[0], stack_map = map_fd[1];
+ int error = 1, fd = map_fd[0], stack_map = map_fd[1];
sys_read_seen = sys_write_seen = false;
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
@@ -114,7 +117,7 @@ static void print_stacks(void)
printf("\n");
if (!sys_read_seen || !sys_write_seen) {
printf("BUG kernel stack doesn't contain sys_read() and sys_write()\n");
- int_exit(0);
+ err_exit(error);
}
/* clear stack map */
@@ -136,43 +139,52 @@ static inline int generate_load(void)
static void test_perf_event_all_cpu(struct perf_event_attr *attr)
{
- int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
- int *pmu_fd = malloc(nr_cpus * sizeof(int));
- int i, error = 0;
+ int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ struct bpf_link **links = calloc(nr_cpus, sizeof(struct bpf_link *));
+ int i, pmu_fd, error = 1;
+
+ if (!links) {
+ printf("malloc of links failed\n");
+ goto err;
+ }
/* system wide perf event, no need to inherit */
attr->inherit = 0;
/* open perf_event on all cpus */
for (i = 0; i < nr_cpus; i++) {
- pmu_fd[i] = sys_perf_event_open(attr, -1, i, -1, 0);
- if (pmu_fd[i] < 0) {
+ pmu_fd = sys_perf_event_open(attr, -1, i, -1, 0);
+ if (pmu_fd < 0) {
printf("sys_perf_event_open failed\n");
- error = 1;
goto all_cpu_err;
}
- assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
- assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0);
+ links[i] = bpf_program__attach_perf_event(prog, pmu_fd);
+ if (IS_ERR(links[i])) {
+ printf("bpf_program__attach_perf_event failed\n");
+ links[i] = NULL;
+ close(pmu_fd);
+ goto all_cpu_err;
+ }
}
- if (generate_load() < 0) {
- error = 1;
+ if (generate_load() < 0)
goto all_cpu_err;
- }
+
print_stacks();
+ error = 0;
all_cpu_err:
- for (i--; i >= 0; i--) {
- ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
- close(pmu_fd[i]);
- }
- free(pmu_fd);
+ for (i--; i >= 0; i--)
+ bpf_link__destroy(links[i]);
+err:
+ free(links);
if (error)
- int_exit(0);
+ err_exit(error);
}
static void test_perf_event_task(struct perf_event_attr *attr)
{
- int pmu_fd, error = 0;
+ struct bpf_link *link = NULL;
+ int pmu_fd, error = 1;
/* per task perf event, enable inherit so the "dd ..." command can be traced properly.
* Enabling inherit will cause bpf_perf_prog_read_time helper failure.
@@ -183,21 +195,25 @@ static void test_perf_event_task(struct perf_event_attr *attr)
pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
if (pmu_fd < 0) {
printf("sys_perf_event_open failed\n");
- int_exit(0);
+ goto err;
}
- assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
- assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0);
-
- if (generate_load() < 0) {
- error = 1;
+ link = bpf_program__attach_perf_event(prog, pmu_fd);
+ if (IS_ERR(link)) {
+ printf("bpf_program__attach_perf_event failed\n");
+ link = NULL;
+ close(pmu_fd);
goto err;
}
+
+ if (generate_load() < 0)
+ goto err;
+
print_stacks();
+ error = 0;
err:
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
- close(pmu_fd);
+ bpf_link__destroy(link);
if (error)
- int_exit(0);
+ err_exit(error);
}
static void test_bpf_perf_event(void)
@@ -282,29 +298,60 @@ static void test_bpf_perf_event(void)
int main(int argc, char **argv)
{
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ struct bpf_object *obj = NULL;
char filename[256];
+ int error = 1;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
setrlimit(RLIMIT_MEMLOCK, &r);
- signal(SIGINT, int_exit);
- signal(SIGTERM, int_exit);
+ signal(SIGINT, err_exit);
+ signal(SIGTERM, err_exit);
if (load_kallsyms()) {
printf("failed to process /proc/kallsyms\n");
- return 1;
+ goto cleanup;
+ }
+
+ obj = bpf_object__open_file(filename, NULL);
+ if (IS_ERR(obj)) {
+ printf("opening BPF object file failed\n");
+ obj = NULL;
+ goto cleanup;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+ if (!prog) {
+ printf("finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ printf("loading BPF object file failed\n");
+ goto cleanup;
}
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
- return 2;
+ map_fd[0] = bpf_object__find_map_fd_by_name(obj, "counts");
+ map_fd[1] = bpf_object__find_map_fd_by_name(obj, "stackmap");
+ if (map_fd[0] < 0 || map_fd[1] < 0) {
+ printf("finding a counts/stackmap map in obj file failed\n");
+ goto cleanup;
}
- if (fork() == 0) {
+ pid = fork();
+ if (pid == 0) {
read_trace_pipe();
return 0;
+ } else if (pid == -1) {
+ printf("couldn't spawn process\n");
+ goto cleanup;
}
+
test_bpf_perf_event();
- int_exit(0);
- return 0;
+ error = 0;
+
+cleanup:
+ bpf_object__close(obj);
+ err_exit(error);
}
diff --git a/samples/bpf/tracex1_user.c b/samples/bpf/tracex1_user.c
index af8c20608ab5..55fddbd08702 100644
--- a/samples/bpf/tracex1_user.c
+++ b/samples/bpf/tracex1_user.c
@@ -4,6 +4,7 @@
#include <unistd.h>
#include <bpf/bpf.h>
#include "bpf_load.h"
+#include "trace_helpers.h"
int main(int ac, char **argv)
{
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
index c4ab91c89494..c2317b39e0d2 100644
--- a/samples/bpf/tracex5_user.c
+++ b/samples/bpf/tracex5_user.c
@@ -8,6 +8,7 @@
#include <bpf/bpf.h>
#include "bpf_load.h"
#include <sys/resource.h>
+#include "trace_helpers.h"
/* install fake seccomp program to enable seccomp code path inside the kernel,
* so that our kprobe attached to seccomp_phase1() can be triggered
diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py
index 90baf7d70911..f43d193aff3a 100755
--- a/scripts/bpf_helpers_doc.py
+++ b/scripts/bpf_helpers_doc.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
#
# Copyright (C) 2018-2019 Netronome Systems, Inc.
@@ -400,6 +400,7 @@ class PrinterHelpers(Printer):
'struct bpf_fib_lookup',
'struct bpf_perf_event_data',
'struct bpf_perf_event_value',
+ 'struct bpf_pidns_info',
'struct bpf_sock',
'struct bpf_sock_addr',
'struct bpf_sock_ops',
@@ -435,6 +436,7 @@ class PrinterHelpers(Printer):
'struct bpf_fib_lookup',
'struct bpf_perf_event_data',
'struct bpf_perf_event_value',
+ 'struct bpf_pidns_info',
'struct bpf_sock',
'struct bpf_sock_addr',
'struct bpf_sock_ops',
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index dd484e92752e..d09ab4afbda4 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -63,12 +63,18 @@ vmlinux_link()
local lds="${objtree}/${KBUILD_LDS}"
local output=${1}
local objects
+ local strip_debug
info LD ${output}
# skip output file argument
shift
+ # The kallsyms linking does not need debug symbols included.
+ if [ "$output" != "${output#.tmp_vmlinux.kallsyms}" ] ; then
+ strip_debug=-Wl,--strip-debug
+ fi
+
if [ "${SRCARCH}" != "um" ]; then
objects="--whole-archive \
${KBUILD_VMLINUX_OBJS} \
@@ -79,6 +85,7 @@ vmlinux_link()
${@}"
${LD} ${KBUILD_LDFLAGS} ${LDFLAGS_vmlinux} \
+ ${strip_debug#-Wl,} \
-o ${output} \
-T ${lds} ${objects}
else
@@ -91,6 +98,7 @@ vmlinux_link()
${@}"
${CC} ${CFLAGS_vmlinux} \
+ ${strip_debug} \
-o ${output} \
-Wl,-T,${lds} \
${objects} \
@@ -105,7 +113,6 @@ vmlinux_link()
gen_btf()
{
local pahole_ver
- local bin_arch
if ! [ -x "$(command -v ${PAHOLE})" ]; then
echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available"
@@ -118,20 +125,21 @@ gen_btf()
return 1
fi
- info "BTF" ${2}
vmlinux_link ${1}
+
+ info "BTF" ${2}
LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${1}
- # dump .BTF section into raw binary file to link with final vmlinux
- bin_arch=$(LANG=C ${OBJDUMP} -f ${1} | grep architecture | \
- cut -d, -f1 | cut -d' ' -f2)
- bin_format=$(LANG=C ${OBJDUMP} -f ${1} | grep 'file format' | \
- awk '{print $4}')
- ${OBJCOPY} --change-section-address .BTF=0 \
- --set-section-flags .BTF=alloc -O binary \
- --only-section=.BTF ${1} .btf.vmlinux.bin
- ${OBJCOPY} -I binary -O ${bin_format} -B ${bin_arch} \
- --rename-section .data=.BTF .btf.vmlinux.bin ${2}
+ # Create ${2} which contains just the .BTF section but no symbols. Add
+ # SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all
+ # deletes all symbols including __start_BTF and __stop_BTF, which will
+ # be redefined in the linker script. Add 2>/dev/null to suppress GNU
+ # objcopy warnings: "empty loadable segment detected at ..."
+ ${OBJCOPY} --only-section=.BTF --set-section-flags .BTF=alloc,readonly \
+ --strip-all ${1} ${2} 2>/dev/null
+ # Change e_type to ET_REL so that it can be used to link final vmlinux.
+ # Unlike GNU ld, lld does not allow an ET_EXEC input.
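+	# (e_type sits at offset 16 of the ELF header; ET_REL == 1)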
+ printf '\1' | dd of=${2} conv=notrunc bs=1 seek=16 status=none
}
# Create ${2} .o file with all symbols from the ${1} object file
@@ -166,8 +174,8 @@ kallsyms()
kallsyms_step()
{
kallsymso_prev=${kallsymso}
- kallsymso=.tmp_kallsyms${1}.o
- kallsyms_vmlinux=.tmp_vmlinux${1}
+ kallsyms_vmlinux=.tmp_vmlinux.kallsyms${1}
+ kallsymso=${kallsyms_vmlinux}.o
vmlinux_link ${kallsyms_vmlinux} "${kallsymso_prev}" ${btf_vmlinux_bin_o}
kallsyms ${kallsyms_vmlinux} ${kallsymso}
@@ -190,7 +198,6 @@ cleanup()
{
rm -f .btf.*
rm -f .tmp_System.map
- rm -f .tmp_kallsyms*
rm -f .tmp_vmlinux*
rm -f System.map
rm -f vmlinux
@@ -257,9 +264,8 @@ tr '\0' '\n' < modules.builtin.modinfo | sed -n 's/^[[:alnum:]:_]*\.file=//p' |
btf_vmlinux_bin_o=""
if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then
- if gen_btf .tmp_vmlinux.btf .btf.vmlinux.bin.o ; then
- btf_vmlinux_bin_o=.btf.vmlinux.bin.o
- else
+ btf_vmlinux_bin_o=.btf.vmlinux.bin.o
+ if ! gen_btf .tmp_vmlinux.btf $btf_vmlinux_bin_o ; then
echo >&2 "Failed to generate BTF for vmlinux"
echo >&2 "Try to disable CONFIG_DEBUG_INFO_BTF"
exit 1
diff --git a/security/Kconfig b/security/Kconfig
index 2a1a2d396228..cd3cc7da3a55 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -277,11 +277,11 @@ endchoice
config LSM
string "Ordered list of enabled LSMs"
- default "lockdown,yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor" if DEFAULT_SECURITY_SMACK
- default "lockdown,yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo" if DEFAULT_SECURITY_APPARMOR
- default "lockdown,yama,loadpin,safesetid,integrity,tomoyo" if DEFAULT_SECURITY_TOMOYO
- default "lockdown,yama,loadpin,safesetid,integrity" if DEFAULT_SECURITY_DAC
- default "lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
+ default "lockdown,yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor,bpf" if DEFAULT_SECURITY_SMACK
+ default "lockdown,yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo,bpf" if DEFAULT_SECURITY_APPARMOR
+ default "lockdown,yama,loadpin,safesetid,integrity,tomoyo,bpf" if DEFAULT_SECURITY_TOMOYO
+ default "lockdown,yama,loadpin,safesetid,integrity,bpf" if DEFAULT_SECURITY_DAC
+ default "lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor,bpf"
help
A comma-separated list of LSMs, in initialization order.
Any LSMs left off this list will be ignored. This can be
diff --git a/security/Makefile b/security/Makefile
index 746438499029..22e73a3482bd 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -12,6 +12,7 @@ subdir-$(CONFIG_SECURITY_YAMA) += yama
subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin
subdir-$(CONFIG_SECURITY_SAFESETID) += safesetid
subdir-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown
+subdir-$(CONFIG_BPF_LSM) += bpf
# always enable default capabilities
obj-y += commoncap.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/
obj-$(CONFIG_SECURITY_SAFESETID) += safesetid/
obj-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown/
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
+obj-$(CONFIG_BPF_LSM) += bpf/
# Object integrity file lists
subdir-$(CONFIG_INTEGRITY) += integrity
diff --git a/security/bpf/Makefile b/security/bpf/Makefile
new file mode 100644
index 000000000000..c7a89a962084
--- /dev/null
+++ b/security/bpf/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2020 Google LLC.
+
+obj-$(CONFIG_BPF_LSM) := hooks.o
diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
new file mode 100644
index 000000000000..32d32d485451
--- /dev/null
+++ b/security/bpf/hooks.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2020 Google LLC.
+ */
+#include <linux/lsm_hooks.h>
+#include <linux/bpf_lsm.h>
+
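+/* Instantiate one LSM_HOOK_INIT() entry per hook listed in
+ * linux/lsm_hook_defs.h, wiring each hook NAME to its bpf_lsm_NAME
+ * implementation.
+ */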
+static struct security_hook_list bpf_lsm_hooks[] __lsm_ro_after_init = {
+ #define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ LSM_HOOK_INIT(NAME, bpf_lsm_##NAME),
+ #include <linux/lsm_hook_defs.h>
+ #undef LSM_HOOK
+};
+
+static int __init bpf_lsm_init(void)
+{
+ security_add_hooks(bpf_lsm_hooks, ARRAY_SIZE(bpf_lsm_hooks), "bpf");
+ pr_info("LSM support for eBPF active\n");
+ return 0;
+}
+
+DEFINE_LSM(bpf) = {
+ .name = "bpf",
+ .init = bpf_lsm_init,
+};
diff --git a/security/security.c b/security/security.c
index 565bc9b67276..7fed24b9d57e 100644
--- a/security/security.c
+++ b/security/security.c
@@ -669,6 +669,25 @@ static void __init lsm_early_task(struct task_struct *task)
}
/*
+ * The default value of the LSM hook is defined in linux/lsm_hook_defs.h and
+ * can be accessed with:
+ *
+ * LSM_RET_DEFAULT(<hook_name>)
+ *
+ * The macros below define static constants for the default value of each
+ * LSM hook.
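+ *
+ * For example, a hook declared as LSM_HOOK(int, -ENOSYS, task_prctl, ...)
+ * expands to:
+ *
+ *	static const int task_prctl_default = (-ENOSYS);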
+ */
+#define LSM_RET_DEFAULT(NAME) (NAME##_default)
+#define DECLARE_LSM_RET_DEFAULT_void(DEFAULT, NAME)
+#define DECLARE_LSM_RET_DEFAULT_int(DEFAULT, NAME) \
+ static const int LSM_RET_DEFAULT(NAME) = (DEFAULT);
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ DECLARE_LSM_RET_DEFAULT_##RET(DEFAULT, NAME)
+
+#include <linux/lsm_hook_defs.h>
+#undef LSM_HOOK
+
+/*
* Hook list operation macros.
*
* call_void_hook:
@@ -1338,16 +1357,16 @@ int security_inode_getsecurity(struct inode *inode, const char *name, void **buf
int rc;
if (unlikely(IS_PRIVATE(inode)))
- return -EOPNOTSUPP;
+ return LSM_RET_DEFAULT(inode_getsecurity);
/*
* Only one module will provide an attribute with a given name.
*/
hlist_for_each_entry(hp, &security_hook_heads.inode_getsecurity, list) {
rc = hp->hook.inode_getsecurity(inode, name, buffer, alloc);
- if (rc != -EOPNOTSUPP)
+ if (rc != LSM_RET_DEFAULT(inode_getsecurity))
return rc;
}
- return -EOPNOTSUPP;
+ return LSM_RET_DEFAULT(inode_getsecurity);
}
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
@@ -1356,17 +1375,17 @@ int security_inode_setsecurity(struct inode *inode, const char *name, const void
int rc;
if (unlikely(IS_PRIVATE(inode)))
- return -EOPNOTSUPP;
+ return LSM_RET_DEFAULT(inode_setsecurity);
/*
* Only one module will provide an attribute with a given name.
*/
hlist_for_each_entry(hp, &security_hook_heads.inode_setsecurity, list) {
rc = hp->hook.inode_setsecurity(inode, name, value, size,
flags);
- if (rc != -EOPNOTSUPP)
+ if (rc != LSM_RET_DEFAULT(inode_setsecurity))
return rc;
}
- return -EOPNOTSUPP;
+ return LSM_RET_DEFAULT(inode_setsecurity);
}
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
@@ -1740,12 +1759,12 @@ int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
int thisrc;
- int rc = -ENOSYS;
+ int rc = LSM_RET_DEFAULT(task_prctl);
struct security_hook_list *hp;
hlist_for_each_entry(hp, &security_hook_heads.task_prctl, list) {
thisrc = hp->hook.task_prctl(option, arg2, arg3, arg4, arg5);
- if (thisrc != -ENOSYS) {
+ if (thisrc != LSM_RET_DEFAULT(task_prctl)) {
rc = thisrc;
if (thisrc != 0)
break;
@@ -1917,7 +1936,7 @@ int security_getprocattr(struct task_struct *p, const char *lsm, char *name,
continue;
return hp->hook.getprocattr(p, name, value);
}
- return -EINVAL;
+ return LSM_RET_DEFAULT(getprocattr);
}
int security_setprocattr(const char *lsm, const char *name, void *value,
@@ -1930,7 +1949,7 @@ int security_setprocattr(const char *lsm, const char *name, void *value,
continue;
return hp->hook.setprocattr(name, value, size);
}
- return -EINVAL;
+ return LSM_RET_DEFAULT(setprocattr);
}
int security_netlink_send(struct sock *sk, struct sk_buff *skb)
@@ -2315,7 +2334,7 @@ int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
const struct flowi *fl)
{
struct security_hook_list *hp;
- int rc = 1;
+ int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match);
/*
* Since this function is expected to return 0 or 1, the judgment
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 8c48864c844a..6567504665b9 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -271,13 +271,8 @@ static void snd_bt87x_free_risc(struct snd_bt87x *chip)
static void snd_bt87x_pci_error(struct snd_bt87x *chip, unsigned int status)
{
- u16 pci_status;
+ int pci_status = pci_status_get_and_clear_errors(chip->pci);
- pci_read_config_word(chip->pci, PCI_STATUS, &pci_status);
- pci_status &= PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT |
- PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT |
- PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY;
- pci_write_config_word(chip->pci, PCI_STATUS, pci_status);
if (pci_status != PCI_STATUS_DETECTED_PARITY)
dev_err(chip->card->dev,
"Aieee - PCI error! status %#08x, PCI status %#04x\n",
diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore
index b13926432b84..8d6e8901ed2b 100644
--- a/tools/bpf/bpftool/.gitignore
+++ b/tools/bpf/bpftool/.gitignore
@@ -1,7 +1,9 @@
*.d
+/_bpftool
/bpftool
bpftool*.8
bpf-helpers.*
FEATURE-DUMP.bpftool
feature
libbpf
+profiler.skel.h
diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
index 4d08f35034a2..b04156cfd7a3 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
@@ -19,19 +19,24 @@ SYNOPSIS
FEATURE COMMANDS
================
-| **bpftool** **feature probe** [*COMPONENT*] [**macros** [**prefix** *PREFIX*]]
+| **bpftool** **feature probe** [*COMPONENT*] [**full**] [**macros** [**prefix** *PREFIX*]]
| **bpftool** **feature help**
|
| *COMPONENT* := { **kernel** | **dev** *NAME* }
DESCRIPTION
===========
- **bpftool feature probe** [**kernel**] [**macros** [**prefix** *PREFIX*]]
+ **bpftool feature probe** [**kernel**] [**full**] [**macros** [**prefix** *PREFIX*]]
Probe the running kernel and dump a number of eBPF-related
parameters, such as availability of the **bpf()** system call,
JIT status, eBPF program types availability, eBPF helper
functions availability, and more.
+ By default, bpftool **does not run probes** for the
+ **bpf_probe_write_user**\ () and **bpf_trace_printk**\ ()
+ helpers, which print warnings to the kernel logs. To enable
+ them and run all probes, use the **full** keyword.
+
If the **macros** keyword (but not the **-j** option) is
passed, a subset of the output is dumped as a list of
**#define** macros that are ready to be included in a C
@@ -44,16 +49,12 @@ DESCRIPTION
Keyword **kernel** can be omitted. If no probe target is
specified, probing the kernel is the default behaviour.
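+
+ For example, **bpftool feature probe full** probes the running
+ kernel with all probes enabled, including those for the two
+ logging helpers mentioned above.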
- Note that when probed, some eBPF helpers (e.g.
- **bpf_trace_printk**\ () or **bpf_probe_write_user**\ ()) may
- print warnings to kernel logs.
-
- **bpftool feature probe dev** *NAME* [**macros** [**prefix** *PREFIX*]]
+ **bpftool feature probe dev** *NAME* [**full**] [**macros** [**prefix** *PREFIX*]]
Probe network device for supported eBPF features and dump
results to the console.
- The two keywords **macros** and **prefix** have the same
- role as when probing the kernel.
+ The keywords **full**, **macros** and **prefix** have the
+ same role as when probing the kernel.
**bpftool feature help**
Print short help message.
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 64ddf8a4c518..9f19404f470e 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -30,6 +30,7 @@ PROG COMMANDS
| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog tracelog**
| **bpftool** **prog run** *PROG* **data_in** *FILE* [**data_out** *FILE* [**data_size_out** *L*]] [**ctx_in** *FILE* [**ctx_out** *FILE* [**ctx_size_out** *M*]]] [**repeat** *N*]
+| **bpftool** **prog profile** *PROG* [**duration** *DURATION*] *METRICs*
| **bpftool** **prog help**
|
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
@@ -42,11 +43,15 @@ PROG COMMANDS
| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** |
| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** |
| **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl** |
-| **cgroup/getsockopt** | **cgroup/setsockopt**
+| **cgroup/getsockopt** | **cgroup/setsockopt** |
+| **struct_ops** | **fentry** | **fexit** | **freplace**
| }
| *ATTACH_TYPE* := {
| **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector**
| }
+| *METRIC* := {
+| **cycles** | **instructions** | **l1d_loads** | **llc_misses**
+| }
DESCRIPTION
@@ -188,6 +193,12 @@ DESCRIPTION
not all of them can take the **ctx_in**/**ctx_out**
arguments. bpftool does not perform checks on program types.
+ **bpftool prog profile** *PROG* [**duration** *DURATION*] *METRICs*
+ Profile *METRICs* for bpf program *PROG* for *DURATION*
+ seconds or until the user hits Ctrl-C. *DURATION* is
+ optional; if not specified, profiling runs for up to
+ UINT_MAX seconds.
+
**bpftool prog help**
Print short help message.
@@ -310,6 +321,15 @@ EXAMPLES
**# rm /sys/fs/bpf/xdp1**
+|
+| **# bpftool prog profile id 337 duration 10 cycles instructions llc_misses**
+
+::
+
+ 51397 run_cnt
+ 40176203 cycles (83.05%)
+ 42518139 instructions # 1.06 insns per cycle (83.39%)
+ 123 llc_misses # 2.89 LLC misses per million insns (83.15%)
+
SEE ALSO
========
**bpf**\ (2),
diff --git a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
new file mode 100644
index 000000000000..f045cc89dd6d
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
@@ -0,0 +1,116 @@
+==================
+bpftool-struct_ops
+==================
+-------------------------------------------------------------------------------
+tool to register/unregister/introspect BPF struct_ops
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **struct_ops** *COMMAND*
+
+ *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }
+
+ *COMMANDS* :=
+ { **show** | **list** | **dump** | **register** | **unregister** | **help** }
+
+STRUCT_OPS COMMANDS
+===================
+
+| **bpftool** **struct_ops { show | list }** [*STRUCT_OPS_MAP*]
+| **bpftool** **struct_ops dump** [*STRUCT_OPS_MAP*]
+| **bpftool** **struct_ops register** *OBJ*
+| **bpftool** **struct_ops unregister** *STRUCT_OPS_MAP*
+| **bpftool** **struct_ops help**
+|
+| *STRUCT_OPS_MAP* := { **id** *STRUCT_OPS_MAP_ID* | **name** *STRUCT_OPS_MAP_NAME* }
+| *OBJ* := /a/file/of/bpf_struct_ops.o
+
+
+DESCRIPTION
+===========
+ **bpftool struct_ops { show | list }** [*STRUCT_OPS_MAP*]
+ Show brief information about the struct_ops in the system.
+ If *STRUCT_OPS_MAP* is specified, it shows information only
+ for the given struct_ops. Otherwise, it lists all struct_ops
+ currently existing in the system.
+
+ Output will start with the struct_ops map ID, followed by its
+ map name and its kernel struct_ops type.
+
+ **bpftool struct_ops dump** [*STRUCT_OPS_MAP*]
+ Dump detailed information about the struct_ops in the system.
+ If *STRUCT_OPS_MAP* is specified, it dumps information only
+ for the given struct_ops. Otherwise, it dumps all struct_ops
+ currently existing in the system.
+
+ **bpftool struct_ops register** *OBJ*
+ Register bpf struct_ops from *OBJ*. All struct_ops under
+ the ELF section ".struct_ops" will be registered to
+ their kernel subsystems.
+
+ **bpftool struct_ops unregister** *STRUCT_OPS_MAP*
+ Unregister the *STRUCT_OPS_MAP* from the kernel subsystem.
+
+ **bpftool struct_ops help**
+ Print short help message.
+
+OPTIONS
+=======
+ -h, --help
+ Print short generic help message (similar to **bpftool help**).
+
+ -V, --version
+ Print version number (similar to **bpftool version**).
+
+ -j, --json
+ Generate JSON output. For commands that cannot produce JSON, this
+ option has no effect.
+
+ -p, --pretty
+ Generate human-readable JSON output. Implies **-j**.
+
+ -d, --debug
+ Print all logs available, even debug-level information. This
+ includes logs from libbpf as well as from the verifier, when
+ attempting to load programs.
+
+EXAMPLES
+========
+**# bpftool struct_ops show**
+
+::
+
+ 100: dctcp tcp_congestion_ops
+ 105: cubic tcp_congestion_ops
+
+**# bpftool struct_ops unregister id 105**
+
+::
+
+ Unregistered tcp_congestion_ops cubic id 105
+
+**# bpftool struct_ops register bpf_cubic.o**
+
+::
+
+ Registered tcp_congestion_ops cubic id 110
+
+
+SEE ALSO
+========
+ **bpf**\ (2),
+ **bpf-helpers**\ (7),
+ **bpftool**\ (8),
+ **bpftool-prog**\ (8),
+ **bpftool-map**\ (8),
+ **bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
+ **bpftool-net**\ (8),
+ **bpftool-perf**\ (8),
+ **bpftool-btf**\ (8)
+ **bpftool-gen**\ (8)
+
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index c4e810335810..f584d1fdfc64 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -59,10 +59,12 @@ LIBS = $(LIBBPF) -lelf -lz
INSTALL ?= install
RM ?= rm -f
+CLANG ?= clang
FEATURE_USER = .bpftool
-FEATURE_TESTS = libbfd disassembler-four-args reallocarray zlib
-FEATURE_DISPLAY = libbfd disassembler-four-args zlib
+FEATURE_TESTS = libbfd disassembler-four-args reallocarray zlib \
+ clang-bpf-global-var
+FEATURE_DISPLAY = libbfd disassembler-four-args zlib clang-bpf-global-var
check_feat := 1
NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall
@@ -110,14 +112,39 @@ SRCS += $(BFD_SRCS)
endif
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
+_OBJS = $(filter-out $(OUTPUT)prog.o,$(OBJS)) $(OUTPUT)_prog.o
+
+ifeq ($(feature-clang-bpf-global-var),1)
+ __OBJS = $(OBJS)
+else
+ __OBJS = $(_OBJS)
+endif
+
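+# Bootstrap: _bpftool is linked without skeletons so it can generate
+# profiler.skel.h, which the final prog.o (and thus bpftool) embeds.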
+$(OUTPUT)_prog.o: prog.c
+ $(QUIET_CC)$(COMPILE.c) -MMD -DBPFTOOL_WITHOUT_SKELETONS -o $@ $<
+
+$(OUTPUT)_bpftool: $(_OBJS) $(LIBBPF)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(_OBJS) $(LIBS)
+
+skeleton/profiler.bpf.o: skeleton/profiler.bpf.c $(LIBBPF)
+ $(QUIET_CLANG)$(CLANG) \
+ -I$(srctree)/tools/include/uapi/ \
+ -I$(LIBBPF_PATH) -I$(srctree)/tools/lib \
+ -g -O2 -target bpf -c $< -o $@
+
+profiler.skel.h: $(OUTPUT)_bpftool skeleton/profiler.bpf.o
+ $(QUIET_GEN)$(OUTPUT)./_bpftool gen skeleton skeleton/profiler.bpf.o > $@
+
+$(OUTPUT)prog.o: prog.c profiler.skel.h
+ $(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
$(OUTPUT)feature.o: | zdep
-$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
- $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS)
+$(OUTPUT)bpftool: $(__OBJS) $(LIBBPF)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(__OBJS) $(LIBS)
$(OUTPUT)%.o: %.c
$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
@@ -125,6 +152,7 @@ $(OUTPUT)%.o: %.c
clean: $(LIBBPF)-clean
$(call QUIET_CLEAN, bpftool)
$(Q)$(RM) -- $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
+ $(Q)$(RM) -- $(OUTPUT)_bpftool profiler.skel.h skeleton/profiler.bpf.o
$(Q)$(RM) -r -- $(OUTPUT)libbpf/
$(call QUIET_CLEAN, core-gen)
$(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 754d8395e451..45ee99b159e2 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -337,6 +337,7 @@ _bpftool()
local PROG_TYPE='id pinned tag name'
local MAP_TYPE='id pinned name'
+ local METRIC_TYPE='cycles instructions l1d_loads llc_misses'
case $command in
show|list)
[[ $prev != "$command" ]] && return 0
@@ -388,7 +389,7 @@ _bpftool()
_bpftool_get_prog_ids
;;
name)
- _bpftool_get_map_names
+ _bpftool_get_prog_names
;;
pinned)
_filedir
@@ -469,7 +470,8 @@ _bpftool()
cgroup/recvmsg4 cgroup/recvmsg6 \
cgroup/post_bind4 cgroup/post_bind6 \
cgroup/sysctl cgroup/getsockopt \
- cgroup/setsockopt" -- \
+ cgroup/setsockopt struct_ops \
+ fentry fexit freplace" -- \
"$cur" ) )
return 0
;;
@@ -497,9 +499,51 @@ _bpftool()
tracelog)
return 0
;;
+ profile)
+ case $cword in
+ 3)
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ 4)
+ case $prev in
+ id)
+ _bpftool_get_prog_ids
+ ;;
+ name)
+ _bpftool_get_prog_names
+ ;;
+ pinned)
+ _filedir
+ ;;
+ esac
+ return 0
+ ;;
+ 5)
+ COMPREPLY=( $( compgen -W "$METRIC_TYPE duration" -- "$cur" ) )
+ return 0
+ ;;
+ 6)
+ case $prev in
+ duration)
+ return 0
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "$METRIC_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ esac
+ return 0
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "$METRIC_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ esac
+ ;;
run)
- if [[ ${#words[@]} -lt 5 ]]; then
- _filedir
+ if [[ ${#words[@]} -eq 4 ]]; then
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
return 0
fi
case $prev in
@@ -507,6 +551,10 @@ _bpftool()
_bpftool_get_prog_ids
return 0
;;
+ name)
+ _bpftool_get_prog_names
+ return 0
+ ;;
data_in|data_out|ctx_in|ctx_out)
_filedir
return 0
@@ -524,7 +572,35 @@ _bpftool()
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'dump help pin attach detach \
- load loadall show list tracelog run' -- "$cur" ) )
+ load loadall show list tracelog run profile' -- "$cur" ) )
+ ;;
+ esac
+ ;;
+ struct_ops)
+ local STRUCT_OPS_TYPE='id name'
+ case $command in
+ show|list|dump|unregister)
+ case $prev in
+ $command)
+ COMPREPLY=( $( compgen -W "$STRUCT_OPS_TYPE" -- "$cur" ) )
+ ;;
+ id)
+ _bpftool_get_map_ids_for_type struct_ops
+ ;;
+ name)
+ _bpftool_get_map_names_for_type struct_ops
+ ;;
+ esac
+ return 0
+ ;;
+ register)
+ _filedir
+ return 0
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'register unregister show list dump help' \
+ -- "$cur" ) )
;;
esac
;;
@@ -712,11 +788,17 @@ _bpftool()
esac
;;
pin)
- if [[ $prev == "$command" ]]; then
- COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
- else
- _filedir
- fi
+ case $prev in
+ $command)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ ;;
+ id)
+ _bpftool_get_map_ids
+ ;;
+ name)
+ _bpftool_get_map_names
+ ;;
+ esac
return 0
;;
event_pipe)
@@ -843,7 +925,7 @@ _bpftool()
case $command in
skeleton)
_filedir
- ;;
+ ;;
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'skeleton help' -- "$cur" ) )
@@ -943,6 +1025,9 @@ _bpftool()
id)
_bpftool_get_prog_ids
;;
+ name)
+ _bpftool_get_prog_names
+ ;;
pinned)
_filedir
;;
@@ -983,11 +1068,12 @@ _bpftool()
probe)
[[ $prev == "prefix" ]] && return 0
if _bpftool_search_list 'macros'; then
- COMPREPLY+=( $( compgen -W 'prefix' -- "$cur" ) )
+ _bpftool_once_attr 'prefix'
else
COMPREPLY+=( $( compgen -W 'macros' -- "$cur" ) )
fi
_bpftool_one_of_list 'kernel dev'
+ _bpftool_once_attr 'full'
return 0
;;
*)
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index b3745ed711ba..bcaf55b59498 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -389,6 +389,9 @@ static int dump_btf_c(const struct btf *btf,
if (IS_ERR(d))
return PTR_ERR(d);
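+	/* guard the generated vmlinux.h against double inclusion */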
+ printf("#ifndef __VMLINUX_H__\n");
+ printf("#define __VMLINUX_H__\n");
+ printf("\n");
printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
printf("#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)\n");
printf("#endif\n\n");
@@ -412,6 +415,8 @@ static int dump_btf_c(const struct btf *btf,
printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
printf("#pragma clang attribute pop\n");
printf("#endif\n");
+ printf("\n");
+ printf("#endif /* __VMLINUX_H__ */\n");
done:
btf_dump__free(d);
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 01cc52b834fa..497807bec675 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -4,11 +4,13 @@
#include <ctype.h>
#include <stdio.h> /* for (FILE *) used by json_writer */
#include <string.h>
+#include <unistd.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <bpf/btf.h>
+#include <bpf/bpf.h>
#include "json_writer.h"
#include "main.h"
@@ -22,13 +24,102 @@
static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
__u8 bit_offset, const void *data);
-static void btf_dumper_ptr(const void *data, json_writer_t *jw,
- bool is_plain_text)
+static int btf_dump_func(const struct btf *btf, char *func_sig,
+ const struct btf_type *func_proto,
+ const struct btf_type *func, int pos, int size);
+
+static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
+ const struct btf_type *func_proto,
+ __u32 prog_id)
{
- if (is_plain_text)
- jsonw_printf(jw, "%p", *(void **)data);
+ struct bpf_prog_info_linear *prog_info = NULL;
+ const struct btf_type *func_type;
+ const char *prog_name = NULL;
+ struct bpf_func_info *finfo;
+ struct btf *prog_btf = NULL;
+ struct bpf_prog_info *info;
+ int prog_fd, func_sig_len;
+ char prog_str[1024];
+
+ /* Get the ptr's func_proto */
+ func_sig_len = btf_dump_func(d->btf, prog_str, func_proto, NULL, 0,
+ sizeof(prog_str));
+ if (func_sig_len == -1)
+ return -1;
+
+ if (!prog_id)
+ goto print;
+
+ /* Get the bpf_prog's name. Obtain from func_info. */
+ prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ if (prog_fd == -1)
+ goto print;
+
+ prog_info = bpf_program__get_prog_info_linear(prog_fd,
+ 1UL << BPF_PROG_INFO_FUNC_INFO);
+ close(prog_fd);
+ if (IS_ERR(prog_info)) {
+ prog_info = NULL;
+ goto print;
+ }
+ info = &prog_info->info;
+
+ if (!info->btf_id || !info->nr_func_info ||
+ btf__get_from_id(info->btf_id, &prog_btf))
+ goto print;
+ finfo = (struct bpf_func_info *)info->func_info;
+ func_type = btf__type_by_id(prog_btf, finfo->type_id);
+ if (!func_type || !btf_is_func(func_type))
+ goto print;
+
+ prog_name = btf__name_by_offset(prog_btf, func_type->name_off);
+
+print:
+ if (!prog_id)
+ snprintf(&prog_str[func_sig_len],
+ sizeof(prog_str) - func_sig_len, " 0");
+ else if (prog_name)
+ snprintf(&prog_str[func_sig_len],
+ sizeof(prog_str) - func_sig_len,
+ " %s/prog_id:%u", prog_name, prog_id);
else
- jsonw_printf(jw, "%lu", *(unsigned long *)data);
+ snprintf(&prog_str[func_sig_len],
+ sizeof(prog_str) - func_sig_len,
+ " <unknown_prog_name>/prog_id:%u", prog_id);
+
+ prog_str[sizeof(prog_str) - 1] = '\0';
+ jsonw_string(d->jw, prog_str);
+ btf__free(prog_btf);
+ free(prog_info);
+ return 0;
+}
+
+static void btf_dumper_ptr(const struct btf_dumper *d,
+ const struct btf_type *t,
+ const void *data)
+{
+ unsigned long value = *(unsigned long *)data;
+ const struct btf_type *ptr_type;
+ __s32 ptr_type_id;
+
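+	/* with prog_id_as_func_ptr, a value that fits in u32 may be a
+	 * prog_id rather than a kernel pointer
+	 */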
+ if (!d->prog_id_as_func_ptr || value > UINT32_MAX)
+ goto print_ptr_value;
+
+ ptr_type_id = btf__resolve_type(d->btf, t->type);
+ if (ptr_type_id < 0)
+ goto print_ptr_value;
+ ptr_type = btf__type_by_id(d->btf, ptr_type_id);
+ if (!ptr_type || !btf_is_func_proto(ptr_type))
+ goto print_ptr_value;
+
+ if (!dump_prog_id_as_func_ptr(d, ptr_type, value))
+ return;
+
+print_ptr_value:
+ if (d->is_plain_text)
+ jsonw_printf(d->jw, "%p", (void *)value);
+ else
+ jsonw_printf(d->jw, "%lu", value);
}
static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
@@ -43,9 +134,78 @@ static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
return btf_dumper_do_type(d, actual_type_id, bit_offset, data);
}
-static void btf_dumper_enum(const void *data, json_writer_t *jw)
+static int btf_dumper_enum(const struct btf_dumper *d,
+ const struct btf_type *t,
+ const void *data)
+{
+ const struct btf_enum *enums = btf_enum(t);
+ __s64 value;
+ __u16 i;
+
+ switch (t->size) {
+ case 8:
+ value = *(__s64 *)data;
+ break;
+ case 4:
+ value = *(__s32 *)data;
+ break;
+ case 2:
+ value = *(__s16 *)data;
+ break;
+ case 1:
+ value = *(__s8 *)data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < btf_vlen(t); i++) {
+ if (value == enums[i].val) {
+ jsonw_string(d->jw,
+ btf__name_by_offset(d->btf,
+ enums[i].name_off));
+ return 0;
+ }
+ }
+
+ jsonw_int(d->jw, value);
+ return 0;
+}
+
+static bool is_str_array(const struct btf *btf, const struct btf_array *arr,
+ const char *s)
{
- jsonw_printf(jw, "%d", *(int *)data);
+ const struct btf_type *elem_type;
+ const char *end_s;
+
+ if (!arr->nelems)
+ return false;
+
+ elem_type = btf__type_by_id(btf, arr->type);
+ /* Modifiers are skipped but typedefs are not: a typedef to
+ * char does not count as a string for now.
+ */
+ while (elem_type && btf_is_mod(elem_type))
+ elem_type = btf__type_by_id(btf, elem_type->type);
+
+ if (!elem_type || !btf_is_int(elem_type) || elem_type->size != 1)
+ return false;
+
+ if (btf_int_encoding(elem_type) != BTF_INT_CHAR &&
+ strcmp("char", btf__name_by_offset(btf, elem_type->name_off)))
+ return false;
+
+ end_s = s + arr->nelems;
+ while (s < end_s) {
+ if (!*s)
+ return true;
+ if (*s <= 0x1f || *s >= 0x7f)
+ return false;
+ s++;
+ }
+
+ /* '\0' is not found */
+ return false;
}
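
A sketch of how the heuristic classifies a few arrays (contents invented; only single-byte char elements with a NUL terminator and exclusively printable bytes qualify):

    char ok[8]  = "abc";             /* '\0' found, bytes printable -> string */
    char raw[3] = { 'a', 'b', 'c' }; /* no '\0' within nelems -> plain array  */
    char ctl[4] = { 1, 2, 3, 0 };    /* control bytes (<= 0x1f) -> plain array */
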
static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
@@ -57,6 +217,11 @@ static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
int ret = 0;
__u32 i;
+ if (is_str_array(d->btf, arr, data)) {
+ jsonw_string(d->jw, data);
+ return 0;
+ }
+
elem_size = btf__resolve_size(d->btf, arr->type);
if (elem_size < 0)
return elem_size;
@@ -366,10 +531,9 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
case BTF_KIND_ARRAY:
return btf_dumper_array(d, type_id, data);
case BTF_KIND_ENUM:
- btf_dumper_enum(data, d->jw);
- return 0;
+ return btf_dumper_enum(d, t, data);
case BTF_KIND_PTR:
- btf_dumper_ptr(data, d->jw, d->is_plain_text);
+ btf_dumper_ptr(d, t, data);
return 0;
case BTF_KIND_UNKN:
jsonw_printf(d->jw, "(unknown)");
@@ -414,10 +578,6 @@ int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
return -1; \
} while (0)
-static int btf_dump_func(const struct btf *btf, char *func_sig,
- const struct btf_type *func_proto,
- const struct btf_type *func, int pos, int size);
-
static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
char *func_sig, int pos, int size)
{
@@ -526,8 +686,15 @@ static int btf_dump_func(const struct btf *btf, char *func_sig,
BTF_PRINT_ARG(", ");
if (arg->type) {
BTF_PRINT_TYPE(arg->type);
- BTF_PRINT_ARG("%s",
- btf__name_by_offset(btf, arg->name_off));
+ if (arg->name_off)
+ BTF_PRINT_ARG("%s",
+ btf__name_by_offset(btf, arg->name_off));
+ else if (pos && func_sig[pos - 1] == ' ')
+ /* Remove unnecessary space for
+ * FUNC_PROTO that does not have
+ * arg->name_off
+ */
+ func_sig[--pos] = '\0';
} else {
BTF_PRINT_ARG("...");
}
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index b75b8ec5469c..f2223dbdfb0a 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -211,39 +211,14 @@ int do_pin_fd(int fd, const char *name)
return err;
}
-int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
+int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***))
{
- unsigned int id;
- char *endptr;
int err;
int fd;
- if (argc < 3) {
- p_err("too few arguments, id ID and FILE path is required");
- return -1;
- } else if (argc > 3) {
- p_err("too many arguments");
- return -1;
- }
-
- if (!is_prefix(*argv, "id")) {
- p_err("expected 'id' got %s", *argv);
- return -1;
- }
- NEXT_ARG();
-
- id = strtoul(*argv, &endptr, 0);
- if (*endptr) {
- p_err("can't parse %s as ID", *argv);
- return -1;
- }
- NEXT_ARG();
-
- fd = get_fd_by_id(id);
- if (fd < 0) {
- p_err("can't open object by id (%u): %s", id, strerror(errno));
- return -1;
- }
+ fd = get_fd(&argc, &argv);
+ if (fd < 0)
+ return fd;
err = do_pin_fd(fd, *argv);
@@ -597,3 +572,10 @@ int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what)
return 0;
}
+
+int __printf(2, 0)
+print_all_levels(__maybe_unused enum libbpf_print_level level,
+ const char *format, va_list args)
+{
+ return vfprintf(stderr, format, args);
+}
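
The new do_pin_any() contract delegates argument parsing to the callback, so "pin" now accepts any handle the parser understands rather than only "id ID". A minimal sketch of a conforming callback, assuming the usual argc/argv-advancing convention used elsewhere in bpftool:

    /* hypothetical wrapper: resolve an fd from the leading arguments */
    static int get_fd(int *argc, char ***argv)
    {
    	/* prog_parse_fd() consumes e.g. "id ID" or "pinned PATH"
    	 * and returns an fd, or a negative value on error
    	 */
    	return prog_parse_fd(argc, argv);
    }
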
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 941873d778d8..88718ee6a438 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -112,18 +112,12 @@ print_start_section(const char *json_title, const char *plain_title,
}
}
-static void
-print_end_then_start_section(const char *json_title, const char *plain_title,
- const char *define_comment,
- const char *define_prefix)
+static void print_end_section(void)
{
if (json_output)
jsonw_end_object(json_wtr);
else
printf("\n");
-
- print_start_section(json_title, plain_title, define_comment,
- define_prefix);
}
/* Probing functions */
@@ -520,13 +514,38 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
}
static void
+probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
+ const char *define_prefix, unsigned int id,
+ const char *ptype_name, __u32 ifindex)
+{
+ bool res;
+
+ if (!supported_type)
+ res = false;
+ else
+ res = bpf_probe_helper(id, prog_type, ifindex);
+
+ if (json_output) {
+ if (res)
+ jsonw_string(json_wtr, helper_name[id]);
+ } else if (define_prefix) {
+ printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n",
+ define_prefix, ptype_name, helper_name[id],
+ res ? "1" : "0");
+ } else {
+ if (res)
+ printf("\n\t- %s", helper_name[id]);
+ }
+}
+
+static void
probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
- const char *define_prefix, __u32 ifindex)
+ const char *define_prefix, bool full_mode,
+ __u32 ifindex)
{
const char *ptype_name = prog_type_name[prog_type];
char feat_name[128];
unsigned int id;
- bool res;
if (ifindex)
/* Only test helpers for offload-able program types */
@@ -548,21 +567,19 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
}
for (id = 1; id < ARRAY_SIZE(helper_name); id++) {
- if (!supported_type)
- res = false;
- else
- res = bpf_probe_helper(id, prog_type, ifindex);
-
- if (json_output) {
- if (res)
- jsonw_string(json_wtr, helper_name[id]);
- } else if (define_prefix) {
- printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n",
- define_prefix, ptype_name, helper_name[id],
- res ? "1" : "0");
- } else {
- if (res)
- printf("\n\t- %s", helper_name[id]);
+ /* Skip helper functions that emit dmesg messages unless
+ * full mode is requested.
+ */
+ switch (id) {
+ case BPF_FUNC_trace_printk:
+ case BPF_FUNC_probe_write_user:
+ if (!full_mode)
+ continue;
+ /* fallthrough */
+ default:
+ probe_helper_for_progtype(prog_type, supported_type,
+ define_prefix, id, ptype_name,
+ ifindex);
}
}
@@ -584,13 +601,132 @@ probe_large_insn_limit(const char *define_prefix, __u32 ifindex)
res, define_prefix);
}
+static void
+section_system_config(enum probe_component target, const char *define_prefix)
+{
+ switch (target) {
+ case COMPONENT_KERNEL:
+ case COMPONENT_UNSPEC:
+ if (define_prefix)
+ break;
+
+ print_start_section("system_config",
+ "Scanning system configuration...",
+ NULL, /* define_comment never used here */
+ NULL); /* define_prefix always NULL here */
+ if (check_procfs()) {
+ probe_unprivileged_disabled();
+ probe_jit_enable();
+ probe_jit_harden();
+ probe_jit_kallsyms();
+ probe_jit_limit();
+ } else {
+ p_info("/* procfs not mounted, skipping related probes */");
+ }
+ probe_kernel_image_config();
+ print_end_section();
+ break;
+ default:
+ break;
+ }
+}
+
+static bool section_syscall_config(const char *define_prefix)
+{
+ bool res;
+
+ print_start_section("syscall_config",
+ "Scanning system call availability...",
+ "/*** System call availability ***/",
+ define_prefix);
+ res = probe_bpf_syscall(define_prefix);
+ print_end_section();
+
+ return res;
+}
+
+static void
+section_program_types(bool *supported_types, const char *define_prefix,
+ __u32 ifindex)
+{
+ unsigned int i;
+
+ print_start_section("program_types",
+ "Scanning eBPF program types...",
+ "/*** eBPF program types ***/",
+ define_prefix);
+
+ for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+ probe_prog_type(i, supported_types, define_prefix, ifindex);
+
+ print_end_section();
+}
+
+static void section_map_types(const char *define_prefix, __u32 ifindex)
+{
+ unsigned int i;
+
+ print_start_section("map_types",
+ "Scanning eBPF map types...",
+ "/*** eBPF map types ***/",
+ define_prefix);
+
+ for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++)
+ probe_map_type(i, define_prefix, ifindex);
+
+ print_end_section();
+}
+
+static void
+section_helpers(bool *supported_types, const char *define_prefix,
+ bool full_mode, __u32 ifindex)
+{
+ unsigned int i;
+
+ print_start_section("helpers",
+ "Scanning eBPF helper functions...",
+ "/*** eBPF helper functions ***/",
+ define_prefix);
+
+ if (define_prefix)
+ printf("/*\n"
+ " * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n"
+ " * to determine if <helper_name> is available for <prog_type_name>,\n"
+ " * e.g.\n"
+ " * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n"
+ " * // do stuff with this helper\n"
+ " * #elif\n"
+ " * // use a workaround\n"
+ " * #endif\n"
+ " */\n"
+ "#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n"
+ " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
+ define_prefix, define_prefix, define_prefix,
+ define_prefix);
+ for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+ probe_helpers_for_progtype(i, supported_types[i],
+ define_prefix, full_mode, ifindex);
+
+ print_end_section();
+}
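
To make the macro plumbing concrete, here is a sketch of what the generated header looks like and how a consumer tests it (the "FOO_" prefix and the value are invented; real output depends on the probed kernel):

    #define FOO_BPF__PROG_TYPE_xdp__HELPER_bpf_redirect 1

    #define FOO_HAVE_PROG_TYPE_HELPER(prog_type, helper) \
    	FOO_BPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper

    #if FOO_HAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)
    	/* the helper is available for XDP programs */
    #else
    	/* fall back to a workaround */
    #endif
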
+
+static void section_misc(const char *define_prefix, __u32 ifindex)
+{
+ print_start_section("misc",
+ "Scanning miscellaneous eBPF features...",
+ "/*** eBPF misc features ***/",
+ define_prefix);
+ probe_large_insn_limit(define_prefix, ifindex);
+ print_end_section();
+}
+
static int do_probe(int argc, char **argv)
{
enum probe_component target = COMPONENT_UNSPEC;
const char *define_prefix = NULL;
bool supported_types[128] = {};
+ bool full_mode = false;
__u32 ifindex = 0;
- unsigned int i;
char *ifname;
/* Detection assumes user has sufficient privileges (CAP_SYS_ADMIN).
@@ -629,6 +765,9 @@ static int do_probe(int argc, char **argv)
strerror(errno));
return -1;
}
+ } else if (is_prefix(*argv, "full")) {
+ full_mode = true;
+ NEXT_ARG();
} else if (is_prefix(*argv, "macros") && !define_prefix) {
define_prefix = "";
NEXT_ARG();
@@ -658,97 +797,19 @@ static int do_probe(int argc, char **argv)
jsonw_start_object(json_wtr);
}
- switch (target) {
- case COMPONENT_KERNEL:
- case COMPONENT_UNSPEC:
- if (define_prefix)
- break;
-
- print_start_section("system_config",
- "Scanning system configuration...",
- NULL, /* define_comment never used here */
- NULL); /* define_prefix always NULL here */
- if (check_procfs()) {
- probe_unprivileged_disabled();
- probe_jit_enable();
- probe_jit_harden();
- probe_jit_kallsyms();
- probe_jit_limit();
- } else {
- p_info("/* procfs not mounted, skipping related probes */");
- }
- probe_kernel_image_config();
- if (json_output)
- jsonw_end_object(json_wtr);
- else
- printf("\n");
- break;
- default:
- break;
- }
-
- print_start_section("syscall_config",
- "Scanning system call availability...",
- "/*** System call availability ***/",
- define_prefix);
-
- if (!probe_bpf_syscall(define_prefix))
+ section_system_config(target, define_prefix);
+ if (!section_syscall_config(define_prefix))
/* bpf() syscall unavailable, don't probe other BPF features */
goto exit_close_json;
-
- print_end_then_start_section("program_types",
- "Scanning eBPF program types...",
- "/*** eBPF program types ***/",
- define_prefix);
-
- for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
- probe_prog_type(i, supported_types, define_prefix, ifindex);
-
- print_end_then_start_section("map_types",
- "Scanning eBPF map types...",
- "/*** eBPF map types ***/",
- define_prefix);
-
- for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++)
- probe_map_type(i, define_prefix, ifindex);
-
- print_end_then_start_section("helpers",
- "Scanning eBPF helper functions...",
- "/*** eBPF helper functions ***/",
- define_prefix);
-
- if (define_prefix)
- printf("/*\n"
- " * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n"
- " * to determine if <helper_name> is available for <prog_type_name>,\n"
- " * e.g.\n"
- " * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n"
- " * // do stuff with this helper\n"
- " * #elif\n"
- " * // use a workaround\n"
- " * #endif\n"
- " */\n"
- "#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n"
- " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
- define_prefix, define_prefix, define_prefix,
- define_prefix);
- for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
- probe_helpers_for_progtype(i, supported_types[i],
- define_prefix, ifindex);
-
- print_end_then_start_section("misc",
- "Scanning miscellaneous eBPF features...",
- "/*** eBPF misc features ***/",
- define_prefix);
- probe_large_insn_limit(define_prefix, ifindex);
+ section_program_types(supported_types, define_prefix, ifindex);
+ section_map_types(define_prefix, ifindex);
+ section_helpers(supported_types, define_prefix, full_mode, ifindex);
+ section_misc(define_prefix, ifindex);
exit_close_json:
- if (json_output) {
- /* End current "section" of probes */
- jsonw_end_object(json_wtr);
+ if (json_output)
/* End root object */
jsonw_end_object(json_wtr);
- }
return 0;
}
@@ -761,7 +822,7 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %s %s probe [COMPONENT] [macros [prefix PREFIX]]\n"
+ "Usage: %s %s probe [COMPONENT] [full] [macros [prefix PREFIX]]\n"
" %s %s help\n"
"\n"
" COMPONENT := { kernel | dev NAME }\n"
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 6d41bbfc6459..466c269eabdd 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -58,7 +58,7 @@ static int do_help(int argc, char **argv)
" %s batch file FILE\n"
" %s version\n"
"\n"
- " OBJECT := { prog | map | cgroup | perf | net | feature | btf | gen }\n"
+ " OBJECT := { prog | map | cgroup | perf | net | feature | btf | gen | struct_ops }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, bin_name, bin_name);
@@ -79,13 +79,6 @@ static int do_version(int argc, char **argv)
return 0;
}
-static int __printf(2, 0)
-print_all_levels(__maybe_unused enum libbpf_print_level level,
- const char *format, va_list args)
-{
- return vfprintf(stderr, format, args);
-}
-
int cmd_select(const struct cmd *cmds, int argc, char **argv,
int (*help)(int argc, char **argv))
{
@@ -228,6 +221,7 @@ static const struct cmd cmds[] = {
{ "feature", do_feature },
{ "btf", do_btf },
{ "gen", do_gen },
+ { "struct_ops", do_struct_ops },
{ "version", do_version },
{ 0 }
};
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 4e75b58d3989..86f14ce26fd7 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -14,6 +14,8 @@
#include <linux/hashtable.h>
#include <tools/libc_compat.h>
+#include <bpf/libbpf.h>
+
#include "json_writer.h"
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
@@ -76,6 +78,9 @@ static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
[BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
+ [BPF_PROG_TYPE_TRACING] = "tracing",
+ [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
+ [BPF_PROG_TYPE_EXT] = "ext",
};
extern const char * const map_type_name[];
@@ -143,7 +148,7 @@ char *get_fdinfo(int fd, const char *key);
int open_obj_pinned(char *path, bool quiet);
int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
int mount_bpffs_for_pin(const char *name);
-int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32));
+int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***));
int do_pin_fd(int fd, const char *name);
int do_prog(int argc, char **arg);
@@ -156,6 +161,7 @@ int do_tracelog(int argc, char **arg);
int do_feature(int argc, char **argv);
int do_btf(int argc, char **argv);
int do_gen(int argc, char **argv);
+int do_struct_ops(int argc, char **argv);
int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
int prog_parse_fd(int *argc, char ***argv);
@@ -200,6 +206,7 @@ struct btf_dumper {
const struct btf *btf;
json_writer_t *jw;
bool is_plain_text;
+ bool prog_id_as_func_ptr;
};
/* btf_dumper_type - print data along with type information
@@ -226,4 +233,7 @@ struct tcmsg;
int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb);
int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind,
const char *devname, int ifindex);
+
+int print_all_levels(__maybe_unused enum libbpf_print_level level,
+ const char *format, va_list args);
#endif
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index e6c85680b34d..693a632f6813 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -1384,7 +1384,7 @@ static int do_pin(int argc, char **argv)
{
int err;
- err = do_pin_any(argc, argv, bpf_map_get_fd_by_id);
+ err = do_pin_any(argc, argv, map_parse_fd);
if (!err && json_output)
jsonw_null(json_wtr);
return err;
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index b352ab041160..f6a5974a7b0a 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -4,6 +4,7 @@
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
+#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
@@ -11,10 +12,13 @@
#include <time.h>
#include <unistd.h>
#include <net/if.h>
+#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <sys/syscall.h>
#include <linux/err.h>
+#include <linux/perf_event.h>
#include <linux/sizes.h>
#include <bpf/bpf.h>
@@ -809,7 +813,7 @@ static int do_pin(int argc, char **argv)
{
int err;
- err = do_pin_any(argc, argv, bpf_prog_get_fd_by_id);
+ err = do_pin_any(argc, argv, prog_parse_fd);
if (!err && json_output)
jsonw_null(json_wtr);
return err;
@@ -1243,6 +1247,25 @@ free_data_in:
return err;
}
+static int
+get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
+ enum bpf_attach_type *expected_attach_type)
+{
+ libbpf_print_fn_t print_backup;
+ int ret;
+
+ ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
+ if (!ret)
+ return ret;
+
+ /* libbpf_prog_type_by_name() failed, let's re-run with debug level */
+ print_backup = libbpf_set_print(print_all_levels);
+ ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
+ libbpf_set_print(print_backup);
+
+ return ret;
+}
+
static int load_with_options(int argc, char **argv, bool first_prog_only)
{
enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
@@ -1292,8 +1315,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
strcat(type, *argv);
strcat(type, "/");
- err = libbpf_prog_type_by_name(type, &common_prog_type,
- &expected_attach_type);
+ err = get_prog_type_by_name(type, &common_prog_type,
+ &expected_attach_type);
free(type);
if (err < 0)
goto err_free_reuse_maps;
@@ -1392,8 +1415,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
if (prog_type == BPF_PROG_TYPE_UNSPEC) {
const char *sec_name = bpf_program__title(pos, false);
- err = libbpf_prog_type_by_name(sec_name, &prog_type,
- &expected_attach_type);
+ err = get_prog_type_by_name(sec_name, &prog_type,
+ &expected_attach_type);
if (err < 0)
goto err_close_obj;
}
@@ -1537,6 +1560,422 @@ static int do_loadall(int argc, char **argv)
return load_with_options(argc, argv, false);
}
+#ifdef BPFTOOL_WITHOUT_SKELETONS
+
+static int do_profile(int argc, char **argv)
+{
+ p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
+ return 0;
+}
+
+#else /* BPFTOOL_WITHOUT_SKELETONS */
+
+#include "profiler.skel.h"
+
+struct profile_metric {
+ const char *name;
+ struct bpf_perf_event_value val;
+ struct perf_event_attr attr;
+ bool selected;
+
+ /* calculate ratios like instructions per cycle */
+ const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
+ const char *ratio_desc;
+ const float ratio_mul;
+} metrics[] = {
+ {
+ .name = "cycles",
+ .attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .exclude_user = 1,
+ },
+ },
+ {
+ .name = "instructions",
+ .attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_INSTRUCTIONS,
+ .exclude_user = 1,
+ },
+ .ratio_metric = 1,
+ .ratio_desc = "insns per cycle",
+ .ratio_mul = 1.0,
+ },
+ {
+ .name = "l1d_loads",
+ .attr = {
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_L1D |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ .exclude_user = 1,
+ },
+ },
+ {
+ .name = "llc_misses",
+ .attr = {
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_LL |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ .exclude_user = 1
+ },
+ .ratio_metric = 2,
+ .ratio_desc = "LLC misses per million insns",
+ .ratio_mul = 1e6,
+ },
+};
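
Note that ratio_metric is a 1-based index into metrics[] (0 means no ratio), which is why the plain-text printer below subtracts one before dividing. A sketch of the computation, assuming both metrics were selected:

    int r = metrics[m].ratio_metric - 1;	/* 1 -> metrics[0], i.e. cycles */
    double ratio = metrics[m].val.counter * metrics[m].ratio_mul /
    		   metrics[r].val.counter;	/* e.g. insns per cycle */
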
+
+static __u64 profile_total_count;
+
+#define MAX_NUM_PROFILE_METRICS 4
+
+static int profile_parse_metrics(int argc, char **argv)
+{
+ unsigned int metric_cnt;
+ int selected_cnt = 0;
+ unsigned int i;
+
+ metric_cnt = sizeof(metrics) / sizeof(struct profile_metric);
+
+ while (argc > 0) {
+ for (i = 0; i < metric_cnt; i++) {
+ if (is_prefix(argv[0], metrics[i].name)) {
+ if (!metrics[i].selected)
+ selected_cnt++;
+ metrics[i].selected = true;
+ break;
+ }
+ }
+ if (i == metric_cnt) {
+ p_err("unknown metric %s", argv[0]);
+ return -1;
+ }
+ NEXT_ARG();
+ }
+ if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
+ p_err("too many (%d) metrics, please specify no more than %d metrics at at time",
+ selected_cnt, MAX_NUM_PROFILE_METRICS);
+ return -1;
+ }
+ return selected_cnt;
+}
+
+static void profile_read_values(struct profiler_bpf *obj)
+{
+ __u32 m, cpu, num_cpu = obj->rodata->num_cpu;
+ int reading_map_fd, count_map_fd;
+ __u64 counts[num_cpu];
+ __u32 key = 0;
+ int err;
+
+ reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
+ count_map_fd = bpf_map__fd(obj->maps.counts);
+ if (reading_map_fd < 0 || count_map_fd < 0) {
+ p_err("failed to get fd for map");
+ return;
+ }
+
+ err = bpf_map_lookup_elem(count_map_fd, &key, counts);
+ if (err) {
+ p_err("failed to read count_map: %s", strerror(errno));
+ return;
+ }
+
+ profile_total_count = 0;
+ for (cpu = 0; cpu < num_cpu; cpu++)
+ profile_total_count += counts[cpu];
+
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ struct bpf_perf_event_value values[num_cpu];
+
+ if (!metrics[m].selected)
+ continue;
+
+ err = bpf_map_lookup_elem(reading_map_fd, &key, values);
+ if (err) {
+ p_err("failed to read reading_map: %s",
+ strerror(errno));
+ return;
+ }
+ for (cpu = 0; cpu < num_cpu; cpu++) {
+ metrics[m].val.counter += values[cpu].counter;
+ metrics[m].val.enabled += values[cpu].enabled;
+ metrics[m].val.running += values[cpu].running;
+ }
+ key++;
+ }
+}
+
+static void profile_print_readings_json(void)
+{
+ __u32 m;
+
+ jsonw_start_array(json_wtr);
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ if (!metrics[m].selected)
+ continue;
+ jsonw_start_object(json_wtr);
+ jsonw_string_field(json_wtr, "metric", metrics[m].name);
+ jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
+ jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
+ jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
+ jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
+
+ jsonw_end_object(json_wtr);
+ }
+ jsonw_end_array(json_wtr);
+}
+
+static void profile_print_readings_plain(void)
+{
+ __u32 m;
+
+ printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ struct bpf_perf_event_value *val = &metrics[m].val;
+ int r;
+
+ if (!metrics[m].selected)
+ continue;
+ printf("%18llu %-20s", val->counter, metrics[m].name);
+
+ r = metrics[m].ratio_metric - 1;
+ if (r >= 0 && metrics[r].selected &&
+ metrics[r].val.counter > 0) {
+ printf("# %8.2f %-30s",
+ val->counter * metrics[m].ratio_mul /
+ metrics[r].val.counter,
+ metrics[m].ratio_desc);
+ } else {
+ printf("%-41s", "");
+ }
+
+ if (val->enabled > val->running)
+ printf("(%4.2f%%)",
+ val->running * 100.0 / val->enabled);
+ printf("\n");
+ }
+}
+
+static void profile_print_readings(void)
+{
+ if (json_output)
+ profile_print_readings_json();
+ else
+ profile_print_readings_plain();
+}
+
+static char *profile_target_name(int tgt_fd)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_func_info *func_info;
+ const struct btf_type *t;
+ char *name = NULL;
+ struct btf *btf;
+
+ info_linear = bpf_program__get_prog_info_linear(
+ tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ p_err("failed to get info_linear for prog FD %d", tgt_fd);
+ return NULL;
+ }
+
+ if (info_linear->info.btf_id == 0 ||
+ btf__get_from_id(info_linear->info.btf_id, &btf)) {
+ p_err("prog FD %d doesn't have valid btf", tgt_fd);
+ goto out;
+ }
+
+ func_info = (struct bpf_func_info *)(info_linear->info.func_info);
+ t = btf__type_by_id(btf, func_info[0].type_id);
+ if (!t) {
+ p_err("btf %d doesn't have type %d",
+ info_linear->info.btf_id, func_info[0].type_id);
+ goto out;
+ }
+ name = strdup(btf__name_by_offset(btf, t->name_off));
+out:
+ free(info_linear);
+ return name;
+}
+
+static struct profiler_bpf *profile_obj;
+static int profile_tgt_fd = -1;
+static char *profile_tgt_name;
+static int *profile_perf_events;
+static int profile_perf_event_cnt;
+
+static void profile_close_perf_events(struct profiler_bpf *obj)
+{
+ int i;
+
+ for (i = profile_perf_event_cnt - 1; i >= 0; i--)
+ close(profile_perf_events[i]);
+
+ free(profile_perf_events);
+ profile_perf_event_cnt = 0;
+}
+
+static int profile_open_perf_events(struct profiler_bpf *obj)
+{
+ unsigned int cpu, m;
+ int map_fd, pmu_fd;
+
+ profile_perf_events = calloc(
+ obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int));
+ if (!profile_perf_events) {
+ p_err("failed to allocate memory for perf_event array: %s",
+ strerror(errno));
+ return -1;
+ }
+ map_fd = bpf_map__fd(obj->maps.events);
+ if (map_fd < 0) {
+ p_err("failed to get fd for events map");
+ return -1;
+ }
+
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ if (!metrics[m].selected)
+ continue;
+ for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
+ pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
+ -1/*pid*/, cpu, -1/*group_fd*/, 0);
+ if (pmu_fd < 0 ||
+ bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
+ &pmu_fd, BPF_ANY) ||
+ ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
+ p_err("failed to create event %s on cpu %d",
+ metrics[m].name, cpu);
+ return -1;
+ }
+ profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
+ }
+ }
+ return 0;
+}
+
+static void profile_print_and_cleanup(void)
+{
+ profile_close_perf_events(profile_obj);
+ profile_read_values(profile_obj);
+ profile_print_readings();
+ profiler_bpf__destroy(profile_obj);
+
+ close(profile_tgt_fd);
+ free(profile_tgt_name);
+}
+
+static void int_exit(int signo)
+{
+ profile_print_and_cleanup();
+ exit(0);
+}
+
+static int do_profile(int argc, char **argv)
+{
+ int num_metric, num_cpu, err = -1;
+ struct bpf_program *prog;
+ unsigned long duration;
+ char *endptr;
+
+ /* we need at least two args for the prog spec and one metric */
+ if (!REQ_ARGS(3))
+ return -EINVAL;
+
+ /* parse target fd */
+ profile_tgt_fd = prog_parse_fd(&argc, &argv);
+ if (profile_tgt_fd < 0) {
+ p_err("failed to parse fd");
+ return -1;
+ }
+
+ /* parse profiling optional duration */
+ if (argc > 2 && is_prefix(argv[0], "duration")) {
+ NEXT_ARG();
+ duration = strtoul(*argv, &endptr, 0);
+ if (*endptr)
+ usage();
+ NEXT_ARG();
+ } else {
+ duration = UINT_MAX;
+ }
+
+ num_metric = profile_parse_metrics(argc, argv);
+ if (num_metric <= 0)
+ goto out;
+
+ num_cpu = libbpf_num_possible_cpus();
+ if (num_cpu <= 0) {
+ p_err("failed to identify number of CPUs");
+ goto out;
+ }
+
+ profile_obj = profiler_bpf__open();
+ if (!profile_obj) {
+ p_err("failed to open and/or load BPF object");
+ goto out;
+ }
+
+ profile_obj->rodata->num_cpu = num_cpu;
+ profile_obj->rodata->num_metric = num_metric;
+
+ /* adjust map sizes */
+ bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu);
+ bpf_map__resize(profile_obj->maps.fentry_readings, num_metric);
+ bpf_map__resize(profile_obj->maps.accum_readings, num_metric);
+ bpf_map__resize(profile_obj->maps.counts, 1);
+
+ /* resolve the target program's name */
+ profile_tgt_name = profile_target_name(profile_tgt_fd);
+ if (!profile_tgt_name)
+ goto out;
+
+ bpf_object__for_each_program(prog, profile_obj->obj) {
+ err = bpf_program__set_attach_target(prog, profile_tgt_fd,
+ profile_tgt_name);
+ if (err) {
+ p_err("failed to set attach target\n");
+ goto out;
+ }
+ }
+
+ set_max_rlimit();
+ err = profiler_bpf__load(profile_obj);
+ if (err) {
+ p_err("failed to load profile_obj");
+ goto out;
+ }
+
+ err = profile_open_perf_events(profile_obj);
+ if (err)
+ goto out;
+
+ err = profiler_bpf__attach(profile_obj);
+ if (err) {
+ p_err("failed to attach profile_obj");
+ goto out;
+ }
+ signal(SIGINT, int_exit);
+
+ sleep(duration);
+ profile_print_and_cleanup();
+ return 0;
+
+out:
+ profile_close_perf_events(profile_obj);
+ if (profile_obj)
+ profiler_bpf__destroy(profile_obj);
+ close(profile_tgt_fd);
+ free(profile_tgt_name);
+ return err;
+}
+
+#endif /* BPFTOOL_WITHOUT_SKELETONS */
+
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -1560,6 +1999,7 @@ static int do_help(int argc, char **argv)
" [data_out FILE [data_size_out L]] \\\n"
" [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
" [repeat N]\n"
+ " %s %s profile PROG [duration DURATION] METRICs\n"
" %s %s tracelog\n"
" %s %s help\n"
"\n"
@@ -1573,16 +2013,17 @@ static int do_help(int argc, char **argv)
" cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
" cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
" cgroup/sendmsg4 | cgroup/sendmsg6 | cgroup/recvmsg4 |\n"
- " cgroup/recvmsg6 | cgroup/getsockopt |\n"
- " cgroup/setsockopt }\n"
+ " cgroup/recvmsg6 | cgroup/getsockopt | cgroup/setsockopt |\n"
+ " struct_ops | fentry | fexit | freplace }\n"
" ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
" flow_dissector }\n"
+ " METRIC := { cycles | instructions | l1d_loads | llc_misses }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2]);
+ bin_name, argv[-2], bin_name, argv[-2]);
return 0;
}
@@ -1599,6 +2040,7 @@ static const struct cmd cmds[] = {
{ "detach", do_detach },
{ "tracelog", do_tracelog },
{ "run", do_run },
+ { "profile", do_profile },
{ 0 }
};
diff --git a/tools/bpf/bpftool/skeleton/profiler.bpf.c b/tools/bpf/bpftool/skeleton/profiler.bpf.c
new file mode 100644
index 000000000000..20034c12f7c5
--- /dev/null
+++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+#include "profiler.h"
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* map of perf event fds, num_cpu * num_metric entries */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(int));
+} events SEC(".maps");
+
+/* readings at fentry */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+} fentry_readings SEC(".maps");
+
+/* accumulated readings */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+} accum_readings SEC(".maps");
+
+/* sample counts, one per cpu */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u64));
+} counts SEC(".maps");
+
+const volatile __u32 num_cpu = 1;
+const volatile __u32 num_metric = 1;
+#define MAX_NUM_METRICS 4
+
+SEC("fentry/XXX")
+int BPF_PROG(fentry_XXX)
+{
+ struct bpf_perf_event_value *ptrs[MAX_NUM_METRICS];
+ u32 key = bpf_get_smp_processor_id();
+ u32 i;
+
+ /* look up all map slots before reading the counters, to keep
+ * the measurement window tight
+ */
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
+ u32 flag = i;
+
+ ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag);
+ if (!ptrs[i])
+ return 0;
+ }
+
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
+ struct bpf_perf_event_value reading;
+ int err;
+
+ err = bpf_perf_event_read_value(&events, key, &reading,
+ sizeof(reading));
+ if (err)
+ return 0;
+ *(ptrs[i]) = reading;
+ key += num_cpu;
+ }
+
+ return 0;
+}
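
The events map is laid out metric-major: the perf event fd for metric i on a given CPU sits at index cpu + i * num_cpu, which is what the key += num_cpu step above walks. As a sketch:

    u32 idx = cpu + i * num_cpu;	/* events map slot for (metric i, cpu) */
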
+
+static inline void
+fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
+{
+ struct bpf_perf_event_value *before, diff, *accum;
+
+ before = bpf_map_lookup_elem(&fentry_readings, &id);
+ /* only account samples with a valid fentry_reading */
+ if (before && before->counter) {
+ diff.counter = after->counter - before->counter;
+ diff.enabled = after->enabled - before->enabled;
+ diff.running = after->running - before->running;
+
+ accum = bpf_map_lookup_elem(&accum_readings, &id);
+ if (accum) {
+ accum->counter += diff.counter;
+ accum->enabled += diff.enabled;
+ accum->running += diff.running;
+ }
+ }
+}
+
+SEC("fexit/XXX")
+int BPF_PROG(fexit_XXX)
+{
+ struct bpf_perf_event_value readings[MAX_NUM_METRICS];
+ u32 cpu = bpf_get_smp_processor_id();
+ u32 i, one = 1, zero = 0;
+ int err;
+ u64 *count;
+
+ /* read all events before updating the maps, to keep the
+ * measurement window tight
+ */
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
+ err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
+ readings + i, sizeof(*readings));
+ if (err)
+ return 0;
+ }
+ count = bpf_map_lookup_elem(&counts, &zero);
+ if (count) {
+ *count += 1;
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++)
+ fexit_update_maps(i, &readings[i]);
+ }
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/bpf/bpftool/skeleton/profiler.h b/tools/bpf/bpftool/skeleton/profiler.h
new file mode 100644
index 000000000000..1f767e9510f7
--- /dev/null
+++ b/tools/bpf/bpftool/skeleton/profiler.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __PROFILER_H
+#define __PROFILER_H
+
+/* useful typedefs from vmlinux.h */
+
+typedef signed char __s8;
+typedef unsigned char __u8;
+typedef short int __s16;
+typedef short unsigned int __u16;
+typedef int __s32;
+typedef unsigned int __u32;
+typedef long long int __s64;
+typedef long long unsigned int __u64;
+
+typedef __s8 s8;
+typedef __u8 u8;
+typedef __s16 s16;
+typedef __u16 u16;
+typedef __s32 s32;
+typedef __u32 u32;
+typedef __s64 s64;
+typedef __u64 u64;
+
+enum {
+ false = 0,
+ true = 1,
+};
+
+#ifdef __CHECKER__
+#define __bitwise__ __attribute__((bitwise))
+#else
+#define __bitwise__
+#endif
+
+typedef __u16 __bitwise__ __le16;
+typedef __u16 __bitwise__ __be16;
+typedef __u32 __bitwise__ __le32;
+typedef __u32 __bitwise__ __be32;
+typedef __u64 __bitwise__ __le64;
+typedef __u64 __bitwise__ __be64;
+
+typedef __u16 __bitwise__ __sum16;
+typedef __u32 __bitwise__ __wsum;
+
+#endif /* __PROFILER_H */
diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c
new file mode 100644
index 000000000000..2a7befbd11ad
--- /dev/null
+++ b/tools/bpf/bpftool/struct_ops.c
@@ -0,0 +1,596 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2020 Facebook */
+
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <linux/err.h>
+
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+
+#include "json_writer.h"
+#include "main.h"
+
+#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
+
+static const struct btf_type *map_info_type;
+static __u32 map_info_alloc_len;
+static struct btf *btf_vmlinux;
+static __s32 map_info_type_id;
+
+struct res {
+ unsigned int nr_maps;
+ unsigned int nr_errs;
+};
+
+static const struct btf *get_btf_vmlinux(void)
+{
+ if (btf_vmlinux)
+ return btf_vmlinux;
+
+ btf_vmlinux = libbpf_find_kernel_btf();
+ if (IS_ERR(btf_vmlinux))
+ p_err("struct_ops requires kernel CONFIG_DEBUG_INFO_BTF=y");
+
+ return btf_vmlinux;
+}
+
+static const char *get_kern_struct_ops_name(const struct bpf_map_info *info)
+{
+ const struct btf *kern_btf;
+ const struct btf_type *t;
+ const char *st_ops_name;
+
+ kern_btf = get_btf_vmlinux();
+ if (IS_ERR(kern_btf))
+ return "<btf_vmlinux_not_found>";
+
+ t = btf__type_by_id(kern_btf, info->btf_vmlinux_value_type_id);
+ st_ops_name = btf__name_by_offset(kern_btf, t->name_off);
+ st_ops_name += strlen(STRUCT_OPS_VALUE_PREFIX);
+
+ return st_ops_name;
+}
+
+static __s32 get_map_info_type_id(void)
+{
+ const struct btf *kern_btf;
+
+ if (map_info_type_id)
+ return map_info_type_id;
+
+ kern_btf = get_btf_vmlinux();
+ if (IS_ERR(kern_btf)) {
+ map_info_type_id = PTR_ERR(kern_btf);
+ return map_info_type_id;
+ }
+
+ map_info_type_id = btf__find_by_name_kind(kern_btf, "bpf_map_info",
+ BTF_KIND_STRUCT);
+ if (map_info_type_id < 0) {
+ p_err("can't find bpf_map_info from btf_vmlinux");
+ return map_info_type_id;
+ }
+ map_info_type = btf__type_by_id(kern_btf, map_info_type_id);
+
+ /* Ensure map_info_alloc() returns at least what bpftool needs */
+ map_info_alloc_len = map_info_type->size;
+ if (map_info_alloc_len < sizeof(struct bpf_map_info))
+ map_info_alloc_len = sizeof(struct bpf_map_info);
+
+ return map_info_type_id;
+}
+
+/* If the subcmd needs to print out the bpf_map_info,
+ * it should always call map_info_alloc to allocate
+ * a bpf_map_info object instead of allocating it
+ * on the stack.
+ *
+ * map_info_alloc() will take the running kernel's btf
+ * into account. i.e. it will consider the
+ * sizeof(struct bpf_map_info) of the running kernel.
+ *
+ * It will enable the "struct_ops" cmd to print the latest
+ * "struct bpf_map_info".
+ *
+ * [ Recall that "struct_ops" requires the kernel's btf to
+ * be available ]
+ */
+static struct bpf_map_info *map_info_alloc(__u32 *alloc_len)
+{
+ struct bpf_map_info *info;
+
+ if (get_map_info_type_id() < 0)
+ return NULL;
+
+ info = calloc(1, map_info_alloc_len);
+ if (!info)
+ p_err("mem alloc failed");
+ else
+ *alloc_len = map_info_alloc_len;
+
+ return info;
+}
+
+/* Iterate over all struct_ops maps in the system.
+ * It returns the fd in "*res_fd" and map_info in "*info".
+ * In the very first iteration, info->id should be 0.
+ * An optional map "*name" filter can be specified.
+ * The filter can be made more flexible in the future.
+ * e.g. filter by kernel-struct-ops-name, regex-name, glob-name, ...etc.
+ *
+ * Return value:
+ * 1: A struct_ops map found. It is returned in "*res_fd" and "*info".
+ * The caller can continue to call get_next in the future.
+ * 0: No struct_ops map is returned.
+ * All struct_ops maps have been found.
+ * -1: Error and the caller should abort the iteration.
+ */
+static int get_next_struct_ops_map(const char *name, int *res_fd,
+ struct bpf_map_info *info, __u32 info_len)
+{
+ __u32 id = info->id;
+ int err, fd;
+
+ while (true) {
+ err = bpf_map_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT)
+ return 0;
+ p_err("can't get next map: %s", strerror(errno));
+ return -1;
+ }
+
+ fd = bpf_map_get_fd_by_id(id);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ continue;
+ p_err("can't get map by id (%u): %s",
+ id, strerror(errno));
+ return -1;
+ }
+
+ err = bpf_obj_get_info_by_fd(fd, info, &info_len);
+ if (err) {
+ p_err("can't get map info: %s", strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ if (info->type == BPF_MAP_TYPE_STRUCT_OPS &&
+ (!name || !strcmp(name, info->name))) {
+ *res_fd = fd;
+ return 1;
+ }
+ close(fd);
+ }
+}
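
A sketch of the intended calling pattern (hypothetical caller; do_search() below drives it the same way):

    struct bpf_map_info *info;
    __u32 info_len;
    int fd, err;

    info = map_info_alloc(&info_len);	/* info->id starts out as 0 */
    while ((err = get_next_struct_ops_map(NULL, &fd, info, info_len)) == 1) {
    	/* ... use fd and *info ... */
    	close(fd);
    }
    /* err == 0: iteration complete; err == -1: abort */
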
+
+static int cmd_retval(const struct res *res, bool must_have_one_map)
+{
+ if (res->nr_errs || (!res->nr_maps && must_have_one_map))
+ return -1;
+
+ return 0;
+}
+
+/* "data" is the work_func private storage */
+typedef int (*work_func)(int fd, const struct bpf_map_info *info, void *data,
+ struct json_writer *wtr);
+
+/* Find all struct_ops map in the system.
+ * Filter out by "name" (if specified).
+ * Then call "func(fd, info, data, wtr)" on each struct_ops map found.
+ */
+static struct res do_search(const char *name, work_func func, void *data,
+ struct json_writer *wtr)
+{
+ struct bpf_map_info *info;
+ struct res res = {};
+ __u32 info_len;
+ int fd, err;
+
+ info = map_info_alloc(&info_len);
+ if (!info) {
+ res.nr_errs++;
+ return res;
+ }
+
+ if (wtr)
+ jsonw_start_array(wtr);
+ while ((err = get_next_struct_ops_map(name, &fd, info, info_len)) == 1) {
+ res.nr_maps++;
+ err = func(fd, info, data, wtr);
+ if (err)
+ res.nr_errs++;
+ close(fd);
+ }
+ if (wtr)
+ jsonw_end_array(wtr);
+
+ if (err)
+ res.nr_errs++;
+
+ if (!wtr && name && !res.nr_errs && !res.nr_maps)
+ /* Nothing (not even an empty []) has been printed in
+ * this case, so explicitly report that nothing was
+ * found for "name".
+ */
+ p_err("no struct_ops found for %s", name);
+ else if (!wtr && json_output && !res.nr_errs)
+ /* The "func()" above is not writing any json (i.e. !wtr
+ * test here).
+ *
+ * However, "-j" is enabled and there is no errs here,
+ * so call json_null() as the current convention of
+ * other cmds.
+ */
+ jsonw_null(json_wtr);
+
+ free(info);
+ return res;
+}
+
+static struct res do_one_id(const char *id_str, work_func func, void *data,
+ struct json_writer *wtr)
+{
+ struct bpf_map_info *info;
+ struct res res = {};
+ unsigned long id;
+ __u32 info_len;
+ char *endptr;
+ int fd;
+
+ id = strtoul(id_str, &endptr, 0);
+ if (*endptr || !id || id > UINT32_MAX) {
+ p_err("invalid id %s", id_str);
+ res.nr_errs++;
+ return res;
+ }
+
+ fd = bpf_map_get_fd_by_id(id);
+ if (fd == -1) {
+ p_err("can't get map by id (%lu): %s", id, strerror(errno));
+ res.nr_errs++;
+ return res;
+ }
+
+ info = map_info_alloc(&info_len);
+ if (!info) {
+ res.nr_errs++;
+ goto done;
+ }
+
+ if (bpf_obj_get_info_by_fd(fd, info, &info_len)) {
+ p_err("can't get map info: %s", strerror(errno));
+ res.nr_errs++;
+ goto done;
+ }
+
+ if (info->type != BPF_MAP_TYPE_STRUCT_OPS) {
+ p_err("%s id %u is not a struct_ops map", info->name, info->id);
+ res.nr_errs++;
+ goto done;
+ }
+
+ res.nr_maps++;
+
+ if (func(fd, info, data, wtr))
+ res.nr_errs++;
+ else if (!wtr && json_output)
+ /* The "func()" above is not writing any json (i.e. !wtr
+ * test here).
+ *
+ * However, "-j" is enabled and there is no errs here,
+ * so call json_null() as the current convention of
+ * other cmds.
+ */
+ jsonw_null(json_wtr);
+
+done:
+ free(info);
+ close(fd);
+
+ return res;
+}
+
+static struct res do_work_on_struct_ops(const char *search_type,
+ const char *search_term,
+ work_func func, void *data,
+ struct json_writer *wtr)
+{
+ if (search_type) {
+ if (is_prefix(search_type, "id"))
+ return do_one_id(search_term, func, data, wtr);
+ else if (!is_prefix(search_type, "name"))
+ usage();
+ }
+
+ return do_search(search_term, func, data, wtr);
+}
+
+static int __do_show(int fd, const struct bpf_map_info *info, void *data,
+ struct json_writer *wtr)
+{
+ if (wtr) {
+ jsonw_start_object(wtr);
+ jsonw_uint_field(wtr, "id", info->id);
+ jsonw_string_field(wtr, "name", info->name);
+ jsonw_string_field(wtr, "kernel_struct_ops",
+ get_kern_struct_ops_name(info));
+ jsonw_end_object(wtr);
+ } else {
+ printf("%u: %-15s %-32s\n", info->id, info->name,
+ get_kern_struct_ops_name(info));
+ }
+
+ return 0;
+}
+
+static int do_show(int argc, char **argv)
+{
+ const char *search_type = NULL, *search_term = NULL;
+ struct res res;
+
+ if (argc && argc != 2)
+ usage();
+
+ if (argc == 2) {
+ search_type = GET_ARG();
+ search_term = GET_ARG();
+ }
+
+ res = do_work_on_struct_ops(search_type, search_term, __do_show,
+ NULL, json_wtr);
+
+ return cmd_retval(&res, !!search_term);
+}
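
In the plain-text path each map prints as "ID: map_name kernel_struct_ops_name" on one line; a registered tcp_congestion_ops map might show up as (values invented):

    42: cubic           tcp_congestion_ops
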
+
+static int __do_dump(int fd, const struct bpf_map_info *info, void *data,
+ struct json_writer *wtr)
+{
+ struct btf_dumper *d = (struct btf_dumper *)data;
+ const struct btf_type *struct_ops_type;
+ const struct btf *kern_btf = d->btf;
+ const char *struct_ops_name;
+ int zero = 0;
+ void *value;
+
+ /* note: d->jw == wtr */
+
+ /* The kernel supporting BPF_MAP_TYPE_STRUCT_OPS must have
+ * btf_vmlinux_value_type_id.
+ */
+ struct_ops_type = btf__type_by_id(kern_btf,
+ info->btf_vmlinux_value_type_id);
+ struct_ops_name = btf__name_by_offset(kern_btf,
+ struct_ops_type->name_off);
+ value = calloc(1, info->value_size);
+ if (!value) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+
+ if (bpf_map_lookup_elem(fd, &zero, value)) {
+ p_err("can't lookup struct_ops map %s id %u",
+ info->name, info->id);
+ free(value);
+ return -1;
+ }
+
+ jsonw_start_object(wtr);
+ jsonw_name(wtr, "bpf_map_info");
+ btf_dumper_type(d, map_info_type_id, (void *)info);
+ jsonw_end_object(wtr);
+
+ jsonw_start_object(wtr);
+ jsonw_name(wtr, struct_ops_name);
+ btf_dumper_type(d, info->btf_vmlinux_value_type_id, value);
+ jsonw_end_object(wtr);
+
+ free(value);
+
+ return 0;
+}
+
+static int do_dump(int argc, char **argv)
+{
+ const char *search_type = NULL, *search_term = NULL;
+ json_writer_t *wtr = json_wtr;
+ const struct btf *kern_btf;
+ struct btf_dumper d = {};
+ struct res res;
+
+ if (argc && argc != 2)
+ usage();
+
+ if (argc == 2) {
+ search_type = GET_ARG();
+ search_term = GET_ARG();
+ }
+
+ kern_btf = get_btf_vmlinux();
+ if (IS_ERR(kern_btf))
+ return -1;
+
+ if (!json_output) {
+ wtr = jsonw_new(stdout);
+ if (!wtr) {
+ p_err("can't create json writer");
+ return -1;
+ }
+ jsonw_pretty(wtr, true);
+ }
+
+ d.btf = kern_btf;
+ d.jw = wtr;
+ d.is_plain_text = !json_output;
+ d.prog_id_as_func_ptr = true;
+
+ res = do_work_on_struct_ops(search_type, search_term, __do_dump, &d,
+ wtr);
+
+ if (!json_output)
+ jsonw_destroy(&wtr);
+
+ return cmd_retval(&res, !!search_term);
+}
+
+static int __do_unregister(int fd, const struct bpf_map_info *info, void *data,
+ struct json_writer *wtr)
+{
+ int zero = 0;
+
+ if (bpf_map_delete_elem(fd, &zero)) {
+ p_err("can't unload %s %s id %u: %s",
+ get_kern_struct_ops_name(info), info->name,
+ info->id, strerror(errno));
+ return -1;
+ }
+
+ p_info("Unregistered %s %s id %u",
+ get_kern_struct_ops_name(info), info->name,
+ info->id);
+
+ return 0;
+}
+
+static int do_unregister(int argc, char **argv)
+{
+ const char *search_type, *search_term;
+ struct res res;
+
+ if (argc != 2)
+ usage();
+
+ search_type = GET_ARG();
+ search_term = GET_ARG();
+
+ res = do_work_on_struct_ops(search_type, search_term,
+ __do_unregister, NULL, NULL);
+
+ return cmd_retval(&res, true);
+}
+
+static int do_register(int argc, char **argv)
+{
+ const struct bpf_map_def *def;
+ struct bpf_map_info info = {};
+ __u32 info_len = sizeof(info);
+ int nr_errs = 0, nr_maps = 0;
+ struct bpf_object *obj;
+ struct bpf_link *link;
+ struct bpf_map *map;
+ const char *file;
+
+ if (argc != 1)
+ usage();
+
+ file = GET_ARG();
+
+ obj = bpf_object__open(file);
+ if (IS_ERR_OR_NULL(obj))
+ return -1;
+
+ set_max_rlimit();
+
+ if (bpf_object__load(obj)) {
+ bpf_object__close(obj);
+ return -1;
+ }
+
+ bpf_object__for_each_map(map, obj) {
+ def = bpf_map__def(map);
+ if (def->type != BPF_MAP_TYPE_STRUCT_OPS)
+ continue;
+
+ link = bpf_map__attach_struct_ops(map);
+ if (IS_ERR(link)) {
+ p_err("can't register struct_ops %s: %s",
+ bpf_map__name(map),
+ strerror(-PTR_ERR(link)));
+ nr_errs++;
+ continue;
+ }
+ nr_maps++;
+
+ bpf_link__disconnect(link);
+ bpf_link__destroy(link);
+
+ if (!bpf_obj_get_info_by_fd(bpf_map__fd(map), &info,
+ &info_len))
+ p_info("Registered %s %s id %u",
+ get_kern_struct_ops_name(&info),
+ bpf_map__name(map),
+ info.id);
+ else
+ /* Not p_err. The struct_ops was attached
+ * successfully.
+ */
+ p_info("Registered %s but can't find id: %s",
+ bpf_map__name(map), strerror(errno));
+ }
+
+ bpf_object__close(obj);
+
+ if (nr_errs)
+ return -1;
+
+ if (!nr_maps) {
+ p_err("no struct_ops found in %s", file);
+ return -1;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ return 0;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %s %s { show | list } [STRUCT_OPS_MAP]\n"
+ " %s %s dump [STRUCT_OPS_MAP]\n"
+ " %s %s register OBJ\n"
+ " %s %s unregister STRUCT_OPS_MAP\n"
+ " %s %s help\n"
+ "\n"
+ " OPTIONS := { {-j|--json} [{-p|--pretty}] }\n"
+ " STRUCT_OPS_MAP := [ id STRUCT_OPS_MAP_ID | name STRUCT_OPS_MAP_NAME ]\n",
+ bin_name, argv[-2], bin_name, argv[-2],
+ bin_name, argv[-2], bin_name, argv[-2],
+ bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "register", do_register },
+ { "unregister", do_unregister },
+ { "dump", do_dump },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_struct_ops(int argc, char **argv)
+{
+ int err;
+
+ err = cmd_select(cmds, argc, argv, do_help);
+
+ btf__free(btf_vmlinux);
+ return err;
+}
diff --git a/tools/bpf/runqslower/runqslower.bpf.c b/tools/bpf/runqslower/runqslower.bpf.c
index 48a39f72fadf..1f18a409f044 100644
--- a/tools/bpf/runqslower/runqslower.bpf.c
+++ b/tools/bpf/runqslower/runqslower.bpf.c
@@ -5,9 +5,7 @@
#include "runqslower.h"
#define TASK_RUNNING 0
-
-#define BPF_F_INDEX_MASK 0xffffffffULL
-#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
+#define BPF_F_CURRENT_CPU 0xffffffffULL
const volatile __u64 min_us = 0;
const volatile pid_t targ_pid = 0;
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 7ac0d8088565..ab8e89a7009c 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -67,7 +67,8 @@ FILES= \
test-llvm.bin \
test-llvm-version.bin \
test-libaio.bin \
- test-libzstd.bin
+ test-libzstd.bin \
+ test-clang-bpf-global-var.bin
FILES := $(addprefix $(OUTPUT),$(FILES))
@@ -75,6 +76,7 @@ CC ?= $(CROSS_COMPILE)gcc
CXX ?= $(CROSS_COMPILE)g++
PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config
LLVM_CONFIG ?= llvm-config
+CLANG ?= clang
all: $(FILES)
@@ -321,6 +323,11 @@ $(OUTPUT)test-libaio.bin:
$(OUTPUT)test-libzstd.bin:
$(BUILD) -lzstd
+$(OUTPUT)test-clang-bpf-global-var.bin:
+ $(CLANG) -S -g -target bpf -o - $(patsubst %.bin,%.c,$(@F)) | \
+ grep BTF_KIND_VAR
+
+
###############################
clean:
diff --git a/tools/build/feature/test-clang-bpf-global-var.c b/tools/build/feature/test-clang-bpf-global-var.c
new file mode 100644
index 000000000000..221f1481d52e
--- /dev/null
+++ b/tools/build/feature/test-clang-bpf-global-var.c
@@ -0,0 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+volatile int global_value_for_test = 1;
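
This feature test works by having clang emit BPF assembly with debug info for the single global above and grepping the output for a BTF_KIND_VAR record: clang versions able to place global variables in BTF (a prerequisite for the skeleton-based profiler) produce it, older versions do not.
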
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 22f235260a3a..2e29a671d67e 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -73,7 +73,7 @@ struct bpf_insn {
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
- __u8 data[0]; /* Arbitrary size */
+ __u8 data[]; /* Arbitrary size */
};
struct bpf_cgroup_storage_key {
@@ -111,6 +111,8 @@ enum bpf_cmd {
BPF_MAP_LOOKUP_AND_DELETE_BATCH,
BPF_MAP_UPDATE_BATCH,
BPF_MAP_DELETE_BATCH,
+ BPF_LINK_CREATE,
+ BPF_LINK_UPDATE,
};
enum bpf_map_type {
@@ -181,6 +183,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_TRACING,
BPF_PROG_TYPE_STRUCT_OPS,
BPF_PROG_TYPE_EXT,
+ BPF_PROG_TYPE_LSM,
};
enum bpf_attach_type {
@@ -210,6 +213,8 @@ enum bpf_attach_type {
BPF_TRACE_RAW_TP,
BPF_TRACE_FENTRY,
BPF_TRACE_FEXIT,
+ BPF_MODIFY_RETURN,
+ BPF_LSM_MAC,
__MAX_BPF_ATTACH_TYPE
};
@@ -325,44 +330,46 @@ enum bpf_attach_type {
#define BPF_PSEUDO_CALL 1
/* flags for BPF_MAP_UPDATE_ELEM command */
-#define BPF_ANY 0 /* create new element or update existing */
-#define BPF_NOEXIST 1 /* create new element if it didn't exist */
-#define BPF_EXIST 2 /* update existing element */
-#define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */
+enum {
+ BPF_ANY = 0, /* create new element or update existing */
+ BPF_NOEXIST = 1, /* create new element if it didn't exist */
+ BPF_EXIST = 2, /* update existing element */
+ BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */
+};
/* flags for BPF_MAP_CREATE command */
-#define BPF_F_NO_PREALLOC (1U << 0)
+enum {
+ BPF_F_NO_PREALLOC = (1U << 0),
/* Instead of having one common LRU list in the
* BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
* which can scale and perform better.
* Note, the LRU nodes (including free nodes) cannot be moved
* across different LRU lists.
*/
-#define BPF_F_NO_COMMON_LRU (1U << 1)
+ BPF_F_NO_COMMON_LRU = (1U << 1),
/* Specify numa node during map creation */
-#define BPF_F_NUMA_NODE (1U << 2)
-
-#define BPF_OBJ_NAME_LEN 16U
+ BPF_F_NUMA_NODE = (1U << 2),
/* Flags for accessing BPF object from syscall side. */
-#define BPF_F_RDONLY (1U << 3)
-#define BPF_F_WRONLY (1U << 4)
+ BPF_F_RDONLY = (1U << 3),
+ BPF_F_WRONLY = (1U << 4),
/* Flag for stack_map, store build_id+offset instead of pointer */
-#define BPF_F_STACK_BUILD_ID (1U << 5)
+ BPF_F_STACK_BUILD_ID = (1U << 5),
/* Zero-initialize hash function seed. This should only be used for testing. */
-#define BPF_F_ZERO_SEED (1U << 6)
+ BPF_F_ZERO_SEED = (1U << 6),
/* Flags for accessing BPF object from program side. */
-#define BPF_F_RDONLY_PROG (1U << 7)
-#define BPF_F_WRONLY_PROG (1U << 8)
+ BPF_F_RDONLY_PROG = (1U << 7),
+ BPF_F_WRONLY_PROG = (1U << 8),
/* Clone map from listener for newly accepted socket */
-#define BPF_F_CLONE (1U << 9)
+ BPF_F_CLONE = (1U << 9),
/* Enable memory-mapping BPF map */
-#define BPF_F_MMAPABLE (1U << 10)
+ BPF_F_MMAPABLE = (1U << 10),
+};
/* Flags for BPF_PROG_QUERY. */
@@ -391,6 +398,8 @@ struct bpf_stack_build_id {
};
};
+#define BPF_OBJ_NAME_LEN 16U
+
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
__u32 map_type; /* one of enum bpf_map_type */
@@ -534,7 +543,7 @@ union bpf_attr {
__u32 prog_cnt;
} query;
- struct {
+ struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
__u64 name;
__u32 prog_fd;
} raw_tracepoint;
@@ -562,6 +571,24 @@ union bpf_attr {
__u64 probe_offset; /* output: probe_offset */
__u64 probe_addr; /* output: probe_addr */
} task_fd_query;
+
+ struct { /* struct used by BPF_LINK_CREATE command */
+ __u32 prog_fd; /* eBPF program to attach */
+ __u32 target_fd; /* object to attach to */
+ __u32 attach_type; /* attach type */
+ __u32 flags; /* extra flags */
+ } link_create;
+
+ struct { /* struct used by BPF_LINK_UPDATE command */
+ __u32 link_fd; /* link fd */
+ /* new program fd to update link with */
+ __u32 new_prog_fd;
+ __u32 flags; /* extra flags */
+ /* expected link's program fd; it is specified only if
+ * BPF_F_REPLACE flag is set in flags
+ */
+ __u32 old_prog_fd;
+ } link_update;
+
} __attribute__((aligned(8)));
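
A minimal sketch of exercising the new BPF_LINK_UPDATE command directly through the bpf(2) syscall, assuming valid link and program fds (error handling elided):

    union bpf_attr attr = {};

    attr.link_update.link_fd = link_fd;
    attr.link_update.new_prog_fd = new_prog_fd;
    attr.link_update.flags = BPF_F_REPLACE;
    attr.link_update.old_prog_fd = old_prog_fd;	/* verified against the link */

    err = syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
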
/* The description below is an attempt at providing documentation to eBPF
@@ -2890,6 +2917,114 @@ union bpf_attr {
* Obtain the 64bit jiffies
* Return
* The 64 bit jiffies
+ *
+ * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
+ * Description
+ * For an eBPF program attached to a perf event, retrieve the
+ * branch records (**struct perf_branch_entry**) associated
+ * with *ctx* and store them in the buffer pointed to by *buf*,
+ * up to *size* bytes.
+ * Return
+ * On success, number of bytes written to *buf*. On error, a
+ * negative value.
+ *
+ * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
+ * instead return the number of bytes required to store all the
+ * branch entries. If this flag is set, *buf* may be NULL.
+ *
+ * **-EINVAL** if arguments invalid or **size** not a multiple
+ * of sizeof(struct perf_branch_entry).
+ *
+ * **-ENOENT** if architecture does not support branch records.
+ *
+ * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
+ * Description
+ * Returns 0 on success. The values for *pid* and *tgid*, as
+ * seen from the current *namespace*, are returned in *nsdata*.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** if the *dev* and *ino* supplied don't match the dev_t
+ * and inode number of the current task's nsfs, or if the dev
+ * conversion to dev_t lost high bits.
+ *
+ * **-ENOENT** if the pidns does not exist for the current task.
+ *
+ * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * Description
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ * event must have the following attributes: **PERF_SAMPLE_RAW**
+ * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ * The *flags* are used to indicate the index in *map* for which
+ * the value must be put, masked with **BPF_F_INDEX_MASK**.
+ * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ * to indicate that the index of the current CPU core should be
+ * used.
+ *
+ * The value to write, of *size*, is passed through the eBPF stack
+ * and pointed to by *data*.
+ *
+ * *ctx* is a pointer to the in-kernel struct xdp_buff.
+ *
+ * This helper is similar to **bpf_perf_event_output**\ () but
+ * restricted to raw_tracepoint bpf programs.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * u64 bpf_get_netns_cookie(void *ctx)
+ * Description
+ * Retrieve the cookie (generated by the kernel) of the network
+ * namespace the input *ctx* is associated with. The network
+ * namespace cookie remains stable for its lifetime and provides
+ * a global identifier that can be assumed unique. If *ctx* is
+ * NULL, then the helper returns the cookie for the initial
+ * network namespace. The cookie itself is very similar to that
+ * of the bpf_get_socket_cookie() helper, but applies to network
+ * namespaces instead of sockets.
+ * Return
+ * An 8-byte long opaque number.
+ *
+ * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level)
+ * Description
+ * Return the id of the cgroup v2 that is an ancestor of the cgroup
+ * associated with the current task at the *ancestor_level*. The
+ * root cgroup is at *ancestor_level* zero, and each step down the
+ * hierarchy increments the level. If *ancestor_level* equals the
+ * level of the cgroup associated with the current task, the return
+ * value will be the same as that of **bpf_get_current_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are higher in the hierarchy than the immediate cgroup
+ * associated with the current task.
+ *
+ * The format of the returned id and the helper limitations are the
+ * same as in **bpf_get_current_cgroup_id**\ ().
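A short sketch of bpf_map__set_initial_value(); the map name is hypothetical (libbpf names internal maps "<object>.rodata" and similar), and the blob must match the map's value_size exactly and be set before bpf_object__load():

    #include <bpf/libbpf.h>

    static int set_initial_cfg(struct bpf_object *obj)
    {
    	struct bpf_map *map;
    	__u32 cfg[4] = { 1, 2, 3, 4 };	/* must equal the map's value_size */

    	map = bpf_object__find_map_by_name(obj, "prog_b.rodata");	/* hypothetical */
    	if (!map)
    		return -1;
    	return bpf_map__set_initial_value(map, cfg, sizeof(cfg));
    }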
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
+ * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
+ * Description
+ * Assign the *sk* to the *skb*. When combined with appropriate
+ * routing configuration to receive the packet towards the socket,
+ * this will cause *skb* to be delivered to the specified socket.
+ * Subsequent redirection of *skb* via **bpf_redirect**\ (),
+ * **bpf_clone_redirect**\ () or other methods outside of BPF may
+ * interfere with successful delivery to the socket.
+ *
+ * This operation is only valid from the TC ingress path.
+ *
+ * The *flags* argument must be zero.
+ * Return
+ * 0 on success, or a negative errno in case of failure.
+ *
+ * * **-EINVAL** Unsupported flags specified.
+ * * **-ENOENT** Socket is unavailable for assignment.
+ * * **-ENETUNREACH** Socket is unreachable (wrong netns).
+ * * **-EOPNOTSUPP** Unsupported operation, for example a
+ * call from outside of TC ingress.
+ * * **-ESOCKTNOSUPPORT** Socket type not supported (reuseport).
*/
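As a rough illustration of the new bpf_sk_assign() semantics, a minimal TC ingress sketch could steer matching flows to a pre-existing listener; the hard-coded tuple values below are placeholders for fields that would normally be parsed from the packet, and the whole snippet is an assumption-laden example rather than part of this patch:

    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    SEC("classifier")
    int steer_to_listener(struct __sk_buff *skb)
    {
    	struct bpf_sock_tuple tuple = {};
    	struct bpf_sock *sk;
    	long err;

    	tuple.ipv4.daddr = bpf_htonl(0x7f000001);	/* placeholder: 127.0.0.1 */
    	tuple.ipv4.dport = bpf_htons(4321);		/* placeholder port */

    	sk = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
    				BPF_F_CURRENT_NETNS, 0);
    	if (!sk)
    		return TC_ACT_SHOT;
    	err = bpf_sk_assign(skb, sk, 0);	/* flags must be zero */
    	bpf_sk_release(sk);
    	return err ? TC_ACT_SHOT : TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";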
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -3010,7 +3145,13 @@ union bpf_attr {
FN(probe_read_kernel_str), \
FN(tcp_send_ack), \
FN(send_signal_thread), \
- FN(jiffies64),
+ FN(jiffies64), \
+ FN(read_branch_records), \
+ FN(get_ns_current_pid_tgid), \
+ FN(xdp_output), \
+ FN(get_netns_cookie), \
+ FN(get_current_ancestor_cgroup_id), \
+ FN(sk_assign),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -3025,69 +3166,100 @@ enum bpf_func_id {
/* All flags used by eBPF helper functions, placed here. */
/* BPF_FUNC_skb_store_bytes flags. */
-#define BPF_F_RECOMPUTE_CSUM (1ULL << 0)
-#define BPF_F_INVALIDATE_HASH (1ULL << 1)
+enum {
+ BPF_F_RECOMPUTE_CSUM = (1ULL << 0),
+ BPF_F_INVALIDATE_HASH = (1ULL << 1),
+};
/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
* First 4 bits are for passing the header field size.
*/
-#define BPF_F_HDR_FIELD_MASK 0xfULL
+enum {
+ BPF_F_HDR_FIELD_MASK = 0xfULL,
+};
/* BPF_FUNC_l4_csum_replace flags. */
-#define BPF_F_PSEUDO_HDR (1ULL << 4)
-#define BPF_F_MARK_MANGLED_0 (1ULL << 5)
-#define BPF_F_MARK_ENFORCE (1ULL << 6)
+enum {
+ BPF_F_PSEUDO_HDR = (1ULL << 4),
+ BPF_F_MARK_MANGLED_0 = (1ULL << 5),
+ BPF_F_MARK_ENFORCE = (1ULL << 6),
+};
/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
-#define BPF_F_INGRESS (1ULL << 0)
+enum {
+ BPF_F_INGRESS = (1ULL << 0),
+};
/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
-#define BPF_F_TUNINFO_IPV6 (1ULL << 0)
+enum {
+ BPF_F_TUNINFO_IPV6 = (1ULL << 0),
+};
/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
-#define BPF_F_SKIP_FIELD_MASK 0xffULL
-#define BPF_F_USER_STACK (1ULL << 8)
+enum {
+ BPF_F_SKIP_FIELD_MASK = 0xffULL,
+ BPF_F_USER_STACK = (1ULL << 8),
/* flags used by BPF_FUNC_get_stackid only. */
-#define BPF_F_FAST_STACK_CMP (1ULL << 9)
-#define BPF_F_REUSE_STACKID (1ULL << 10)
+ BPF_F_FAST_STACK_CMP = (1ULL << 9),
+ BPF_F_REUSE_STACKID = (1ULL << 10),
/* flags used by BPF_FUNC_get_stack only. */
-#define BPF_F_USER_BUILD_ID (1ULL << 11)
+ BPF_F_USER_BUILD_ID = (1ULL << 11),
+};
/* BPF_FUNC_skb_set_tunnel_key flags. */
-#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
-#define BPF_F_DONT_FRAGMENT (1ULL << 2)
-#define BPF_F_SEQ_NUMBER (1ULL << 3)
+enum {
+ BPF_F_ZERO_CSUM_TX = (1ULL << 1),
+ BPF_F_DONT_FRAGMENT = (1ULL << 2),
+ BPF_F_SEQ_NUMBER = (1ULL << 3),
+};
/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
* BPF_FUNC_perf_event_read_value flags.
*/
-#define BPF_F_INDEX_MASK 0xffffffffULL
-#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
+enum {
+ BPF_F_INDEX_MASK = 0xffffffffULL,
+ BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK,
/* BPF_FUNC_perf_event_output for sk_buff input context. */
-#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
+ BPF_F_CTXLEN_MASK = (0xfffffULL << 32),
+};
/* Current network namespace */
-#define BPF_F_CURRENT_NETNS (-1L)
+enum {
+ BPF_F_CURRENT_NETNS = (-1L),
+};
/* BPF_FUNC_skb_adjust_room flags. */
-#define BPF_F_ADJ_ROOM_FIXED_GSO (1ULL << 0)
+enum {
+ BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0),
+ BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1),
+ BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2),
+ BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3),
+ BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
+};
-#define BPF_ADJ_ROOM_ENCAP_L2_MASK 0xff
-#define BPF_ADJ_ROOM_ENCAP_L2_SHIFT 56
+enum {
+ BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff,
+ BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56,
+};
-#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 (1ULL << 1)
-#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 (1ULL << 2)
-#define BPF_F_ADJ_ROOM_ENCAP_L4_GRE (1ULL << 3)
-#define BPF_F_ADJ_ROOM_ENCAP_L4_UDP (1ULL << 4)
#define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \
BPF_ADJ_ROOM_ENCAP_L2_MASK) \
<< BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
/* BPF_FUNC_sysctl_get_name flags. */
-#define BPF_F_SYSCTL_BASE_NAME (1ULL << 0)
+enum {
+ BPF_F_SYSCTL_BASE_NAME = (1ULL << 0),
+};
/* BPF_FUNC_sk_storage_get flags */
-#define BPF_SK_STORAGE_GET_F_CREATE (1ULL << 0)
+enum {
+ BPF_SK_STORAGE_GET_F_CREATE = (1ULL << 0),
+};
+
+/* BPF_FUNC_read_branch_records flags. */
+enum {
+ BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0),
+};
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
@@ -3153,6 +3325,7 @@ struct __sk_buff {
__u32 wire_len;
__u32 gso_segs;
__bpf_md_ptr(struct bpf_sock *, sk);
+ __u32 gso_size;
};
struct bpf_tunnel_key {
@@ -3505,13 +3678,14 @@ struct bpf_sock_ops {
};
/* Definitions for bpf_sock_ops_cb_flags */
-#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
-#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
-#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
-#define BPF_SOCK_OPS_RTT_CB_FLAG (1<<3)
-#define BPF_SOCK_OPS_ALL_CB_FLAGS 0xF /* Mask of all currently
- * supported cb flags
- */
+enum {
+ BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0),
+ BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1),
+ BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2),
+ BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3),
+/* Mask of all currently supported cb flags */
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0xF,
+};
/* List of known BPF sock_ops operators.
* New entries can only be added at the end
@@ -3590,8 +3764,10 @@ enum {
BPF_TCP_MAX_STATES /* Leave at the end! */
};
-#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
-#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */
+enum {
+ TCP_BPF_IW = 1001, /* Set TCP initial congestion window */
+ TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */
+};
struct bpf_perf_event_value {
__u64 counter;
@@ -3599,12 +3775,16 @@ struct bpf_perf_event_value {
__u64 running;
};
-#define BPF_DEVCG_ACC_MKNOD (1ULL << 0)
-#define BPF_DEVCG_ACC_READ (1ULL << 1)
-#define BPF_DEVCG_ACC_WRITE (1ULL << 2)
+enum {
+ BPF_DEVCG_ACC_MKNOD = (1ULL << 0),
+ BPF_DEVCG_ACC_READ = (1ULL << 1),
+ BPF_DEVCG_ACC_WRITE = (1ULL << 2),
+};
-#define BPF_DEVCG_DEV_BLOCK (1ULL << 0)
-#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
+enum {
+ BPF_DEVCG_DEV_BLOCK = (1ULL << 0),
+ BPF_DEVCG_DEV_CHAR = (1ULL << 1),
+};
struct bpf_cgroup_dev_ctx {
/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
@@ -3620,8 +3800,10 @@ struct bpf_raw_tracepoint_args {
/* DIRECT: Skip the FIB rules and go to FIB table associated with device
* OUTPUT: Do lookup from egress perspective; default is ingress
*/
-#define BPF_FIB_LOOKUP_DIRECT (1U << 0)
-#define BPF_FIB_LOOKUP_OUTPUT (1U << 1)
+enum {
+ BPF_FIB_LOOKUP_DIRECT = (1U << 0),
+ BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
+};
enum {
BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
@@ -3693,9 +3875,11 @@ enum bpf_task_fd_type {
BPF_FD_TYPE_URETPROBE, /* filename + offset */
};
-#define BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG (1U << 0)
-#define BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL (1U << 1)
-#define BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP (1U << 2)
+enum {
+ BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0),
+ BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1),
+ BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2),
+};
struct bpf_flow_keys {
__u16 nhoff;
@@ -3761,4 +3945,8 @@ struct bpf_sockopt {
__s32 retval;
};
+struct bpf_pidns_info {
+ __u32 pid;
+ __u32 tgid;
+};
#endif /* _UAPI__LINUX_BPF_H__ */
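A hedged sketch of a consumer of the new struct bpf_pidns_info and the bpf_get_ns_current_pid_tgid() helper; pidns_dev and pidns_ino are hypothetical globals the loader would fill in from stat() on /proc/self/ns/pid:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    const volatile __u64 pidns_dev;	/* hypothetical: set by the loader */
    const volatile __u64 pidns_ino;

    SEC("tracepoint/syscalls/sys_enter_write")
    int trace_write(void *ctx)
    {
    	struct bpf_pidns_info ns = {};
    	char fmt[] = "pid %u tgid %u\n";

    	if (bpf_get_ns_current_pid_tgid(pidns_dev, pidns_ino, &ns, sizeof(ns)))
    		return 0;	/* wrong namespace or bad dev/ino */
    	bpf_trace_printk(fmt, sizeof(fmt), ns.pid, ns.tgid);
    	return 0;
    }

    char _license[] SEC("license") = "GPL";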
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index 024af2d1d0af..ca6665ea758a 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -463,6 +463,7 @@ enum {
IFLA_MACSEC_REPLAY_PROTECT,
IFLA_MACSEC_VALIDATION,
IFLA_MACSEC_PAD,
+ IFLA_MACSEC_OFFLOAD,
__IFLA_MACSEC_MAX,
};
@@ -489,6 +490,7 @@ enum macsec_validation_type {
enum macsec_offload {
MACSEC_OFFLOAD_OFF = 0,
MACSEC_OFFLOAD_PHY = 1,
+ MACSEC_OFFLOAD_MAC = 2,
__MACSEC_OFFLOAD_END,
MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1,
};
@@ -960,11 +962,12 @@ enum {
#define XDP_FLAGS_SKB_MODE (1U << 1)
#define XDP_FLAGS_DRV_MODE (1U << 2)
#define XDP_FLAGS_HW_MODE (1U << 3)
+#define XDP_FLAGS_REPLACE (1U << 4)
#define XDP_FLAGS_MODES (XDP_FLAGS_SKB_MODE | \
XDP_FLAGS_DRV_MODE | \
XDP_FLAGS_HW_MODE)
#define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \
- XDP_FLAGS_MODES)
+ XDP_FLAGS_MODES | XDP_FLAGS_REPLACE)
/* These are stored into IFLA_XDP_ATTACHED on dump. */
enum {
@@ -984,6 +987,7 @@ enum {
IFLA_XDP_DRV_PROG_ID,
IFLA_XDP_SKB_PROG_ID,
IFLA_XDP_HW_PROG_ID,
+ IFLA_XDP_EXPECTED_FD,
__IFLA_XDP_MAX,
};
diff --git a/tools/testing/selftests/bpf/include/uapi/linux/types.h b/tools/include/uapi/linux/types.h
index 91fa51a9c31d..91fa51a9c31d 100644
--- a/tools/testing/selftests/bpf/include/uapi/linux/types.h
+++ b/tools/include/uapi/linux/types.h
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index c6dafe563176..5cc1b0785d18 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -235,7 +235,8 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
memset(&attr, 0, sizeof(attr));
attr.prog_type = load_attr->prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
- if (attr.prog_type == BPF_PROG_TYPE_STRUCT_OPS) {
+ if (attr.prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
+ attr.prog_type == BPF_PROG_TYPE_LSM) {
attr.attach_btf_id = load_attr->attach_btf_id;
} else if (attr.prog_type == BPF_PROG_TYPE_TRACING ||
attr.prog_type == BPF_PROG_TYPE_EXT) {
@@ -584,6 +585,40 @@ int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}
+int bpf_link_create(int prog_fd, int target_fd,
+ enum bpf_attach_type attach_type,
+ const struct bpf_link_create_opts *opts)
+{
+ union bpf_attr attr;
+
+ if (!OPTS_VALID(opts, bpf_link_create_opts))
+ return -EINVAL;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.link_create.prog_fd = prog_fd;
+ attr.link_create.target_fd = target_fd;
+ attr.link_create.attach_type = attach_type;
+
+ return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
+}
+
+int bpf_link_update(int link_fd, int new_prog_fd,
+ const struct bpf_link_update_opts *opts)
+{
+ union bpf_attr attr;
+
+ if (!OPTS_VALID(opts, bpf_link_update_opts))
+ return -EINVAL;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.link_update.link_fd = link_fd;
+ attr.link_update.new_prog_fd = new_prog_fd;
+ attr.link_update.flags = OPTS_GET(opts, flags, 0);
+ attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
+
+ return sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
+}
+
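A minimal user-space sketch of the new bpf_link_update() API, assuming link_fd refers to an already-created link; with BPF_F_REPLACE set, the kernel refuses the update unless old_prog_fd is still the attached program:

    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    static int replace_link_prog(int link_fd, int old_prog_fd, int new_prog_fd)
    {
    	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts,
    		.flags = BPF_F_REPLACE,		/* only swap if old prog matches */
    		.old_prog_fd = old_prog_fd,
    	);

    	return bpf_link_update(link_fd, new_prog_fd, &opts);
    }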
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
__u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index b976e77316cc..46d47afdd887 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -168,6 +168,25 @@ LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
enum bpf_attach_type type);
+struct bpf_link_create_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+};
+#define bpf_link_create_opts__last_field sz
+
+LIBBPF_API int bpf_link_create(int prog_fd, int target_fd,
+ enum bpf_attach_type attach_type,
+ const struct bpf_link_create_opts *opts);
+
+struct bpf_link_update_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ __u32 flags; /* extra flags */
+ __u32 old_prog_fd; /* expected old program FD */
+};
+#define bpf_link_update_opts__last_field old_prog_fd
+
+LIBBPF_API int bpf_link_update(int link_fd, int new_prog_fd,
+ const struct bpf_link_update_opts *opts);
+
struct bpf_prog_test_run_attr {
int prog_fd;
int repeat;
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index b0dafe8b4ebc..f3f3c3fb98cb 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -49,7 +49,8 @@
#if defined(bpf_target_x86)
-#ifdef __KERNEL__
+#if defined(__KERNEL__) || defined(__VMLINUX_H__)
+
#define PT_REGS_PARM1(x) ((x)->di)
#define PT_REGS_PARM2(x) ((x)->si)
#define PT_REGS_PARM3(x) ((x)->dx)
@@ -60,7 +61,20 @@
#define PT_REGS_RC(x) ((x)->ax)
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->ip)
+
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), di)
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), si)
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), dx)
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), cx)
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8)
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), sp)
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), bp)
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), ax)
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), ip)
+
#else
+
#ifdef __i386__
/* i386 kernel is built with -mregparm=3 */
#define PT_REGS_PARM1(x) ((x)->eax)
@@ -73,7 +87,20 @@
#define PT_REGS_RC(x) ((x)->eax)
#define PT_REGS_SP(x) ((x)->esp)
#define PT_REGS_IP(x) ((x)->eip)
+
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), eax)
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), edx)
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), ecx)
+#define PT_REGS_PARM4_CORE(x) 0
+#define PT_REGS_PARM5_CORE(x) 0
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), esp)
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), ebp)
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), eax)
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), esp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), eip)
+
#else
+
#define PT_REGS_PARM1(x) ((x)->rdi)
#define PT_REGS_PARM2(x) ((x)->rsi)
#define PT_REGS_PARM3(x) ((x)->rdx)
@@ -84,6 +111,18 @@
#define PT_REGS_RC(x) ((x)->rax)
#define PT_REGS_SP(x) ((x)->rsp)
#define PT_REGS_IP(x) ((x)->rip)
+
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), rdi)
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), rsi)
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), rdx)
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), rcx)
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8)
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), rsp)
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), rbp)
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), rax)
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), rsp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), rip)
+
#endif
#endif
@@ -104,6 +143,17 @@ struct pt_regs;
#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[3])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[4])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[5])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[6])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[14])
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[11])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[15])
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), psw.addr)
+
#elif defined(bpf_target_arm)
#define PT_REGS_PARM1(x) ((x)->uregs[0])
@@ -117,6 +167,17 @@ struct pt_regs;
#define PT_REGS_SP(x) ((x)->uregs[13])
#define PT_REGS_IP(x) ((x)->uregs[12])
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), uregs[0])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), uregs[1])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), uregs[2])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), uregs[3])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), uregs[4])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), uregs[14])
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), uregs[11])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), uregs[0])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), uregs[13])
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), uregs[12])
+
#elif defined(bpf_target_arm64)
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
@@ -134,6 +195,17 @@ struct pt_regs;
#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[1])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[2])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[3])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[4])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[30])
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[29])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), sp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), pc)
+
#elif defined(bpf_target_mips)
#define PT_REGS_PARM1(x) ((x)->regs[4])
@@ -147,6 +219,17 @@ struct pt_regs;
#define PT_REGS_SP(x) ((x)->regs[29])
#define PT_REGS_IP(x) ((x)->cp0_epc)
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), regs[4])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), regs[5])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), regs[6])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), regs[7])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), regs[8])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), regs[31])
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), regs[30])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), regs[1])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), regs[29])
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), cp0_epc)
+
#elif defined(bpf_target_powerpc)
#define PT_REGS_PARM1(x) ((x)->gpr[3])
@@ -158,6 +241,15 @@ struct pt_regs;
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->nip)
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), gpr[3])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), gpr[4])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), gpr[5])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), gpr[6])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), gpr[7])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), gpr[3])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), nip)
+
#elif defined(bpf_target_sparc)
#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
@@ -169,11 +261,22 @@ struct pt_regs;
#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0])
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I1])
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I2])
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I3])
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I4])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I7])
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0])
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), u_regs[UREG_FP])
+
/* Should this also be a bpf_target check for the sparc case? */
#if defined(__arch64__)
#define PT_REGS_IP(x) ((x)->tpc)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), tpc)
#else
#define PT_REGS_IP(x) ((x)->pc)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), pc)
#endif
#endif
@@ -192,4 +295,122 @@ struct pt_regs;
(void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
#endif
+#define ___bpf_concat(a, b) a ## b
+#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
+#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
+#define ___bpf_narg(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#define ___bpf_empty(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
+
+#define ___bpf_ctx_cast0() ctx
+#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
+#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
+#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
+#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
+#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
+#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
+#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
+#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
+#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
+#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
+#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
+#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
+#define ___bpf_ctx_cast(args...) \
+ ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
+
+/*
+ * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
+ * similar kinds of BPF programs that accept input arguments as a single
+ * pointer to an untyped u64 array, where each u64 can actually be a typed
+ * pointer or an integer of a different size. Instead of requiring the user
+ * to write manual casts and work with array elements by index, the BPF_PROG
+ * macro allows the user to declare a list of named and typed input arguments
+ * in the same syntax as for a normal C function. All the casting is hidden
+ * and performed transparently, while user code works directly with function
+ * arguments of the specified type and name.
+ *
+ * The original raw context argument is preserved as the 'ctx' argument.
+ * This is useful when using BPF helpers that expect the original context
+ * as one of the parameters (e.g., bpf_perf_event_output()).
+ */
+#define BPF_PROG(name, args...) \
+name(unsigned long long *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(unsigned long long *ctx, ##args); \
+typeof(name(0)) name(unsigned long long *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_ctx_cast(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(unsigned long long *ctx, ##args)
+
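For illustration, a BPF_PROG user for the sched_switch raw tracepoint might look as follows (a sketch; it assumes task_struct is available from vmlinux.h or equivalent):

    SEC("tp_btf/sched_switch")
    int BPF_PROG(on_switch, bool preempt, struct task_struct *prev,
    	     struct task_struct *next)
    {
    	/* the raw u64 *ctx is still in scope for helpers that need it */
    	return 0;
    }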
+struct pt_regs;
+
+#define ___bpf_kprobe_args0() ctx
+#define ___bpf_kprobe_args1(x) \
+ ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
+#define ___bpf_kprobe_args2(x, args...) \
+ ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
+#define ___bpf_kprobe_args3(x, args...) \
+ ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
+#define ___bpf_kprobe_args4(x, args...) \
+ ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
+#define ___bpf_kprobe_args5(x, args...) \
+ ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
+#define ___bpf_kprobe_args(args...) \
+ ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
+ * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
+ * low-level way of getting kprobe input arguments from struct pt_regs, and
+ * provides the familiar syntax and semantics of typed and named function
+ * arguments for accessing kprobe input parameters.
+ *
+ * The original struct pt_regs *ctx context is preserved as the 'ctx'
+ * argument. This might be necessary when using BPF helpers like
+ * bpf_perf_event_output().
+ */
+#define BPF_KPROBE(name, args...) \
+name(struct pt_regs *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_kprobe_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args)
+
+#define ___bpf_kretprobe_args0() ctx
+#define ___bpf_kretprobe_args1(x) \
+ ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
+#define ___bpf_kretprobe_args(args...) \
+ ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KRETPROBE is similar to BPF_KPROBE, except that it only provides an
+ * optional return value (in addition to `struct pt_regs *ctx`) and no input
+ * arguments, because they will have been clobbered by the time the probed
+ * function returns.
+ */
+#define BPF_KRETPROBE(name, args...) \
+name(struct pt_regs *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_kretprobe_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
+
#endif
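Putting the pieces together, a hedged sketch of a kprobe/kretprobe pair using the new macros; do_unlinkat(int dfd, struct filename *name) is used as the example target, and vmlinux.h is assumed to provide the kernel types:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("kprobe/do_unlinkat")
    int BPF_KPROBE(do_unlinkat_enter, int dfd, struct filename *name)
    {
    	/* typed arguments replace manual PT_REGS_PARMn(ctx) casts; the
    	 * PT_REGS_*_CORE() variants above serve the same role when
    	 * pt_regs itself must be read via BPF CO-RE relocations.
    	 */
    	return 0;
    }

    SEC("kretprobe/do_unlinkat")
    int BPF_KRETPROBE(do_unlinkat_exit, long ret)
    {
    	return 0;
    }

    char _license[] SEC("license") = "GPL";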
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 3d1c25fc97ae..bfef3d606b54 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -657,22 +657,32 @@ int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
int btf__load(struct btf *btf)
{
- __u32 log_buf_size = BPF_LOG_BUF_SIZE;
+ __u32 log_buf_size = 0;
char *log_buf = NULL;
int err = 0;
if (btf->fd >= 0)
return -EEXIST;
- log_buf = malloc(log_buf_size);
- if (!log_buf)
- return -ENOMEM;
+retry_load:
+ if (log_buf_size) {
+ log_buf = malloc(log_buf_size);
+ if (!log_buf)
+ return -ENOMEM;
- *log_buf = 0;
+ *log_buf = 0;
+ }
btf->fd = bpf_load_btf(btf->data, btf->data_size,
log_buf, log_buf_size, false);
if (btf->fd < 0) {
+ if (!log_buf || errno == ENOSPC) {
+ log_buf_size = max((__u32)BPF_LOG_BUF_SIZE,
+ log_buf_size << 1);
+ free(log_buf);
+ goto retry_load;
+ }
+
err = -errno;
pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
if (*log_buf)
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index bd09ed1710f1..0c28ee82834b 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -916,13 +916,13 @@ static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
/* enumerators share namespace with typedef idents */
dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
if (dup_cnt > 1) {
- btf_dump_printf(d, "\n%s%s___%zu = %d,",
+ btf_dump_printf(d, "\n%s%s___%zu = %u,",
pfx(lvl + 1), name, dup_cnt,
- (__s32)v->val);
+ (__u32)v->val);
} else {
- btf_dump_printf(d, "\n%s%s = %d,",
+ btf_dump_printf(d, "\n%s%s = %u,",
pfx(lvl + 1), name,
- (__s32)v->val);
+ (__u32)v->val);
}
}
btf_dump_printf(d, "\n%s}", pfx(lvl));
@@ -1030,7 +1030,7 @@ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts))
return -EINVAL;
- fname = OPTS_GET(opts, field_name, NULL);
+ fname = OPTS_GET(opts, field_name, "");
lvl = OPTS_GET(opts, indent_level, 0);
btf_dump_emit_type_decl(d, id, fname, lvl);
return 0;
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 7469c7dcc15e..ff9174282a8c 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -1845,7 +1845,6 @@ resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
* type definition, while using only sizeof(void *) space in ELF data section.
*/
static bool get_map_field_int(const char *map_name, const struct btf *btf,
- const struct btf_type *def,
const struct btf_member *m, __u32 *res)
{
const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
@@ -1972,19 +1971,19 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
return -EINVAL;
}
if (strcmp(name, "type") == 0) {
- if (!get_map_field_int(map_name, obj->btf, def, m,
+ if (!get_map_field_int(map_name, obj->btf, m,
&map->def.type))
return -EINVAL;
pr_debug("map '%s': found type = %u.\n",
map_name, map->def.type);
} else if (strcmp(name, "max_entries") == 0) {
- if (!get_map_field_int(map_name, obj->btf, def, m,
+ if (!get_map_field_int(map_name, obj->btf, m,
&map->def.max_entries))
return -EINVAL;
pr_debug("map '%s': found max_entries = %u.\n",
map_name, map->def.max_entries);
} else if (strcmp(name, "map_flags") == 0) {
- if (!get_map_field_int(map_name, obj->btf, def, m,
+ if (!get_map_field_int(map_name, obj->btf, m,
&map->def.map_flags))
return -EINVAL;
pr_debug("map '%s': found map_flags = %u.\n",
@@ -1992,8 +1991,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
} else if (strcmp(name, "key_size") == 0) {
__u32 sz;
- if (!get_map_field_int(map_name, obj->btf, def, m,
- &sz))
+ if (!get_map_field_int(map_name, obj->btf, m, &sz))
return -EINVAL;
pr_debug("map '%s': found key_size = %u.\n",
map_name, sz);
@@ -2035,8 +2033,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
} else if (strcmp(name, "value_size") == 0) {
__u32 sz;
- if (!get_map_field_int(map_name, obj->btf, def, m,
- &sz))
+ if (!get_map_field_int(map_name, obj->btf, m, &sz))
return -EINVAL;
pr_debug("map '%s': found value_size = %u.\n",
map_name, sz);
@@ -2079,8 +2076,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
__u32 val;
int err;
- if (!get_map_field_int(map_name, obj->btf, def, m,
- &val))
+ if (!get_map_field_int(map_name, obj->btf, m, &val))
return -EINVAL;
pr_debug("map '%s': found pinning = %u.\n",
map_name, val);
@@ -2284,11 +2280,16 @@ static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
}
}
-static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
+static bool libbpf_needs_btf(const struct bpf_object *obj)
{
return obj->efile.btf_maps_shndx >= 0 ||
- obj->efile.st_ops_shndx >= 0 ||
- obj->nr_extern > 0;
+ obj->efile.st_ops_shndx >= 0 ||
+ obj->nr_extern > 0;
+}
+
+static bool kernel_needs_btf(const struct bpf_object *obj)
+{
+ return obj->efile.st_ops_shndx >= 0;
}
static int bpf_object__init_btf(struct bpf_object *obj,
@@ -2324,7 +2325,7 @@ static int bpf_object__init_btf(struct bpf_object *obj,
}
}
out:
- if (err && bpf_object__is_btf_mandatory(obj)) {
+ if (err && libbpf_needs_btf(obj)) {
pr_warn("BTF is required, but is missing or corrupted.\n");
return err;
}
@@ -2348,7 +2349,7 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
btf_ext__free(obj->btf_ext);
obj->btf_ext = NULL;
- if (bpf_object__is_btf_mandatory(obj)) {
+ if (libbpf_needs_btf(obj)) {
pr_warn("BTF is required, but is missing or corrupted.\n");
return -ENOENT;
}
@@ -2357,7 +2358,8 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
{
- if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
+ if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
+ prog->type == BPF_PROG_TYPE_LSM)
return true;
/* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
@@ -2412,7 +2414,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
obj->btf_ext = NULL;
}
- if (bpf_object__is_btf_mandatory(obj))
+ if (kernel_needs_btf(obj))
return err;
}
return 0;
@@ -3868,6 +3870,10 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
if (str_is_empty(targ_name))
continue;
+ t = skip_mods_and_typedefs(targ_btf, i, NULL);
+ if (!btf_is_composite(t) && !btf_is_array(t))
+ continue;
+
targ_essent_len = bpf_core_essential_name_len(targ_name);
if (targ_essent_len != local_essent_len)
continue;
@@ -4846,8 +4852,8 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
{
struct bpf_load_program_attr load_attr;
char *cp, errmsg[STRERR_BUFSIZE];
- int log_buf_size = BPF_LOG_BUF_SIZE;
- char *log_buf;
+ size_t log_buf_size = 0;
+ char *log_buf = NULL;
int btf_fd, ret;
if (!insns || !insns_cnt)
@@ -4861,7 +4867,8 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.insns = insns;
load_attr.insns_cnt = insns_cnt;
load_attr.license = license;
- if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) {
+ if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
+ prog->type == BPF_PROG_TYPE_LSM) {
load_attr.attach_btf_id = prog->attach_btf_id;
} else if (prog->type == BPF_PROG_TYPE_TRACING ||
prog->type == BPF_PROG_TYPE_EXT) {
@@ -4887,22 +4894,28 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.prog_flags = prog->prog_flags;
retry_load:
- log_buf = malloc(log_buf_size);
- if (!log_buf)
- pr_warn("Alloc log buffer for bpf loader error, continue without log\n");
+ if (log_buf_size) {
+ log_buf = malloc(log_buf_size);
+ if (!log_buf)
+ return -ENOMEM;
+
+ *log_buf = 0;
+ }
ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
if (ret >= 0) {
- if (load_attr.log_level)
+ if (log_buf && load_attr.log_level)
pr_debug("verifier log:\n%s", log_buf);
*pfd = ret;
ret = 0;
goto out;
}
- if (errno == ENOSPC) {
- log_buf_size <<= 1;
+ if (!log_buf || errno == ENOSPC) {
+ log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
+ log_buf_size << 1);
+
free(log_buf);
goto retry_load;
}
@@ -4945,8 +4958,9 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
{
int err = 0, fd, i, btf_id;
- if (prog->type == BPF_PROG_TYPE_TRACING ||
- prog->type == BPF_PROG_TYPE_EXT) {
+ if ((prog->type == BPF_PROG_TYPE_TRACING ||
+ prog->type == BPF_PROG_TYPE_LSM ||
+ prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
btf_id = libbpf_find_attach_btf_id(prog);
if (btf_id <= 0)
return btf_id;
@@ -6185,6 +6199,7 @@ bool bpf_program__is_##NAME(const struct bpf_program *prog) \
} \
BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
+BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
@@ -6251,6 +6266,8 @@ static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
struct bpf_program *prog);
static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
struct bpf_program *prog);
+static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
+ struct bpf_program *prog);
struct bpf_sec_def {
const char *sec;
@@ -6290,6 +6307,10 @@ static const struct bpf_sec_def section_defs[] = {
.expected_attach_type = BPF_TRACE_FENTRY,
.is_attach_btf = true,
.attach_fn = attach_trace),
+ SEC_DEF("fmod_ret/", TRACING,
+ .expected_attach_type = BPF_MODIFY_RETURN,
+ .is_attach_btf = true,
+ .attach_fn = attach_trace),
SEC_DEF("fexit/", TRACING,
.expected_attach_type = BPF_TRACE_FEXIT,
.is_attach_btf = true,
@@ -6297,6 +6318,10 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("freplace/", EXT,
.is_attach_btf = true,
.attach_fn = attach_trace),
+ SEC_DEF("lsm/", LSM,
+ .is_attach_btf = true,
+ .expected_attach_type = BPF_LSM_MAC,
+ .attach_fn = attach_lsm),
BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
@@ -6559,6 +6584,7 @@ invalid_prog:
}
#define BTF_TRACE_PREFIX "btf_trace_"
+#define BTF_LSM_PREFIX "bpf_lsm_"
#define BTF_MAX_NAME_SIZE 128
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
@@ -6586,9 +6612,15 @@ static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name,
if (attach_type == BPF_TRACE_RAW_TP)
err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
BTF_KIND_TYPEDEF);
+ else if (attach_type == BPF_LSM_MAC)
+ err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
+ BTF_KIND_FUNC);
else
err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
+ if (err <= 0)
+ pr_warn("%s is not found in vmlinux BTF\n", name);
+
return err;
}
@@ -6661,8 +6693,6 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog)
err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
name + section_defs[i].len,
attach_type);
- if (err <= 0)
- pr_warn("%s is not found in vmlinux BTF\n", name);
return err;
}
pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
@@ -6742,6 +6772,17 @@ void *bpf_map__priv(const struct bpf_map *map)
return map ? map->priv : ERR_PTR(-EINVAL);
}
+int bpf_map__set_initial_value(struct bpf_map *map,
+ const void *data, size_t size)
+{
+ if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
+ size != map->def.value_size || map->fd >= 0)
+ return -EINVAL;
+
+ memcpy(map->mmaped, data, size);
+ return 0;
+}
+
bool bpf_map__is_offload_neutral(const struct bpf_map *map)
{
return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
@@ -6932,9 +6973,17 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
struct bpf_link {
int (*detach)(struct bpf_link *link);
int (*destroy)(struct bpf_link *link);
+ char *pin_path; /* NULL, if not pinned */
+ int fd; /* hook FD, -1 if not applicable */
bool disconnected;
};
+/* Replace link's underlying BPF program with the new one */
+int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
+{
+ return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
+}
+
/* Release "ownership" of underlying BPF resource (typically, BPF program
* attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected
* link, when destructed through bpf_link__destroy() call won't attempt to
@@ -6961,26 +7010,109 @@ int bpf_link__destroy(struct bpf_link *link)
err = link->detach(link);
if (link->destroy)
link->destroy(link);
+ if (link->pin_path)
+ free(link->pin_path);
free(link);
return err;
}
-struct bpf_link_fd {
- struct bpf_link link; /* has to be at the top of struct */
- int fd; /* hook FD */
-};
+int bpf_link__fd(const struct bpf_link *link)
+{
+ return link->fd;
+}
+
+const char *bpf_link__pin_path(const struct bpf_link *link)
+{
+ return link->pin_path;
+}
+
+static int bpf_link__detach_fd(struct bpf_link *link)
+{
+ return close(link->fd);
+}
+
+struct bpf_link *bpf_link__open(const char *path)
+{
+ struct bpf_link *link;
+ int fd;
+
+ fd = bpf_obj_get(path);
+ if (fd < 0) {
+ fd = -errno;
+ pr_warn("failed to open link at %s: %d\n", path, fd);
+ return ERR_PTR(fd);
+ }
+
+ link = calloc(1, sizeof(*link));
+ if (!link) {
+ close(fd);
+ return ERR_PTR(-ENOMEM);
+ }
+ link->detach = &bpf_link__detach_fd;
+ link->fd = fd;
+
+ link->pin_path = strdup(path);
+ if (!link->pin_path) {
+ bpf_link__destroy(link);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return link;
+}
+
+int bpf_link__pin(struct bpf_link *link, const char *path)
+{
+ int err;
+
+ if (link->pin_path)
+ return -EBUSY;
+ err = make_parent_dir(path);
+ if (err)
+ return err;
+ err = check_path(path);
+ if (err)
+ return err;
+
+ link->pin_path = strdup(path);
+ if (!link->pin_path)
+ return -ENOMEM;
+
+ if (bpf_obj_pin(link->fd, link->pin_path)) {
+ err = -errno;
+ zfree(&link->pin_path);
+ return err;
+ }
+
+ pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
+ return 0;
+}
+
+int bpf_link__unpin(struct bpf_link *link)
+{
+ int err;
+
+ if (!link->pin_path)
+ return -EINVAL;
+
+ err = unlink(link->pin_path);
+ if (err != 0)
+ return -errno;
+
+ pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
+ zfree(&link->pin_path);
+ return 0;
+}
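These primitives combine into a re-attach-free update flow: pin a link once, then let a later process open the pin and swap programs. A sketch, assuming bpffs is mounted at /sys/fs/bpf and both programs are already loaded:

    #include <bpf/libbpf.h>

    static int pin_then_swap(struct bpf_program *prog, struct bpf_program *new_prog)
    {
    	struct bpf_link *link;

    	link = bpf_program__attach(prog);
    	if (libbpf_get_error(link))
    		return -1;
    	if (bpf_link__pin(link, "/sys/fs/bpf/mylink"))	/* hypothetical path */
    		return -1;

    	/* ... later, possibly from another process: */
    	link = bpf_link__open("/sys/fs/bpf/mylink");
    	if (libbpf_get_error(link))
    		return -1;
    	return bpf_link__update_program(link, new_prog);
    }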
static int bpf_link__detach_perf_event(struct bpf_link *link)
{
- struct bpf_link_fd *l = (void *)link;
int err;
- err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0);
+ err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
if (err)
err = -errno;
- close(l->fd);
+ close(link->fd);
return err;
}
@@ -6988,7 +7120,7 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
int pfd)
{
char errmsg[STRERR_BUFSIZE];
- struct bpf_link_fd *link;
+ struct bpf_link *link;
int prog_fd, err;
if (pfd < 0) {
@@ -7006,7 +7138,7 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
link = calloc(1, sizeof(*link));
if (!link)
return ERR_PTR(-ENOMEM);
- link->link.detach = &bpf_link__detach_perf_event;
+ link->detach = &bpf_link__detach_perf_event;
link->fd = pfd;
if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
@@ -7025,7 +7157,7 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return ERR_PTR(err);
}
- return (struct bpf_link *)link;
+ return link;
}
/*
@@ -7313,18 +7445,11 @@ out:
return link;
}
-static int bpf_link__detach_fd(struct bpf_link *link)
-{
- struct bpf_link_fd *l = (void *)link;
-
- return close(l->fd);
-}
-
struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
const char *tp_name)
{
char errmsg[STRERR_BUFSIZE];
- struct bpf_link_fd *link;
+ struct bpf_link *link;
int prog_fd, pfd;
prog_fd = bpf_program__fd(prog);
@@ -7337,7 +7462,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
link = calloc(1, sizeof(*link));
if (!link)
return ERR_PTR(-ENOMEM);
- link->link.detach = &bpf_link__detach_fd;
+ link->detach = &bpf_link__detach_fd;
pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
if (pfd < 0) {
@@ -7349,7 +7474,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
return ERR_PTR(pfd);
}
link->fd = pfd;
- return (struct bpf_link *)link;
+ return link;
}
static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
@@ -7360,10 +7485,11 @@ static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
return bpf_program__attach_raw_tracepoint(prog, tp_name);
}
-struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
+/* Common logic for all BPF program types that attach to a btf_id */
+static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
{
char errmsg[STRERR_BUFSIZE];
- struct bpf_link_fd *link;
+ struct bpf_link *link;
int prog_fd, pfd;
prog_fd = bpf_program__fd(prog);
@@ -7376,13 +7502,13 @@ struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
link = calloc(1, sizeof(*link));
if (!link)
return ERR_PTR(-ENOMEM);
- link->link.detach = &bpf_link__detach_fd;
+ link->detach = &bpf_link__detach_fd;
pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
if (pfd < 0) {
pfd = -errno;
free(link);
- pr_warn("program '%s': failed to attach to trace: %s\n",
+ pr_warn("program '%s': failed to attach: %s\n",
bpf_program__title(prog, false),
libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
@@ -7391,12 +7517,68 @@ struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
return (struct bpf_link *)link;
}
+struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
+{
+ return bpf_program__attach_btf_id(prog);
+}
+
+struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
+{
+ return bpf_program__attach_btf_id(prog);
+}
+
static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
struct bpf_program *prog)
{
return bpf_program__attach_trace(prog);
}
+static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
+ struct bpf_program *prog)
+{
+ return bpf_program__attach_lsm(prog);
+}
+
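An LSM program wired to the new "lsm/" section could look like the following sketch; file_mprotect is one of the BTF-typed hooks the bpf_lsm_ prefix resolution below is designed to find, and vmlinux.h is assumed for the kernel types:

    SEC("lsm/file_mprotect")
    int BPF_PROG(mprotect_audit, struct vm_area_struct *vma,
    	     unsigned long reqprot, unsigned long prot, int ret)
    {
    	/* ret carries the verdict of the previous program on this hook */
    	if (ret)
    		return ret;
    	return 0;	/* 0 allows the operation */
    }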
+struct bpf_link *
+bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
+{
+ const struct bpf_sec_def *sec_def;
+ enum bpf_attach_type attach_type;
+ char errmsg[STRERR_BUFSIZE];
+ struct bpf_link *link;
+ int prog_fd, link_fd;
+
+ prog_fd = bpf_program__fd(prog);
+ if (prog_fd < 0) {
+ pr_warn("program '%s': can't attach before loaded\n",
+ bpf_program__title(prog, false));
+ return ERR_PTR(-EINVAL);
+ }
+
+ link = calloc(1, sizeof(*link));
+ if (!link)
+ return ERR_PTR(-ENOMEM);
+ link->detach = &bpf_link__detach_fd;
+
+ attach_type = bpf_program__get_expected_attach_type(prog);
+ if (!attach_type) {
+ sec_def = find_sec_def(bpf_program__title(prog, false));
+ if (sec_def)
+ attach_type = sec_def->attach_type;
+ }
+ link_fd = bpf_link_create(prog_fd, cgroup_fd, attach_type, NULL);
+ if (link_fd < 0) {
+ link_fd = -errno;
+ free(link);
+ pr_warn("program '%s': failed to attach to cgroup: %s\n",
+ bpf_program__title(prog, false),
+ libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
+ return ERR_PTR(link_fd);
+ }
+ link->fd = link_fd;
+ return link;
+}
+
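Usage is a one-liner once the cgroup fd is open; a sketch with a hypothetical cgroup path, relying on the section definition (e.g. SEC("cgroup_skb/egress")) to supply the attach type:

    #include <fcntl.h>
    #include <bpf/libbpf.h>

    static struct bpf_link *attach_to_cgroup(struct bpf_program *prog)
    {
    	int cg_fd = open("/sys/fs/cgroup/unified/app", O_RDONLY);	/* hypothetical */

    	if (cg_fd < 0)
    		return NULL;
    	return bpf_program__attach_cgroup(prog, cg_fd);
    }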
struct bpf_link *bpf_program__attach(struct bpf_program *prog)
{
const struct bpf_sec_def *sec_def;
@@ -7410,10 +7592,9 @@ struct bpf_link *bpf_program__attach(struct bpf_program *prog)
static int bpf_link__detach_struct_ops(struct bpf_link *link)
{
- struct bpf_link_fd *l = (void *)link;
__u32 zero = 0;
- if (bpf_map_delete_elem(l->fd, &zero))
+ if (bpf_map_delete_elem(link->fd, &zero))
return -errno;
return 0;
@@ -7422,7 +7603,7 @@ static int bpf_link__detach_struct_ops(struct bpf_link *link)
struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
{
struct bpf_struct_ops *st_ops;
- struct bpf_link_fd *link;
+ struct bpf_link *link;
__u32 i, zero = 0;
int err;
@@ -7454,10 +7635,10 @@ struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
return ERR_PTR(err);
}
- link->link.detach = bpf_link__detach_struct_ops;
+ link->detach = bpf_link__detach_struct_ops;
link->fd = map->fd;
- return (struct bpf_link *)link;
+ return link;
}
enum bpf_perf_event_ret
@@ -8138,6 +8319,31 @@ void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
}
}
+int bpf_program__set_attach_target(struct bpf_program *prog,
+ int attach_prog_fd,
+ const char *attach_func_name)
+{
+ int btf_id;
+
+ if (!prog || attach_prog_fd < 0 || !attach_func_name)
+ return -EINVAL;
+
+ if (attach_prog_fd)
+ btf_id = libbpf_find_prog_btf_id(attach_func_name,
+ attach_prog_fd);
+ else
+ btf_id = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
+ attach_func_name,
+ prog->expected_attach_type);
+
+ if (btf_id < 0)
+ return btf_id;
+
+ prog->attach_btf_id = btf_id;
+ prog->attach_prog_fd = attach_prog_fd;
+ return 0;
+}
+
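A sketch of choosing the attach target at runtime rather than baking it into the SEC() name; it assumes prog came from a section like "fentry/" so that expected_attach_type is already set, and passes 0 as attach_prog_fd to resolve the function in vmlinux BTF:

    static int load_with_runtime_target(struct bpf_object *obj,
    				    struct bpf_program *prog)
    {
    	int err;

    	err = bpf_program__set_attach_target(prog, 0 /* vmlinux */,
    					     "do_unlinkat");
    	if (err)
    		return err;
    	return bpf_object__load(obj);	/* uses the preset attach_btf_id */
    }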
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
{
int err = 0, n, len, start, end = -1;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 3fe12c9d1f92..44df1d3e7287 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -219,6 +219,13 @@ LIBBPF_API void bpf_program__unload(struct bpf_program *prog);
struct bpf_link;
+LIBBPF_API struct bpf_link *bpf_link__open(const char *path);
+LIBBPF_API int bpf_link__fd(const struct bpf_link *link);
+LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link);
+LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path);
+LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
+LIBBPF_API int bpf_link__update_program(struct bpf_link *link,
+ struct bpf_program *prog);
LIBBPF_API void bpf_link__disconnect(struct bpf_link *link);
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
@@ -240,11 +247,17 @@ bpf_program__attach_tracepoint(struct bpf_program *prog,
LIBBPF_API struct bpf_link *
bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
const char *tp_name);
-
LIBBPF_API struct bpf_link *
bpf_program__attach_trace(struct bpf_program *prog);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_lsm(struct bpf_program *prog);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd);
+
struct bpf_map;
+
LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map);
+
struct bpf_insn;
/*
@@ -316,6 +329,7 @@ LIBBPF_API int bpf_program__set_socket_filter(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_tracepoint(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_raw_tracepoint(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_kprobe(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_lsm(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
@@ -334,10 +348,15 @@ LIBBPF_API void
bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type);
+LIBBPF_API int
+bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
+ const char *attach_func_name);
+
LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_kprobe(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_lsm(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
@@ -398,6 +417,8 @@ typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
bpf_map_clear_priv_t clear_priv);
LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
+ const void *data, size_t size);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
@@ -435,7 +456,15 @@ struct xdp_link_info {
__u8 attach_mode;
};
+struct bpf_xdp_set_link_opts {
+ size_t sz;
+ __u32 old_fd;
+};
+#define bpf_xdp_set_link_opts__last_field old_fd
+
LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
+LIBBPF_API int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
+ const struct bpf_xdp_set_link_opts *opts);
LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
size_t info_size, __u32 flags);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index b035122142bb..bb8831605b25 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -235,3 +235,22 @@ LIBBPF_0.0.7 {
btf__align_of;
libbpf_find_kernel_btf;
} LIBBPF_0.0.6;
+
+LIBBPF_0.0.8 {
+ global:
+ bpf_link__fd;
+ bpf_link__open;
+ bpf_link__pin;
+ bpf_link__pin_path;
+ bpf_link__unpin;
+ bpf_link__update_program;
+ bpf_link_create;
+ bpf_link_update;
+ bpf_map__set_initial_value;
+ bpf_program__attach_cgroup;
+ bpf_program__attach_lsm;
+ bpf_program__is_lsm;
+ bpf_program__set_attach_target;
+ bpf_program__set_lsm;
+ bpf_set_link_xdp_fd_opts;
+} LIBBPF_0.0.7;
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index b782ebef6ac9..2c92059c0c90 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -108,6 +108,7 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
case BPF_PROG_TYPE_TRACING:
case BPF_PROG_TYPE_STRUCT_OPS:
case BPF_PROG_TYPE_EXT:
+ case BPF_PROG_TYPE_LSM:
default:
break;
}
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 431bd25c6cdb..18b5319025e1 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -132,7 +132,8 @@ done:
return ret;
}
-int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
+static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
+ __u32 flags)
{
int sock, seq = 0, ret;
struct nlattr *nla, *nla_xdp;
@@ -178,6 +179,14 @@ int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
nla->nla_len += nla_xdp->nla_len;
}
+ if (flags & XDP_FLAGS_REPLACE) {
+ nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
+ nla_xdp->nla_type = IFLA_XDP_EXPECTED_FD;
+ nla_xdp->nla_len = NLA_HDRLEN + sizeof(old_fd);
+ memcpy((char *)nla_xdp + NLA_HDRLEN, &old_fd, sizeof(old_fd));
+ nla->nla_len += nla_xdp->nla_len;
+ }
+
req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);
if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
@@ -191,6 +200,29 @@ cleanup:
return ret;
}
+int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
+ const struct bpf_xdp_set_link_opts *opts)
+{
+ int old_fd = -1;
+
+ if (!OPTS_VALID(opts, bpf_xdp_set_link_opts))
+ return -EINVAL;
+
+ if (OPTS_HAS(opts, old_fd)) {
+ old_fd = OPTS_GET(opts, old_fd, -1);
+ flags |= XDP_FLAGS_REPLACE;
+ }
+
+ return __bpf_set_link_xdp_fd_replace(ifindex, fd,
+ old_fd,
+ flags);
+}
+
+int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
+{
+ return __bpf_set_link_xdp_fd_replace(ifindex, fd, 0, flags);
+}
+
static int __dump_link_nlmsg(struct nlmsghdr *nlh,
libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
{
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 9807903f121e..f7f4efb70a4c 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -280,7 +280,11 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
fill->consumer = map + off.fr.consumer;
fill->flags = map + off.fr.flags;
fill->ring = map + off.fr.desc;
- fill->cached_cons = umem->config.fill_size;
+ fill->cached_prod = *fill->producer;
+ /* cached_cons is "size" bigger than the real consumer pointer;
+ * see xsk_prod_nb_free().
+ */
+ fill->cached_cons = *fill->consumer + umem->config.fill_size;
map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
@@ -297,6 +301,8 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
comp->consumer = map + off.cr.consumer;
comp->flags = map + off.cr.flags;
comp->ring = map + off.cr.desc;
+ comp->cached_prod = *comp->producer;
+ comp->cached_cons = *comp->consumer;
*umem_ptr = umem;
return 0;
@@ -672,6 +678,8 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
rx->consumer = rx_map + off.rx.consumer;
rx->flags = rx_map + off.rx.flags;
rx->ring = rx_map + off.rx.desc;
+ rx->cached_prod = *rx->producer;
+ rx->cached_cons = *rx->consumer;
}
xsk->rx = rx;
@@ -691,7 +699,11 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
tx->consumer = tx_map + off.tx.consumer;
tx->flags = tx_map + off.tx.flags;
tx->ring = tx_map + off.tx.desc;
- tx->cached_cons = xsk->config.tx_size;
+ tx->cached_prod = *tx->producer;
+ /* cached_cons is r->size bigger than the real consumer pointer;
+ * see xsk_prod_nb_free().
+ */
+ tx->cached_cons = *tx->consumer + xsk->config.tx_size;
}
xsk->tx = tx;
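The cached_cons bias exists so that the producer-side free-space check is a single subtraction; paraphrased for illustration from xsk_prod_nb_free() in tools/lib/bpf/xsk.h (renamed here to avoid clashing with the real inline):

    static inline __u32 prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
    {
    	__u32 free_entries = r->cached_cons - r->cached_prod;

    	if (free_entries >= nb)
    		return free_entries;

    	/* refresh the cache: keeping cached_cons "size" ahead of the real
    	 * consumer makes (cached_cons - cached_prod) directly equal the
    	 * number of free ring slots.
    	 */
    	r->cached_cons = *r->consumer + r->size;
    	return r->cached_cons - r->cached_prod;
    }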
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 6d2f3a1b2249..a7974638561c 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -106,6 +106,7 @@ ifneq ($(silent),1)
ifneq ($(V),1)
QUIET_CC = @echo ' CC '$@;
QUIET_CC_FPIC = @echo ' CC FPIC '$@;
+ QUIET_CLANG = @echo ' CLANG '$@;
QUIET_AR = @echo ' AR '$@;
QUIET_LINK = @echo ' LINK '$@;
QUIET_MKDIR = @echo ' MKDIR '$@;
diff --git a/tools/testing/selftests/.gitignore b/tools/testing/selftests/.gitignore
index 61df01cdf0b2..304fdf1a21dc 100644
--- a/tools/testing/selftests/.gitignore
+++ b/tools/testing/selftests/.gitignore
@@ -3,4 +3,7 @@ gpiogpio-hammer
gpioinclude/
gpiolsgpio
tpm2/SpaceTest.log
-tpm2/*.pyc
+
+# Python bytecode and cache
+__pycache__/
+*.py[cod]
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index b93fa645ee54..077818d0197f 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -36,7 +36,6 @@ TARGETS += net
TARGETS += net/forwarding
TARGETS += net/mptcp
TARGETS += netfilter
-TARGETS += networking/timestamping
TARGETS += nsfs
TARGETS += pidfd
TARGETS += powerpc
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index ec464859c6b6..2198cd876675 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -31,6 +31,7 @@ test_tcp_check_syncookie_user
test_sysctl
test_hashmap
test_btf_dump
+test_current_pid_tgid_new_ns
xdping
test_cpp
*.skel.h
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 257a1aaaa37d..7729892e0b04 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -20,8 +20,9 @@ CLANG ?= clang
LLC ?= llc
LLVM_OBJCOPY ?= llvm-objcopy
BPF_GCC ?= $(shell command -v bpf-gcc;)
-CFLAGS += -g -Wall -O2 $(GENFLAGS) -I$(CURDIR) -I$(APIDIR) \
+CFLAGS += -g -rdynamic -Wall -O2 $(GENFLAGS) -I$(CURDIR) \
-I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) -I$(TOOLSINCDIR) \
+ -I$(APIDIR) \
-Dbpf_prog_load=bpf_prog_test_load \
-Dbpf_load_program=bpf_test_load_program
LDLIBS += -lcap -lelf -lz -lrt -lpthread
@@ -32,7 +33,8 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
test_cgroup_storage \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \
- test_progs-no_alu32
+ test_progs-no_alu32 \
+ test_current_pid_tgid_new_ns
# Also test bpf-gcc, if present
ifneq ($(BPF_GCC),)
@@ -62,7 +64,8 @@ TEST_PROGS := test_kmod.sh \
test_tc_tunnel.sh \
test_tc_edt.sh \
test_xdping.sh \
- test_bpftool_build.sh
+ test_bpftool_build.sh \
+ test_bpftool.sh
TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh \
@@ -128,10 +131,13 @@ $(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ)
$(call msg,CC,,$@)
$(CC) -c $(CFLAGS) -o $@ $<
-VMLINUX_BTF_PATHS := $(abspath ../../../../vmlinux) \
- /sys/kernel/btf/vmlinux \
- /boot/vmlinux-$(shell uname -r)
-VMLINUX_BTF:= $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))
+VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux) \
+ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+ ../../../../vmlinux \
+ /sys/kernel/btf/vmlinux \
+ /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF := $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+
$(OUTPUT)/runqslower: $(BPFOBJ)
$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \
OUTPUT=$(SCRATCH_DIR)/ VMLINUX_BTF=$(VMLINUX_BTF) \
@@ -171,6 +177,10 @@ $(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(INCLUDE_DIR):
$(call msg,MKDIR,,$@)
mkdir -p $@
+$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
+ $(call msg,GEN,,$@)
+ $(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
+
# Get Clang's default includes on this system, as opposed to those seen by
# '-target bpf'. This fixes "missing" files on some architectures/distros,
# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
@@ -189,8 +199,8 @@ MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian)
CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG))
BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
- -I$(INCLUDE_DIR) -I$(CURDIR) -I$(CURDIR)/include/uapi \
- -I$(APIDIR) -I$(abspath $(OUTPUT)/../usr/include)
+ -I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \
+ -I$(abspath $(OUTPUT)/../usr/include)
CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \
-Wno-compare-distinct-pointer-types
@@ -209,7 +219,7 @@ define CLANG_BPF_BUILD_RULE
$(call msg,CLNG-LLC,$(TRUNNER_BINARY),$2)
($(CLANG) $3 -O2 -target bpf -emit-llvm \
-c $1 -o - || echo "BPF obj compilation failed") | \
- $(LLC) -mattr=dwarfris -march=bpf -mcpu=probe $4 -filetype=obj -o $2
+ $(LLC) -mattr=dwarfris -march=bpf -mcpu=v3 $4 -filetype=obj -o $2
endef
# Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
define CLANG_NOALU32_BPF_BUILD_RULE
@@ -223,7 +233,7 @@ define CLANG_NATIVE_BPF_BUILD_RULE
$(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
($(CLANG) $3 -O2 -emit-llvm \
-c $1 -o - || echo "BPF obj compilation failed") | \
- $(LLC) -march=bpf -mcpu=probe $4 -filetype=obj -o $2
+ $(LLC) -march=bpf -mcpu=v3 $4 -filetype=obj -o $2
endef
# Build BPF object using GCC
define GCC_BPF_BUILD_RULE
@@ -279,6 +289,7 @@ $(TRUNNER_BPF_PROGS_DIR)$(if $2,-)$2-bpfobjs := y
$(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$(TRUNNER_BPF_PROGS_DIR)/%.c \
$(TRUNNER_BPF_PROGS_DIR)/*.h \
+ $$(INCLUDE_DIR)/vmlinux.h \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
$(TRUNNER_BPF_CFLAGS), \
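Usage note, not part of the patch: make command-line variables override makefile assignments, so a BTF source outside the probed locations can be supplied explicitly, e.g. "make VMLINUX_BTF=/path/to/vmlinux" from tools/testing/selftests/bpf (the path is illustrative).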
diff --git a/tools/testing/selftests/bpf/bpf_tcp_helpers.h b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
index 8f21965ffc6c..5bf2fe9b1efa 100644
--- a/tools/testing/selftests/bpf/bpf_tcp_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
@@ -6,7 +6,7 @@
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
#define BPF_STRUCT_OPS(name, args...) \
SEC("struct_ops/"#name) \
diff --git a/tools/testing/selftests/bpf/bpf_trace_helpers.h b/tools/testing/selftests/bpf/bpf_trace_helpers.h
deleted file mode 100644
index c6f1354d93fb..000000000000
--- a/tools/testing/selftests/bpf/bpf_trace_helpers.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
-#ifndef __BPF_TRACE_HELPERS_H
-#define __BPF_TRACE_HELPERS_H
-
-#include <bpf/bpf_helpers.h>
-
-#define ___bpf_concat(a, b) a ## b
-#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
-#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
-#define ___bpf_narg(...) \
- ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
-#define ___bpf_empty(...) \
- ___bpf_nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
-
-#define ___bpf_ctx_cast0() ctx
-#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
-#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
-#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
-#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
-#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
-#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
-#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
-#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
-#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
-#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
-#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
-#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
-#define ___bpf_ctx_cast(args...) \
- ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
-
-/*
- * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
- * similar kinds of BPF programs, that accept input arguments as a single
- * pointer to untyped u64 array, where each u64 can actually be a typed
- * pointer or integer of different size. Instead of requring user to write
- * manual casts and work with array elements by index, BPF_PROG macro
- * allows user to declare a list of named and typed input arguments in the
- * same syntax as for normal C function. All the casting is hidden and
- * performed transparently, while user code can just assume working with
- * function arguments of specified type and name.
- *
- * Original raw context argument is preserved as well as 'ctx' argument.
- * This is useful when using BPF helpers that expect original context
- * as one of the parameters (e.g., for bpf_perf_event_output()).
- */
-#define BPF_PROG(name, args...) \
-name(unsigned long long *ctx); \
-static __always_inline typeof(name(0)) \
-____##name(unsigned long long *ctx, ##args); \
-typeof(name(0)) name(unsigned long long *ctx) \
-{ \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
- return ____##name(___bpf_ctx_cast(args)); \
- _Pragma("GCC diagnostic pop") \
-} \
-static __always_inline typeof(name(0)) \
-____##name(unsigned long long *ctx, ##args)
-
-struct pt_regs;
-
-#define ___bpf_kprobe_args0() ctx
-#define ___bpf_kprobe_args1(x) \
- ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
-#define ___bpf_kprobe_args2(x, args...) \
- ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
-#define ___bpf_kprobe_args3(x, args...) \
- ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
-#define ___bpf_kprobe_args4(x, args...) \
- ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
-#define ___bpf_kprobe_args5(x, args...) \
- ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
-#define ___bpf_kprobe_args(args...) \
- ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
-
-/*
- * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
- * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
- * low-level way of getting kprobe input arguments from struct pt_regs, and
- * provides a familiar typed and named function arguments syntax and
- * semantics of accessing kprobe input paremeters.
- *
- * Original struct pt_regs* context is preserved as 'ctx' argument. This might
- * be necessary when using BPF helpers like bpf_perf_event_output().
- */
-#define BPF_KPROBE(name, args...) \
-name(struct pt_regs *ctx); \
-static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args);\
-typeof(name(0)) name(struct pt_regs *ctx) \
-{ \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
- return ____##name(___bpf_kprobe_args(args)); \
- _Pragma("GCC diagnostic pop") \
-} \
-static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
-
-#define ___bpf_kretprobe_args0() ctx
-#define ___bpf_kretprobe_argsN(x, args...) \
- ___bpf_kprobe_args(args), (void *)PT_REGS_RET(ctx)
-#define ___bpf_kretprobe_args(args...) \
- ___bpf_apply(___bpf_kretprobe_args, ___bpf_empty(args))(args)
-
-/*
- * BPF_KRETPROBE is similar to BPF_KPROBE, except, in addition to listing all
- * input kprobe arguments, one last extra argument has to be specified, which
- * captures kprobe return value.
- */
-#define BPF_KRETPROBE(name, args...) \
-name(struct pt_regs *ctx); \
-static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args);\
-typeof(name(0)) name(struct pt_regs *ctx) \
-{ \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
- return ____##name(___bpf_kretprobe_args(args)); \
- _Pragma("GCC diagnostic pop") \
-} \
-static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
-#endif
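These macros were relocated rather than dropped; they now ship in libbpf's bpf_tracing.h, as the include change in bpf_tcp_helpers.h above shows. A minimal consumer of the relocated header, with an illustrative kprobe target:

	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	struct filename;

	SEC("kprobe/do_unlinkat")
	int BPF_KPROBE(handle_unlinkat, int dfd, struct filename *name)
	{
		/* dfd and name are typed views of the pt_regs argument
		 * registers, unpacked by BPF_KPROBE
		 */
		return 0;
	}

	char _license[] SEC("license") = "GPL";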
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 5dc109f4c097..60e3ae5d4e48 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -35,3 +35,5 @@ CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_IPV6_SIT=m
CONFIG_BPF_JIT=y
+CONFIG_BPF_LSM=y
+CONFIG_SECURITY=y
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index 8482bbc67eec..9a8f47fc0b91 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -11,6 +11,7 @@
static const unsigned int total_bytes = 10 * 1024 * 1024;
static const struct timeval timeo_sec = { .tv_sec = 10 };
static const size_t timeo_optlen = sizeof(timeo_sec);
+static int expected_stg = 0xeB9F;
static int stop, duration;
static int settimeo(int fd)
@@ -88,7 +89,7 @@ done:
return NULL;
}
-static void do_test(const char *tcp_ca)
+static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
{
struct sockaddr_in6 sa6 = {};
ssize_t nr_recv = 0, bytes = 0;
@@ -126,14 +127,34 @@ static void do_test(const char *tcp_ca)
err = listen(lfd, 1);
if (CHECK(err == -1, "listen", "errno:%d\n", errno))
goto done;
- err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd);
- if (CHECK(err != 0, "pthread_create", "err:%d\n", err))
- goto done;
+
+ if (sk_stg_map) {
+ err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd,
+ &expected_stg, BPF_NOEXIST);
+ if (CHECK(err, "bpf_map_update_elem(sk_stg_map)",
+ "err:%d errno:%d\n", err, errno))
+ goto done;
+ }
/* connect to server */
err = connect(fd, (struct sockaddr *)&sa6, addrlen);
if (CHECK(err == -1, "connect", "errno:%d\n", errno))
- goto wait_thread;
+ goto done;
+
+ if (sk_stg_map) {
+ int tmp_stg;
+
+ err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd,
+ &tmp_stg);
+ if (CHECK(!err || errno != ENOENT,
+ "bpf_map_lookup_elem(sk_stg_map)",
+ "err:%d errno:%d\n", err, errno))
+ goto done;
+ }
+
+ err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd);
+ if (CHECK(err != 0, "pthread_create", "err:%d errno:%d\n", err, errno))
+ goto done;
/* recv total_bytes */
while (bytes < total_bytes && !READ_ONCE(stop)) {
@@ -149,7 +170,6 @@ static void do_test(const char *tcp_ca)
CHECK(bytes != total_bytes, "recv", "%zd != %u nr_recv:%zd errno:%d\n",
bytes, total_bytes, nr_recv, errno);
-wait_thread:
WRITE_ONCE(stop, 1);
pthread_join(srv_thread, &thread_ret);
CHECK(IS_ERR(thread_ret), "pthread_join", "thread_ret:%ld",
@@ -175,7 +195,7 @@ static void test_cubic(void)
return;
}
- do_test("bpf_cubic");
+ do_test("bpf_cubic", NULL);
bpf_link__destroy(link);
bpf_cubic__destroy(cubic_skel);
@@ -197,7 +217,10 @@ static void test_dctcp(void)
return;
}
- do_test("bpf_dctcp");
+ do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map);
+ CHECK(dctcp_skel->bss->stg_result != expected_stg,
+ "Unexpected stg_result", "stg_result (%x) != expected_stg (%x)\n",
+ dctcp_skel->bss->stg_result, expected_stg);
bpf_link__destroy(link);
bpf_dctcp__destroy(dctcp_skel);
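Hedged sketch of the BPF counterpart these new assertions rely on (the real code lives in progs/bpf_dctcp.c): the struct_ops init hook reads the value userspace seeded into the socket-storage map, records it in stg_result and deletes the entry, which is why the post-connect lookup in do_test() must fail with ENOENT.

	int stg_result = 0;

	struct {
		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
		__uint(map_flags, BPF_F_NO_PREALLOC);
		__type(key, int);
		__type(value, int);
	} sk_stg_map SEC(".maps");

	SEC("struct_ops/dctcp_init")
	void BPF_PROG(dctcp_init, struct sock *sk)
	{
		int *stg;

		/* ...regular DCTCP state initialization elided... */
		stg = bpf_sk_storage_get(&sk_stg_map, sk, NULL, 0);
		if (stg) {
			stg_result = *stg;
			bpf_sk_storage_delete(&sk_stg_map, sk);
		}
	}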
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index 7390d3061065..cb33a7ee4e04 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -125,6 +125,6 @@ void test_btf_dump() {
if (!test__start_subtest(t->name))
continue;
- test_btf_dump_case(i, &btf_dump_test_cases[i]);
+ test_btf_dump_case(i, &btf_dump_test_cases[i]);
}
}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
index 5b13f2c6c402..70e94e783070 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c
@@ -6,7 +6,7 @@
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
-char bpf_log_buf[BPF_LOG_BUF_SIZE];
+static char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int prog_load(void)
{
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
index 2ff21dbce179..139f8e82c7c6 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
@@ -6,7 +6,7 @@
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
-char bpf_log_buf[BPF_LOG_BUF_SIZE];
+static char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int map_fd = -1;
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
index 9d8cb48b99de..9e96f8d87fea 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c
@@ -8,7 +8,7 @@
#define BAR "/foo/bar/"
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
-char bpf_log_buf[BPF_LOG_BUF_SIZE];
+static char bpf_log_buf[BPF_LOG_BUF_SIZE];
static int prog_load(int verdict)
{
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c
new file mode 100644
index 000000000000..6e04f8d1d15b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "test_cgroup_link.skel.h"
+
+static __u32 duration = 0;
+#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"
+
+static struct test_cgroup_link *skel = NULL;
+
+int ping_and_check(int exp_calls, int exp_alt_calls)
+{
+ skel->bss->calls = 0;
+ skel->bss->alt_calls = 0;
+ CHECK_FAIL(system(PING_CMD));
+ if (CHECK(skel->bss->calls != exp_calls, "call_cnt",
+ "exp %d, got %d\n", exp_calls, skel->bss->calls))
+ return -EINVAL;
+ if (CHECK(skel->bss->alt_calls != exp_alt_calls, "alt_call_cnt",
+ "exp %d, got %d\n", exp_alt_calls, skel->bss->alt_calls))
+ return -EINVAL;
+ return 0;
+}
+
+void test_cgroup_link(void)
+{
+ struct {
+ const char *path;
+ int fd;
+ } cgs[] = {
+ { "/cg1" },
+ { "/cg1/cg2" },
+ { "/cg1/cg2/cg3" },
+ { "/cg1/cg2/cg3/cg4" },
+ };
+ int last_cg = ARRAY_SIZE(cgs) - 1, cg_nr = ARRAY_SIZE(cgs);
+ DECLARE_LIBBPF_OPTS(bpf_link_update_opts, link_upd_opts);
+ struct bpf_link *links[ARRAY_SIZE(cgs)] = {}, *tmp_link;
+ __u32 prog_ids[ARRAY_SIZE(cgs)], prog_cnt = 0, attach_flags;
+ int i = 0, err, prog_fd;
+ bool detach_legacy = false;
+
+ skel = test_cgroup_link__open_and_load();
+ if (CHECK(!skel, "skel_open_load", "failed to open/load skeleton\n"))
+ return;
+ prog_fd = bpf_program__fd(skel->progs.egress);
+
+ err = setup_cgroup_environment();
+ if (CHECK(err, "cg_init", "failed: %d\n", err))
+ goto cleanup;
+
+ for (i = 0; i < cg_nr; i++) {
+ cgs[i].fd = create_and_get_cgroup(cgs[i].path);
+ if (CHECK(cgs[i].fd < 0, "cg_create", "fail: %d\n", cgs[i].fd))
+ goto cleanup;
+ }
+
+ err = join_cgroup(cgs[last_cg].path);
+ if (CHECK(err, "cg_join", "fail: %d\n", err))
+ goto cleanup;
+
+ for (i = 0; i < cg_nr; i++) {
+ links[i] = bpf_program__attach_cgroup(skel->progs.egress,
+ cgs[i].fd);
+ if (CHECK(IS_ERR(links[i]), "cg_attach", "i: %d, err: %ld\n",
+ i, PTR_ERR(links[i])))
+ goto cleanup;
+ }
+
+ ping_and_check(cg_nr, 0);
+
+ /* query the number of effective progs and attach flags in root cg */
+ err = bpf_prog_query(cgs[0].fd, BPF_CGROUP_INET_EGRESS,
+ BPF_F_QUERY_EFFECTIVE, &attach_flags, NULL,
+ &prog_cnt);
+ CHECK_FAIL(err);
+ CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
+ if (CHECK(prog_cnt != 1, "effect_cnt", "exp %d, got %d\n", 1, prog_cnt))
+ goto cleanup;
+
+ /* query the number of effective progs in last cg */
+ err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
+ BPF_F_QUERY_EFFECTIVE, NULL, NULL,
+ &prog_cnt);
+ CHECK_FAIL(err);
+ CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
+ if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
+ cg_nr, prog_cnt))
+ goto cleanup;
+
+ /* query the effective prog IDs in last cg */
+ err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
+ BPF_F_QUERY_EFFECTIVE, &attach_flags,
+ prog_ids, &prog_cnt);
+ CHECK_FAIL(err);
+ CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
+ if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
+ cg_nr, prog_cnt))
+ goto cleanup;
+ for (i = 1; i < prog_cnt; i++) {
+ CHECK(prog_ids[i - 1] != prog_ids[i], "prog_id_check",
+ "idx %d, prev id %d, cur id %d\n",
+ i, prog_ids[i - 1], prog_ids[i]);
+ }
+
+ /* detach bottom program and ping again */
+ bpf_link__destroy(links[last_cg]);
+ links[last_cg] = NULL;
+
+ ping_and_check(cg_nr - 1, 0);
+
+ /* mix in with non link-based multi-attachments */
+ err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
+ BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
+ if (CHECK(err, "cg_attach_legacy", "errno=%d\n", errno))
+ goto cleanup;
+ detach_legacy = true;
+
+ links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
+ cgs[last_cg].fd);
+ if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n",
+ PTR_ERR(links[last_cg])))
+ goto cleanup;
+
+ ping_and_check(cg_nr + 1, 0);
+
+ /* detach link */
+ bpf_link__destroy(links[last_cg]);
+ links[last_cg] = NULL;
+
+ /* detach legacy */
+ err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
+ if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
+ goto cleanup;
+ detach_legacy = false;
+
+ /* attach legacy exclusive prog attachment */
+ err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
+ BPF_CGROUP_INET_EGRESS, 0);
+ if (CHECK(err, "cg_attach_exclusive", "errno=%d\n", errno))
+ goto cleanup;
+ detach_legacy = true;
+
+ /* attempt to mix in with multi-attach bpf_link */
+ tmp_link = bpf_program__attach_cgroup(skel->progs.egress,
+ cgs[last_cg].fd);
+ if (CHECK(!IS_ERR(tmp_link), "cg_attach_fail", "unexpected success!\n")) {
+ bpf_link__destroy(tmp_link);
+ goto cleanup;
+ }
+
+ ping_and_check(cg_nr, 0);
+
+ /* detach */
+ err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
+ if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
+ goto cleanup;
+ detach_legacy = false;
+
+ ping_and_check(cg_nr - 1, 0);
+
+ /* attach back link-based one */
+ links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
+ cgs[last_cg].fd);
+ if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n",
+ PTR_ERR(links[last_cg])))
+ goto cleanup;
+
+ ping_and_check(cg_nr, 0);
+
+ /* check legacy exclusive prog can't be attached */
+ err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
+ BPF_CGROUP_INET_EGRESS, 0);
+ if (CHECK(!err, "cg_attach_exclusive", "unexpected success")) {
+ bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
+ goto cleanup;
+ }
+
+ /* replace BPF programs inside their links for all but first link */
+ for (i = 1; i < cg_nr; i++) {
+ err = bpf_link__update_program(links[i], skel->progs.egress_alt);
+ if (CHECK(err, "prog_upd", "link #%d\n", i))
+ goto cleanup;
+ }
+
+ ping_and_check(1, cg_nr - 1);
+
+ /* Attempt program update with wrong expected BPF program */
+ link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress_alt);
+ link_upd_opts.flags = BPF_F_REPLACE;
+ err = bpf_link_update(bpf_link__fd(links[0]),
+ bpf_program__fd(skel->progs.egress_alt),
+ &link_upd_opts);
+ if (CHECK(err == 0 || errno != EPERM, "prog_cmpxchg1",
+ "unexpectedly succeeded, err %d, errno %d\n", err, -errno))
+ goto cleanup;
+
+ /* Compare-exchange single link program from egress to egress_alt */
+ link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress);
+ link_upd_opts.flags = BPF_F_REPLACE;
+ err = bpf_link_update(bpf_link__fd(links[0]),
+ bpf_program__fd(skel->progs.egress_alt),
+ &link_upd_opts);
+ if (CHECK(err, "prog_cmpxchg2", "errno %d\n", -errno))
+ goto cleanup;
+
+ /* ping */
+ ping_and_check(0, cg_nr);
+
+ /* close cgroup FDs before detaching links */
+ for (i = 0; i < cg_nr; i++) {
+ if (cgs[i].fd > 0) {
+ close(cgs[i].fd);
+ cgs[i].fd = -1;
+ }
+ }
+
+ /* BPF programs should still get called */
+ ping_and_check(0, cg_nr);
+
+ /* leave cgroup and remove them, don't detach programs */
+ cleanup_cgroup_environment();
+
+ /* BPF programs should have been auto-detached */
+ ping_and_check(0, 0);
+
+cleanup:
+ if (detach_legacy)
+ bpf_prog_detach2(prog_fd, cgs[last_cg].fd,
+ BPF_CGROUP_INET_EGRESS);
+
+ for (i = 0; i < cg_nr; i++) {
+ if (!IS_ERR(links[i]))
+ bpf_link__destroy(links[i]);
+ }
+ test_cgroup_link__destroy(skel);
+
+ for (i = 0; i < cg_nr; i++) {
+ if (cgs[i].fd > 0)
+ close(cgs[i].fd);
+ }
+ cleanup_cgroup_environment();
+}
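Hedged sketch of the companion BPF object (the real programs are in progs/test_cgroup_link.c; section names here are illustrative): two plain cgroup_skb programs that bump the counters ping_and_check() reads, which is all the link-vs-legacy attachment juggling above requires.

	int calls = 0;
	int alt_calls = 0;

	SEC("cgroup_skb/egress1")
	int egress(struct __sk_buff *skb)
	{
		__sync_fetch_and_add(&calls, 1);
		return 1;	/* allow the packet */
	}

	SEC("cgroup_skb/egress2")
	int egress_alt(struct __sk_buff *skb)
	{
		__sync_fetch_and_add(&alt_calls, 1);
		return 1;
	}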
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
index 235ac4f67f5b..83493bd5745c 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
@@ -1,22 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-#include "test_pkt_access.skel.h"
#include "fentry_test.skel.h"
#include "fexit_test.skel.h"
void test_fentry_fexit(void)
{
- struct test_pkt_access *pkt_skel = NULL;
struct fentry_test *fentry_skel = NULL;
struct fexit_test *fexit_skel = NULL;
__u64 *fentry_res, *fexit_res;
__u32 duration = 0, retval;
- int err, pkt_fd, i;
+ int err, prog_fd, i;
- pkt_skel = test_pkt_access__open_and_load();
- if (CHECK(!pkt_skel, "pkt_skel_load", "pkt_access skeleton failed\n"))
- return;
fentry_skel = fentry_test__open_and_load();
if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n"))
goto close_prog;
@@ -31,8 +26,8 @@ void test_fentry_fexit(void)
if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
goto close_prog;
- pkt_fd = bpf_program__fd(pkt_skel->progs.test_pkt_access);
- err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ prog_fd = bpf_program__fd(fexit_skel->progs.test1);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
CHECK(err || retval, "ipv6",
"err %d errno %d retval %d duration %d\n",
@@ -49,7 +44,6 @@ void test_fentry_fexit(void)
}
close_prog:
- test_pkt_access__destroy(pkt_skel);
fentry_test__destroy(fentry_skel);
fexit_test__destroy(fexit_skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
index 5cc06021f27d..04ebbf1cb390 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c
@@ -1,20 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
-#include "test_pkt_access.skel.h"
#include "fentry_test.skel.h"
void test_fentry_test(void)
{
- struct test_pkt_access *pkt_skel = NULL;
struct fentry_test *fentry_skel = NULL;
- int err, pkt_fd, i;
+ int err, prog_fd, i;
__u32 duration = 0, retval;
__u64 *result;
- pkt_skel = test_pkt_access__open_and_load();
- if (CHECK(!pkt_skel, "pkt_skel_load", "pkt_access skeleton failed\n"))
- return;
fentry_skel = fentry_test__open_and_load();
if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n"))
goto cleanup;
@@ -23,10 +18,10 @@ void test_fentry_test(void)
if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err))
goto cleanup;
- pkt_fd = bpf_program__fd(pkt_skel->progs.test_pkt_access);
- err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ prog_fd = bpf_program__fd(fentry_skel->progs.test1);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv6",
+ CHECK(err || retval, "test_run",
"err %d errno %d retval %d duration %d\n",
err, errno, retval, duration);
@@ -39,5 +34,4 @@ void test_fentry_test(void)
cleanup:
fentry_test__destroy(fentry_skel);
- test_pkt_access__destroy(pkt_skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
index d2c3655dd7a3..78d7a2765c27 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c
@@ -1,64 +1,37 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
+#include "fexit_test.skel.h"
void test_fexit_test(void)
{
- struct bpf_prog_load_attr attr = {
- .file = "./fexit_test.o",
- };
-
- char prog_name[] = "fexit/bpf_fentry_testX";
- struct bpf_object *obj = NULL, *pkt_obj;
- int err, pkt_fd, kfree_skb_fd, i;
- struct bpf_link *link[6] = {};
- struct bpf_program *prog[6];
+ struct fexit_test *fexit_skel = NULL;
+ int err, prog_fd, i;
__u32 duration = 0, retval;
- struct bpf_map *data_map;
- const int zero = 0;
- u64 result[6];
+ __u64 *result;
- err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
- &pkt_obj, &pkt_fd);
- if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
- return;
- err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd);
- if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno))
- goto close_prog;
+ fexit_skel = fexit_test__open_and_load();
+ if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
+ goto cleanup;
- for (i = 0; i < 6; i++) {
- prog_name[sizeof(prog_name) - 2] = '1' + i;
- prog[i] = bpf_object__find_program_by_title(obj, prog_name);
- if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name))
- goto close_prog;
- link[i] = bpf_program__attach_trace(prog[i]);
- if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
- goto close_prog;
- }
- data_map = bpf_object__find_map_by_name(obj, "fexit_te.bss");
- if (CHECK(!data_map, "find_data_map", "data map not found\n"))
- goto close_prog;
+ err = fexit_test__attach(fexit_skel);
+ if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
+ goto cleanup;
- err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ prog_fd = bpf_program__fd(fexit_skel->progs.test1);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv6",
+ CHECK(err || retval, "test_run",
"err %d errno %d retval %d duration %d\n",
err, errno, retval, duration);
- err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result);
- if (CHECK(err, "get_result",
- "failed to get output data: %d\n", err))
- goto close_prog;
-
- for (i = 0; i < 6; i++)
- if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n",
- i + 1, result[i]))
- goto close_prog;
+ result = (__u64 *)fexit_skel->bss;
+ for (i = 0; i < 6; i++) {
+ if (CHECK(result[i] != 1, "result",
+ "fexit_test%d failed err %lld\n", i + 1, result[i]))
+ goto cleanup;
+ }
-close_prog:
- for (i = 0; i < 6; i++)
- if (!IS_ERR_OR_NULL(link[i]))
- bpf_link__destroy(link[i]);
- bpf_object__close(obj);
- bpf_object__close(pkt_obj);
+cleanup:
+ fexit_test__destroy(fexit_skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
index eba9a970703b..925722217edf 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
@@ -82,6 +82,7 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
void test_get_stack_raw_tp(void)
{
const char *file = "./test_get_stack_rawtp.o";
+ const char *file_err = "./test_get_stack_rawtp_err.o";
const char *prog_name = "raw_tracepoint/sys_enter";
int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
struct perf_buffer_opts pb_opts = {};
@@ -93,6 +94,10 @@ void test_get_stack_raw_tp(void)
struct bpf_map *map;
cpu_set_t cpu_set;
+ err = bpf_prog_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno))
+ return;
+
err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/global_data_init.c b/tools/testing/selftests/bpf/prog_tests/global_data_init.c
new file mode 100644
index 000000000000..3bdaa5a40744
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/global_data_init.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_global_data_init(void)
+{
+ const char *file = "./test_global_data.o";
+ int err = -ENOMEM, map_fd, zero = 0;
+ __u8 *buff = NULL, *newval = NULL;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+ __u32 duration = 0;
+ size_t sz;
+
+ obj = bpf_object__open_file(file, NULL);
+ if (CHECK_FAIL(!obj))
+ return;
+
+ map = bpf_object__find_map_by_name(obj, "test_glo.rodata");
+ if (CHECK_FAIL(!map || !bpf_map__is_internal(map)))
+ goto out;
+
+ sz = bpf_map__def(map)->value_size;
+ newval = malloc(sz);
+ if (CHECK_FAIL(!newval))
+ goto out;
+
+ memset(newval, 0, sz);
+ /* wrong size, should fail */
+ err = bpf_map__set_initial_value(map, newval, sz - 1);
+ if (CHECK(!err, "reject set initial value wrong size", "err %d\n", err))
+ goto out;
+
+ err = bpf_map__set_initial_value(map, newval, sz);
+ if (CHECK(err, "set initial value", "err %d\n", err))
+ goto out;
+
+ err = bpf_object__load(obj);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ map_fd = bpf_map__fd(map);
+ if (CHECK_FAIL(map_fd < 0))
+ goto out;
+
+ buff = malloc(sz);
+ if (buff)
+ err = bpf_map_lookup_elem(map_fd, &zero, buff);
+ if (CHECK(!buff || err || memcmp(buff, newval, sz),
+ "compare .rodata map data override",
+ "err %d errno %d\n", err, errno))
+ goto out;
+
+ memset(newval, 1, sz);
+ /* object loaded - should fail */
+ err = bpf_map__set_initial_value(map, newval, sz);
+ CHECK(!err, "reject set initial value after load", "err %d\n", err);
+out:
+ free(buff);
+ free(newval);
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/link_pinning.c b/tools/testing/selftests/bpf/prog_tests/link_pinning.c
new file mode 100644
index 000000000000..a743288cf384
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/link_pinning.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <test_progs.h>
+#include <sys/stat.h>
+
+#include "test_link_pinning.skel.h"
+
+static int duration = 0;
+
+void test_link_pinning_subtest(struct bpf_program *prog,
+ struct test_link_pinning__bss *bss)
+{
+ const char *link_pin_path = "/sys/fs/bpf/pinned_link_test";
+ struct stat statbuf = {};
+ struct bpf_link *link;
+ int err, i;
+
+ link = bpf_program__attach(prog);
+ if (CHECK(IS_ERR(link), "link_attach", "err: %ld\n", PTR_ERR(link)))
+ goto cleanup;
+
+ bss->in = 1;
+ usleep(1);
+ CHECK(bss->out != 1, "res_check1", "exp %d, got %d\n", 1, bss->out);
+
+ /* pin link */
+ err = bpf_link__pin(link, link_pin_path);
+ if (CHECK(err, "link_pin", "err: %d\n", err))
+ goto cleanup;
+
+ CHECK(strcmp(link_pin_path, bpf_link__pin_path(link)), "pin_path1",
+ "exp %s, got %s\n", link_pin_path, bpf_link__pin_path(link));
+
+ /* check that link was pinned */
+ err = stat(link_pin_path, &statbuf);
+ if (CHECK(err, "stat_link", "err %d errno %d\n", err, errno))
+ goto cleanup;
+
+ bss->in = 2;
+ usleep(1);
+ CHECK(bss->out != 2, "res_check2", "exp %d, got %d\n", 2, bss->out);
+
+ /* destroy link, pinned link should keep program attached */
+ bpf_link__destroy(link);
+ link = NULL;
+
+ bss->in = 3;
+ usleep(1);
+ CHECK(bss->out != 3, "res_check3", "exp %d, got %d\n", 3, bss->out);
+
+ /* re-open link from BPFFS */
+ link = bpf_link__open(link_pin_path);
+ if (CHECK(IS_ERR(link), "link_open", "err: %ld\n", PTR_ERR(link)))
+ goto cleanup;
+
+ CHECK(strcmp(link_pin_path, bpf_link__pin_path(link)), "pin_path2",
+ "exp %s, got %s\n", link_pin_path, bpf_link__pin_path(link));
+
+ /* unpin link from BPFFS, program still attached */
+ err = bpf_link__unpin(link);
+ if (CHECK(err, "link_unpin", "err: %d\n", err))
+ goto cleanup;
+
+ /* still active, as we have FD open now */
+ bss->in = 4;
+ usleep(1);
+ CHECK(bss->out != 4, "res_check4", "exp %d, got %d\n", 4, bss->out);
+
+ bpf_link__destroy(link);
+ link = NULL;
+
+ /* Validate it's finally detached.
+ * Actual detachment might get delayed a bit, so there is no reliable
+ * way to validate it immediately here, let's count up for long enough
+ * and see if eventually output stops being updated
+ */
+ for (i = 5; i < 10000; i++) {
+ bss->in = i;
+ usleep(1);
+ if (bss->out == i - 1)
+ break;
+ }
+ CHECK(i == 10000, "link_attached", "got to iteration #%d\n", i);
+
+cleanup:
+ if (!IS_ERR(link))
+ bpf_link__destroy(link);
+}
+
+void test_link_pinning(void)
+{
+ struct test_link_pinning* skel;
+
+ skel = test_link_pinning__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ return;
+
+ if (test__start_subtest("pin_raw_tp"))
+ test_link_pinning_subtest(skel->progs.raw_tp_prog, skel->bss);
+ if (test__start_subtest("pin_tp_btf"))
+ test_link_pinning_subtest(skel->progs.tp_btf_prog, skel->bss);
+
+ test_link_pinning__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/modify_return.c b/tools/testing/selftests/bpf/prog_tests/modify_return.c
new file mode 100644
index 000000000000..97fec70c600b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/modify_return.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020 Google LLC.
+ */
+
+#include <test_progs.h>
+#include "modify_return.skel.h"
+
+#define LOWER(x) ((x) & 0xffff)
+#define UPPER(x) ((x) >> 16)
+
+
+static void run_test(__u32 input_retval, __u16 want_side_effect, __s16 want_ret)
+{
+ struct modify_return *skel = NULL;
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ __u16 side_effect;
+ __s16 ret;
+
+ skel = modify_return__open_and_load();
+ if (CHECK(!skel, "skel_load", "modify_return skeleton failed\n"))
+ goto cleanup;
+
+ err = modify_return__attach(skel);
+ if (CHECK(err, "modify_return", "attach failed: %d\n", err))
+ goto cleanup;
+
+ skel->bss->input_retval = input_retval;
+ prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0, NULL, 0,
+ &retval, &duration);
+
+ CHECK(err, "test_run", "err %d errno %d\n", err, errno);
+
+ side_effect = UPPER(retval);
+ ret = LOWER(retval);
+
+ CHECK(ret != want_ret, "test_run",
+ "unexpected ret: %d, expected: %d\n", ret, want_ret);
+ CHECK(side_effect != want_side_effect, "modify_return",
+ "unexpected side_effect: %d\n", side_effect);
+
+ CHECK(skel->bss->fentry_result != 1, "modify_return",
+ "fentry failed\n");
+ CHECK(skel->bss->fexit_result != 1, "modify_return",
+ "fexit failed\n");
+ CHECK(skel->bss->fmod_ret_result != 1, "modify_return",
+ "fmod_ret failed\n");
+
+cleanup:
+ modify_return__destroy(skel);
+}
+
+void test_modify_return(void)
+{
+ run_test(0 /* input_retval */,
+ 1 /* want_side_effect */,
+ 4 /* want_ret */);
+ run_test(-EINVAL /* input_retval */,
+ 0 /* want_side_effect */,
+ -EINVAL /* want_ret */);
+}
+
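Hedged sketch of the BPF side (the real object is progs/modify_return.c): an fmod_ret program attached to the kernel's bpf_modify_return_test() test function can override its return value, while the fentry/fexit programs around it record that they ran; UPPER/LOWER above then split side effect and return code out of the packed test-run retval.

	__u64 fmod_ret_result = 0;
	int input_retval = 0;

	SEC("fmod_ret/bpf_modify_return_test")
	int BPF_PROG(fmod_ret_test, int a, int *b, int ret)
	{
		fmod_ret_result = 1;
		/* non-zero: skip the function and use this as its return */
		return input_retval;
	}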
diff --git a/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
new file mode 100644
index 000000000000..542240e16564
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Carlos Neira cneirabustos@gmail.com */
+#include <test_progs.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+
+struct bss {
+ __u64 dev;
+ __u64 ino;
+ __u64 pid_tgid;
+ __u64 user_pid_tgid;
+};
+
+void test_ns_current_pid_tgid(void)
+{
+ const char *probe_name = "raw_tracepoint/sys_enter";
+ const char *file = "test_ns_current_pid_tgid.o";
+ int err, key = 0, duration = 0;
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ struct bpf_map *bss_map;
+ struct bpf_object *obj;
+ struct bss bss;
+ struct stat st;
+ __u64 id;
+
+ obj = bpf_object__open_file(file, NULL);
+ if (CHECK(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj)))
+ return;
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno))
+ goto cleanup;
+
+ bss_map = bpf_object__find_map_by_name(obj, "test_ns_.bss");
+ if (CHECK(!bss_map, "find_bss_map", "failed\n"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_title(obj, probe_name);
+ if (CHECK(!prog, "find_prog", "prog '%s' not found\n",
+ probe_name))
+ goto cleanup;
+
+ memset(&bss, 0, sizeof(bss));
+ pid_t tid = syscall(SYS_gettid);
+ pid_t pid = getpid();
+
+ id = (__u64) tid << 32 | pid;
+ bss.user_pid_tgid = id;
+
+ if (CHECK_FAIL(stat("/proc/self/ns/pid", &st))) {
+ perror("Failed to stat /proc/self/ns/pid");
+ goto cleanup;
+ }
+
+ bss.dev = st.st_dev;
+ bss.ino = st.st_ino;
+
+ err = bpf_map_update_elem(bpf_map__fd(bss_map), &key, &bss, 0);
+ if (CHECK(err, "setting_bss", "failed to set bss : %d\n", err))
+ goto cleanup;
+
+ link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
+ if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
+ PTR_ERR(link))) {
+ link = NULL;
+ goto cleanup;
+ }
+
+ /* trigger some syscalls */
+ usleep(1);
+
+ err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &key, &bss);
+ if (CHECK(err, "set_bss", "failed to get bss : %d\n", err))
+ goto cleanup;
+
+ if (CHECK(id != bss.pid_tgid, "Compare user pid/tgid vs. bpf pid/tgid",
+ "User pid/tgid %llu BPF pid/tgid %llu\n", id, bss.pid_tgid))
+ goto cleanup;
+cleanup:
+ if (link) {
+ bpf_link__destroy(link);
+ link = NULL;
+ }
+ bpf_object__close(obj);
+}
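Hedged sketch of the BPF side (the real program is progs/test_ns_current_pid_tgid.c): it passes the pid-namespace dev/ino seeded by userspace above to the new helper and records the namespaced pid/tgid for the final comparison; the globals mirror struct bss.

	__u64 dev, ino, pid_tgid, user_pid_tgid;	/* mirrors struct bss */

	SEC("raw_tracepoint/sys_enter")
	int trace(void *ctx)
	{
		struct bpf_pidns_info nsdata;

		if (bpf_get_ns_current_pid_tgid(dev, ino, &nsdata,
						sizeof(nsdata)))
			return 0;
		pid_tgid = (__u64)nsdata.tgid << 32 | nsdata.pid;
		return 0;
	}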
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_branches.c b/tools/testing/selftests/bpf/prog_tests/perf_branches.c
new file mode 100644
index 000000000000..e35c444902a7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/perf_branches.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <pthread.h>
+#include <sched.h>
+#include <sys/socket.h>
+#include <test_progs.h>
+#include "bpf/libbpf_internal.h"
+#include "test_perf_branches.skel.h"
+
+static void check_good_sample(struct test_perf_branches *skel)
+{
+ int written_global = skel->bss->written_global_out;
+ int required_size = skel->bss->required_size_out;
+ int written_stack = skel->bss->written_stack_out;
+ int pbe_size = sizeof(struct perf_branch_entry);
+ int duration = 0;
+
+ if (CHECK(!skel->bss->valid, "output not valid",
+ "no valid sample from prog"))
+ return;
+
+ /*
+ * It's hard to validate the contents of the branch entries b/c it
+ * would require some kind of disassembler and also encoding the
+ * valid jump instructions for supported architectures. So just check
+ * the easy stuff for now.
+ */
+ CHECK(required_size <= 0, "read_branches_size", "err %d\n", required_size);
+ CHECK(written_stack < 0, "read_branches_stack", "err %d\n", written_stack);
+ CHECK(written_stack % pbe_size != 0, "read_branches_stack",
+ "stack bytes written=%d not multiple of struct size=%d\n",
+ written_stack, pbe_size);
+ CHECK(written_global < 0, "read_branches_global", "err %d\n", written_global);
+ CHECK(written_global % pbe_size != 0, "read_branches_global",
+ "global bytes written=%d not multiple of struct size=%d\n",
+ written_global, pbe_size);
+ CHECK(written_global < written_stack, "read_branches_size",
+ "written_global=%d < written_stack=%d\n", written_global, written_stack);
+}
+
+static void check_bad_sample(struct test_perf_branches *skel)
+{
+ int written_global = skel->bss->written_global_out;
+ int required_size = skel->bss->required_size_out;
+ int written_stack = skel->bss->written_stack_out;
+ int duration = 0;
+
+ if (CHECK(!skel->bss->valid, "output not valid",
+ "no valid sample from prog"))
+ return;
+
+ CHECK((required_size != -EINVAL && required_size != -ENOENT),
+ "read_branches_size", "err %d\n", required_size);
+ CHECK((written_stack != -EINVAL && written_stack != -ENOENT),
+ "read_branches_stack", "written %d\n", written_stack);
+ CHECK((written_global != -EINVAL && written_global != -ENOENT),
+ "read_branches_global", "written %d\n", written_global);
+}
+
+static void test_perf_branches_common(int perf_fd,
+ void (*cb)(struct test_perf_branches *))
+{
+ struct test_perf_branches *skel;
+ int err, i, duration = 0;
+ bool detached = false;
+ struct bpf_link *link;
+ volatile int j = 0;
+ cpu_set_t cpu_set;
+
+ skel = test_perf_branches__open_and_load();
+ if (CHECK(!skel, "test_perf_branches_load",
+ "perf_branches skeleton failed\n"))
+ return;
+
+ /* attach perf_event */
+ link = bpf_program__attach_perf_event(skel->progs.perf_branches, perf_fd);
+ if (CHECK(IS_ERR(link), "attach_perf_event", "err %ld\n", PTR_ERR(link)))
+ goto out_destroy_skel;
+
+ /* generate some branches on cpu 0 */
+ CPU_ZERO(&cpu_set);
+ CPU_SET(0, &cpu_set);
+ err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
+ if (CHECK(err, "set_affinity", "cpu #0, err %d\n", err))
+ goto out_destroy;
+ /* spin the loop for a while (random high number) */
+ for (i = 0; i < 1000000; ++i)
+ ++j;
+
+ test_perf_branches__detach(skel);
+ detached = true;
+
+ cb(skel);
+out_destroy:
+ bpf_link__destroy(link);
+out_destroy_skel:
+ if (!detached)
+ test_perf_branches__detach(skel);
+ test_perf_branches__destroy(skel);
+}
+
+static void test_perf_branches_hw(void)
+{
+ struct perf_event_attr attr = {0};
+ int duration = 0;
+ int pfd;
+
+ /* create perf event */
+ attr.size = sizeof(attr);
+ attr.type = PERF_TYPE_HARDWARE;
+ attr.config = PERF_COUNT_HW_CPU_CYCLES;
+ attr.freq = 1;
+ attr.sample_freq = 4000;
+ attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
+ attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;
+ pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
+
+ /*
+ * Some setups don't support branch records (virtual machines, !x86),
+ * so skip test in this case.
+ */
+ if (pfd == -1) {
+ if (errno == ENOENT || errno == EOPNOTSUPP) {
+ printf("%s:SKIP:no PERF_SAMPLE_BRANCH_STACK\n",
+ __func__);
+ test__skip();
+ return;
+ }
+ if (CHECK(pfd < 0, "perf_event_open", "err %d errno %d\n",
+ pfd, errno))
+ return;
+ }
+
+ test_perf_branches_common(pfd, check_good_sample);
+
+ close(pfd);
+}
+
+/*
+ * Tests negative case -- run bpf_read_branch_records() on improperly configured
+ * perf event.
+ */
+static void test_perf_branches_no_hw(void)
+{
+ struct perf_event_attr attr = {0};
+ int duration = 0;
+ int pfd;
+
+ /* create perf event */
+ attr.size = sizeof(attr);
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_CPU_CLOCK;
+ attr.freq = 1;
+ attr.sample_freq = 4000;
+ pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
+ if (CHECK(pfd < 0, "perf_event_open", "err %d\n", pfd))
+ return;
+
+ test_perf_branches_common(pfd, check_bad_sample);
+
+ close(pfd);
+}
+
+void test_perf_branches(void)
+{
+ if (test__start_subtest("perf_branches_hw"))
+ test_perf_branches_hw();
+ if (test__start_subtest("perf_branches_no_hw"))
+ test_perf_branches_no_hw();
+}
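Hedged sketch of the BPF side (the real program is progs/test_perf_branches.c): bpf_read_branch_records() is exercised both in size-query mode and in copy mode; the destination size must be a multiple of sizeof(struct perf_branch_entry), otherwise the helper returns -EINVAL.

	struct perf_branch_entry global_ents[4] = {};
	int required_size_out, written_stack_out, written_global_out, valid;

	SEC("perf_event")
	int perf_branches(struct bpf_perf_event_data *ctx)
	{
		struct perf_branch_entry stack_ents[4];

		required_size_out = bpf_read_branch_records(ctx, NULL, 0,
					BPF_F_GET_BRANCH_RECORDS_SIZE);
		written_stack_out = bpf_read_branch_records(ctx, stack_ents,
					sizeof(stack_ents), 0);
		written_global_out = bpf_read_branch_records(ctx, global_ents,
					sizeof(global_ents), 0);
		valid = 1;
		return 0;
	}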
diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
index 0800036ed654..821b4146b7b6 100644
--- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
+++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
@@ -36,6 +36,7 @@ static int result_map, tmp_index_ovr_map, linum_map, data_check_map;
static __u32 expected_results[NR_RESULTS];
static int sk_fds[REUSEPORT_ARRAY_SIZE];
static int reuseport_array = -1, outer_map = -1;
+static enum bpf_map_type inner_map_type;
static int select_by_skb_data_prog;
static int saved_tcp_syncookie = -1;
static struct bpf_object *obj;
@@ -63,13 +64,15 @@ static union sa46 {
} \
})
-static int create_maps(void)
+static int create_maps(enum bpf_map_type inner_type)
{
struct bpf_create_map_attr attr = {};
+ inner_map_type = inner_type;
+
/* Creating reuseport_array */
attr.name = "reuseport_array";
- attr.map_type = BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
+ attr.map_type = inner_type;
attr.key_size = sizeof(__u32);
attr.value_size = sizeof(__u32);
attr.max_entries = REUSEPORT_ARRAY_SIZE;
@@ -506,11 +509,6 @@ static void test_syncookie(int type, sa_family_t family)
.pass_on_failure = 0,
};
- if (type != SOCK_STREAM) {
- test__skip();
- return;
- }
-
/*
* +1 for TCP-SYN and
* +1 for the TCP-ACK (ack the syncookie)
@@ -728,12 +726,36 @@ static void cleanup_per_test(bool no_inner_map)
static void cleanup(void)
{
- if (outer_map != -1)
+ if (outer_map != -1) {
close(outer_map);
- if (reuseport_array != -1)
+ outer_map = -1;
+ }
+
+ if (reuseport_array != -1) {
close(reuseport_array);
- if (obj)
+ reuseport_array = -1;
+ }
+
+ if (obj) {
bpf_object__close(obj);
+ obj = NULL;
+ }
+
+ memset(expected_results, 0, sizeof(expected_results));
+}
+
+static const char *maptype_str(enum bpf_map_type type)
+{
+ switch (type) {
+ case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+ return "reuseport_sockarray";
+ case BPF_MAP_TYPE_SOCKMAP:
+ return "sockmap";
+ case BPF_MAP_TYPE_SOCKHASH:
+ return "sockhash";
+ default:
+ return "unknown";
+ }
}
static const char *family_str(sa_family_t family)
@@ -760,7 +782,7 @@ static const char *sotype_str(int sotype)
}
}
-#define TEST_INIT(fn, ...) { fn, #fn, __VA_ARGS__ }
+#define TEST_INIT(fn_, ...) { .fn = fn_, .name = #fn_, __VA_ARGS__ }
static void test_config(int sotype, sa_family_t family, bool inany)
{
@@ -768,12 +790,15 @@ static void test_config(int sotype, sa_family_t family, bool inany)
void (*fn)(int sotype, sa_family_t family);
const char *name;
bool no_inner_map;
+ int need_sotype;
} tests[] = {
- TEST_INIT(test_err_inner_map, true /* no_inner_map */),
+ TEST_INIT(test_err_inner_map,
+ .no_inner_map = true),
TEST_INIT(test_err_skb_data),
TEST_INIT(test_err_sk_select_port),
TEST_INIT(test_pass),
- TEST_INIT(test_syncookie),
+ TEST_INIT(test_syncookie,
+ .need_sotype = SOCK_STREAM),
TEST_INIT(test_pass_on_err),
TEST_INIT(test_detach_bpf),
};
@@ -781,7 +806,11 @@ static void test_config(int sotype, sa_family_t family, bool inany)
const struct test *t;
for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
- snprintf(s, sizeof(s), "%s/%s %s %s",
+ if (t->need_sotype && t->need_sotype != sotype)
+ continue; /* test not compatible with socket type */
+
+ snprintf(s, sizeof(s), "%s %s/%s %s %s",
+ maptype_str(inner_map_type),
family_str(family), sotype_str(sotype),
inany ? "INANY" : "LOOPBACK", t->name);
@@ -816,13 +845,20 @@ static void test_all(void)
test_config(c->sotype, c->family, c->inany);
}
-void test_select_reuseport(void)
+void test_map_type(enum bpf_map_type mt)
{
- if (create_maps())
+ if (create_maps(mt))
goto out;
if (prepare_bpf_obj())
goto out;
+ test_all();
+out:
+ cleanup();
+}
+
+void test_select_reuseport(void)
+{
saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL);
if (saved_tcp_fo < 0)
goto out;
@@ -835,8 +871,9 @@ void test_select_reuseport(void)
if (disable_syncookie())
goto out;
- test_all();
+ test_map_type(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
+ test_map_type(BPF_MAP_TYPE_SOCKMAP);
+ test_map_type(BPF_MAP_TYPE_SOCKHASH);
out:
- cleanup();
restore_sysctls();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
new file mode 100644
index 000000000000..d572e1a2c297
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+// Copyright (c) 2019 Cloudflare
+// Copyright (c) 2020 Isovalent, Inc.
+/*
+ * Test that the socket assign program is able to redirect traffic towards a
+ * socket, regardless of whether the port or address destination of the traffic
+ * matches the port.
+ */
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "test_progs.h"
+
+#define BIND_PORT 1234
+#define CONNECT_PORT 4321
+#define TEST_DADDR (0xC0A80203)
+#define NS_SELF "/proc/self/ns/net"
+
+static const struct timeval timeo_sec = { .tv_sec = 3 };
+static const size_t timeo_optlen = sizeof(timeo_sec);
+static int stop, duration;
+
+static bool
+configure_stack(void)
+{
+ char tc_cmd[BUFSIZ];
+
+ /* Move to a new networking namespace */
+ if (CHECK_FAIL(unshare(CLONE_NEWNET)))
+ return false;
+
+ /* Configure necessary links, routes */
+ if (CHECK_FAIL(system("ip link set dev lo up")))
+ return false;
+ if (CHECK_FAIL(system("ip route add local default dev lo")))
+ return false;
+ if (CHECK_FAIL(system("ip -6 route add local default dev lo")))
+ return false;
+
+ /* Load qdisc, BPF program */
+ if (CHECK_FAIL(system("tc qdisc add dev lo clsact")))
+ return false;
+ sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf",
+ "direct-action object-file ./test_sk_assign.o",
+ "section classifier/sk_assign_test",
+ (env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "");
+ if (CHECK(system(tc_cmd), "BPF load failed;",
+ "run with -vv for more info\n"))
+ return false;
+
+ return true;
+}
+
+static int
+start_server(const struct sockaddr *addr, socklen_t len, int type)
+{
+ int fd;
+
+ fd = socket(addr->sa_family, type, 0);
+ if (CHECK_FAIL(fd == -1))
+ goto out;
+ if (CHECK_FAIL(setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec,
+ timeo_optlen)))
+ goto close_out;
+ if (CHECK_FAIL(bind(fd, addr, len) == -1))
+ goto close_out;
+ if (type == SOCK_STREAM && CHECK_FAIL(listen(fd, 128) == -1))
+ goto close_out;
+
+ goto out;
+close_out:
+ close(fd);
+ fd = -1;
+out:
+ return fd;
+}
+
+static int
+connect_to_server(const struct sockaddr *addr, socklen_t len, int type)
+{
+ int fd = -1;
+
+ fd = socket(addr->sa_family, type, 0);
+ if (CHECK_FAIL(fd == -1))
+ goto out;
+ if (CHECK_FAIL(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo_sec,
+ timeo_optlen)))
+ goto close_out;
+ if (CHECK_FAIL(connect(fd, addr, len)))
+ goto close_out;
+
+ goto out;
+close_out:
+ close(fd);
+ fd = -1;
+out:
+ return fd;
+}
+
+static in_port_t
+get_port(int fd)
+{
+ struct sockaddr_storage ss;
+ socklen_t slen = sizeof(ss);
+ in_port_t port = 0;
+
+ if (CHECK_FAIL(getsockname(fd, (struct sockaddr *)&ss, &slen)))
+ return port;
+
+ switch (ss.ss_family) {
+ case AF_INET:
+ port = ((struct sockaddr_in *)&ss)->sin_port;
+ break;
+ case AF_INET6:
+ port = ((struct sockaddr_in6 *)&ss)->sin6_port;
+ break;
+ default:
+ CHECK(1, "Invalid address family", "%d\n", ss.ss_family);
+ }
+ return port;
+}
+
+static ssize_t
+rcv_msg(int srv_client, int type)
+{
+ struct sockaddr_storage ss;
+ char buf[BUFSIZ];
+ socklen_t slen = sizeof(ss);
+
+ if (type == SOCK_STREAM)
+ return read(srv_client, &buf, sizeof(buf));
+ else
+ return recvfrom(srv_client, &buf, sizeof(buf), 0,
+ (struct sockaddr *)&ss, &slen);
+}
+
+static int
+run_test(int server_fd, const struct sockaddr *addr, socklen_t len, int type)
+{
+ int client = -1, srv_client = -1;
+ char buf[] = "testing";
+ in_port_t port;
+ int ret = 1;
+
+ client = connect_to_server(addr, len, type);
+ if (client == -1) {
+ perror("Cannot connect to server");
+ goto out;
+ }
+
+ if (type == SOCK_STREAM) {
+ srv_client = accept(server_fd, NULL, NULL);
+ if (CHECK_FAIL(srv_client == -1)) {
+ perror("Can't accept connection");
+ goto out;
+ }
+ } else {
+ srv_client = server_fd;
+ }
+ if (CHECK_FAIL(write(client, buf, sizeof(buf)) != sizeof(buf))) {
+ perror("Can't write on client");
+ goto out;
+ }
+ if (CHECK_FAIL(rcv_msg(srv_client, type) != sizeof(buf))) {
+ perror("Can't read on server");
+ goto out;
+ }
+
+ port = get_port(srv_client);
+ if (CHECK_FAIL(!port))
+ goto out;
+ /* SOCK_STREAM is connected via accept(), so the server's local address
+ * will be the CONNECT_PORT rather than the BIND port that corresponds
+ * to the listen socket. SOCK_DGRAM on the other hand is connectionless
+ * so we can't really do the same check there; the server doesn't ever
+ * create a socket with CONNECT_PORT.
+ */
+ if (type == SOCK_STREAM &&
+ CHECK(port != htons(CONNECT_PORT), "Expected", "port %u but got %u",
+ CONNECT_PORT, ntohs(port)))
+ goto out;
+ else if (type == SOCK_DGRAM &&
+ CHECK(port != htons(BIND_PORT), "Expected",
+ "port %u but got %u", BIND_PORT, ntohs(port)))
+ goto out;
+
+ ret = 0;
+out:
+ close(client);
+ if (srv_client != server_fd)
+ close(srv_client);
+ if (ret)
+ WRITE_ONCE(stop, 1);
+ return ret;
+}
+
+static void
+prepare_addr(struct sockaddr *addr, int family, __u16 port, bool rewrite_addr)
+{
+ struct sockaddr_in *addr4;
+ struct sockaddr_in6 *addr6;
+
+ switch (family) {
+ case AF_INET:
+ addr4 = (struct sockaddr_in *)addr;
+ memset(addr4, 0, sizeof(*addr4));
+ addr4->sin_family = family;
+ addr4->sin_port = htons(port);
+ if (rewrite_addr)
+ addr4->sin_addr.s_addr = htonl(TEST_DADDR);
+ else
+ addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ break;
+ case AF_INET6:
+ addr6 = (struct sockaddr_in6 *)addr;
+ memset(addr6, 0, sizeof(*addr6));
+ addr6->sin6_family = family;
+ addr6->sin6_port = htons(port);
+ addr6->sin6_addr = in6addr_loopback;
+ if (rewrite_addr)
+ addr6->sin6_addr.s6_addr32[3] = htonl(TEST_DADDR);
+ break;
+ default:
+ fprintf(stderr, "Invalid family %d", family);
+ }
+}
+
+struct test_sk_cfg {
+ const char *name;
+ int family;
+ struct sockaddr *addr;
+ socklen_t len;
+ int type;
+ bool rewrite_addr;
+};
+
+#define TEST(NAME, FAMILY, TYPE, REWRITE) \
+{ \
+ .name = NAME, \
+ .family = FAMILY, \
+ .addr = (FAMILY == AF_INET) ? (struct sockaddr *)&addr4 \
+ : (struct sockaddr *)&addr6, \
+ .len = (FAMILY == AF_INET) ? sizeof(addr4) : sizeof(addr6), \
+ .type = TYPE, \
+ .rewrite_addr = REWRITE, \
+}
+
+void test_sk_assign(void)
+{
+ struct sockaddr_in addr4;
+ struct sockaddr_in6 addr6;
+ struct test_sk_cfg tests[] = {
+ TEST("ipv4 tcp port redir", AF_INET, SOCK_STREAM, false),
+ TEST("ipv4 tcp addr redir", AF_INET, SOCK_STREAM, true),
+ TEST("ipv6 tcp port redir", AF_INET6, SOCK_STREAM, false),
+ TEST("ipv6 tcp addr redir", AF_INET6, SOCK_STREAM, true),
+ TEST("ipv4 udp port redir", AF_INET, SOCK_DGRAM, false),
+ TEST("ipv4 udp addr redir", AF_INET, SOCK_DGRAM, true),
+ TEST("ipv6 udp port redir", AF_INET6, SOCK_DGRAM, false),
+ TEST("ipv6 udp addr redir", AF_INET6, SOCK_DGRAM, true),
+ };
+ int server = -1;
+ int self_net;
+
+ self_net = open(NS_SELF, O_RDONLY);
+ if (CHECK_FAIL(self_net < 0)) {
+ perror("Unable to open "NS_SELF);
+ return;
+ }
+
+ if (!configure_stack()) {
+ perror("configure_stack");
+ goto cleanup;
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(tests) && !READ_ONCE(stop); i++) {
+ struct test_sk_cfg *test = &tests[i];
+ const struct sockaddr *addr;
+
+ if (!test__start_subtest(test->name))
+ continue;
+ prepare_addr(test->addr, test->family, BIND_PORT, false);
+ addr = (const struct sockaddr *)test->addr;
+ server = start_server(addr, test->len, test->type);
+ if (server == -1)
+ goto cleanup;
+
+ /* connect to unbound ports */
+ prepare_addr(test->addr, test->family, CONNECT_PORT,
+ test->rewrite_addr);
+ if (run_test(server, addr, test->len, test->type))
+ goto close;
+
+ close(server);
+ server = -1;
+ }
+
+close:
+ close(server);
+cleanup:
+ if (CHECK_FAIL(setns(self_net, CLONE_NEWNET)))
+ perror("Failed to setns("NS_SELF")");
+ close(self_net);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
index c6d6b685a946..4538bd08203f 100644
--- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
+++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
@@ -14,6 +14,7 @@ void test_skb_ctx(void)
.wire_len = 100,
.gso_segs = 8,
.mark = 9,
+ .gso_size = 10,
};
struct bpf_prog_test_run_attr tattr = {
.data_in = &pkt_v4,
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
new file mode 100644
index 000000000000..06b86addc181
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Cloudflare
+/*
+ * Tests for sockmap/sockhash holding kTLS sockets.
+ */
+
+#include "test_progs.h"
+
+#define MAX_TEST_NAME 80
+#define TCP_ULP 31
+
+static int tcp_server(int family)
+{
+ int err, s;
+
+ s = socket(family, SOCK_STREAM, 0);
+ if (CHECK_FAIL(s == -1)) {
+ perror("socket");
+ return -1;
+ }
+
+ err = listen(s, SOMAXCONN);
+ if (CHECK_FAIL(err)) {
+ perror("listen");
+ return -1;
+ }
+
+ return s;
+}
+
+static int disconnect(int fd)
+{
+ struct sockaddr unspec = { AF_UNSPEC };
+
+ return connect(fd, &unspec, sizeof(unspec));
+}
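+
+/* Note: per connect(2), connecting a TCP socket to an address with
+ * sa_family AF_UNSPEC dissolves the association and drops the socket
+ * back to TCP_CLOSE, which is what unhashes it here.
+ */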
+
+/* Disconnect (unhash) a kTLS socket after removing it from sockmap. */
+static void test_sockmap_ktls_disconnect_after_delete(int family, int map)
+{
+ struct sockaddr_storage addr = {0};
+ socklen_t len = sizeof(addr);
+ int err, cli, srv, zero = 0;
+
+ srv = tcp_server(family);
+ if (srv == -1)
+ return;
+
+ err = getsockname(srv, (struct sockaddr *)&addr, &len);
+ if (CHECK_FAIL(err)) {
+ perror("getsockopt");
+ goto close_srv;
+ }
+
+ cli = socket(family, SOCK_STREAM, 0);
+ if (CHECK_FAIL(cli == -1)) {
+ perror("socket");
+ goto close_srv;
+ }
+
+ err = connect(cli, (struct sockaddr *)&addr, len);
+ if (CHECK_FAIL(err)) {
+ perror("connect");
+ goto close_cli;
+ }
+
+ err = bpf_map_update_elem(map, &zero, &cli, 0);
+ if (CHECK_FAIL(err)) {
+ perror("bpf_map_update_elem");
+ goto close_cli;
+ }
+
+ err = setsockopt(cli, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls"));
+ if (CHECK_FAIL(err)) {
+ perror("setsockopt(TCP_ULP)");
+ goto close_cli;
+ }
+
+ err = bpf_map_delete_elem(map, &zero);
+ if (CHECK_FAIL(err)) {
+ perror("bpf_map_delete_elem");
+ goto close_cli;
+ }
+
+ err = disconnect(cli);
+ if (CHECK_FAIL(err))
+ perror("disconnect");
+
+close_cli:
+ close(cli);
+close_srv:
+ close(srv);
+}
+
+static void run_tests(int family, enum bpf_map_type map_type)
+{
+ char test_name[MAX_TEST_NAME];
+ int map;
+
+ map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
+ if (CHECK_FAIL(map == -1)) {
+ perror("bpf_map_create");
+ return;
+ }
+
+ snprintf(test_name, MAX_TEST_NAME,
+ "sockmap_ktls disconnect_after_delete %s %s",
+ family == AF_INET ? "IPv4" : "IPv6",
+ map_type == BPF_MAP_TYPE_SOCKMAP ? "SOCKMAP" : "SOCKHASH");
+ if (!test__start_subtest(test_name))
+ return;
+
+ test_sockmap_ktls_disconnect_after_delete(family, map);
+
+ close(map);
+}
+
+void test_sockmap_ktls(void)
+{
+ run_tests(AF_INET, BPF_MAP_TYPE_SOCKMAP);
+ run_tests(AF_INET, BPF_MAP_TYPE_SOCKHASH);
+ run_tests(AF_INET6, BPF_MAP_TYPE_SOCKMAP);
+ run_tests(AF_INET6, BPF_MAP_TYPE_SOCKHASH);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
new file mode 100644
index 000000000000..d7d65a700799
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -0,0 +1,1635 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Cloudflare
+/*
+ * Test suite for SOCKMAP/SOCKHASH holding listening sockets.
+ * Covers:
+ * 1. BPF map operations - bpf_map_{update,lookup,delete}_elem
+ * 2. BPF redirect helpers - bpf_{sk,msg}_redirect_map
+ * 3. BPF reuseport helper - bpf_sk_select_reuseport
+ */
+
+#include <linux/compiler.h>
+#include <errno.h>
+#include <error.h>
+#include <limits.h>
+#include <netinet/in.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/select.h>
+#include <unistd.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "bpf_util.h"
+#include "test_progs.h"
+#include "test_sockmap_listen.skel.h"
+
+#define IO_TIMEOUT_SEC 30
+#define MAX_STRERR_LEN 256
+#define MAX_TEST_NAME 80
+
+#define _FAIL(errnum, fmt...) \
+ ({ \
+ error_at_line(0, (errnum), __func__, __LINE__, fmt); \
+ CHECK_FAIL(true); \
+ })
+#define FAIL(fmt...) _FAIL(0, fmt)
+#define FAIL_ERRNO(fmt...) _FAIL(errno, fmt)
+#define FAIL_LIBBPF(err, msg) \
+ ({ \
+ char __buf[MAX_STRERR_LEN]; \
+ libbpf_strerror((err), __buf, sizeof(__buf)); \
+ FAIL("%s: %s", (msg), __buf); \
+ })
+
+/* Wrappers that fail the test on error and report it. */
+
+#define xaccept_nonblock(fd, addr, len) \
+ ({ \
+ int __ret = \
+ accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC); \
+ if (__ret == -1) \
+ FAIL_ERRNO("accept"); \
+ __ret; \
+ })
+
+#define xbind(fd, addr, len) \
+ ({ \
+ int __ret = bind((fd), (addr), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("bind"); \
+ __ret; \
+ })
+
+#define xclose(fd) \
+ ({ \
+ int __ret = close((fd)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("close"); \
+ __ret; \
+ })
+
+#define xconnect(fd, addr, len) \
+ ({ \
+ int __ret = connect((fd), (addr), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("connect"); \
+ __ret; \
+ })
+
+#define xgetsockname(fd, addr, len) \
+ ({ \
+ int __ret = getsockname((fd), (addr), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("getsockname"); \
+ __ret; \
+ })
+
+#define xgetsockopt(fd, level, name, val, len) \
+ ({ \
+ int __ret = getsockopt((fd), (level), (name), (val), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("getsockopt(" #name ")"); \
+ __ret; \
+ })
+
+#define xlisten(fd, backlog) \
+ ({ \
+ int __ret = listen((fd), (backlog)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("listen"); \
+ __ret; \
+ })
+
+#define xsetsockopt(fd, level, name, val, len) \
+ ({ \
+ int __ret = setsockopt((fd), (level), (name), (val), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("setsockopt(" #name ")"); \
+ __ret; \
+ })
+
+#define xsend(fd, buf, len, flags) \
+ ({ \
+ ssize_t __ret = send((fd), (buf), (len), (flags)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("send"); \
+ __ret; \
+ })
+
+#define xrecv_nonblock(fd, buf, len, flags) \
+ ({ \
+ ssize_t __ret = recv_timeout((fd), (buf), (len), (flags), \
+ IO_TIMEOUT_SEC); \
+ if (__ret == -1) \
+ FAIL_ERRNO("recv"); \
+ __ret; \
+ })
+
+#define xsocket(family, sotype, flags) \
+ ({ \
+ int __ret = socket(family, sotype, flags); \
+ if (__ret == -1) \
+ FAIL_ERRNO("socket"); \
+ __ret; \
+ })
+
+#define xbpf_map_delete_elem(fd, key) \
+ ({ \
+ int __ret = bpf_map_delete_elem((fd), (key)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("map_delete"); \
+ __ret; \
+ })
+
+#define xbpf_map_lookup_elem(fd, key, val) \
+ ({ \
+ int __ret = bpf_map_lookup_elem((fd), (key), (val)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("map_lookup"); \
+ __ret; \
+ })
+
+#define xbpf_map_update_elem(fd, key, val, flags) \
+ ({ \
+ int __ret = bpf_map_update_elem((fd), (key), (val), (flags)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("map_update"); \
+ __ret; \
+ })
+
+#define xbpf_prog_attach(prog, target, type, flags) \
+ ({ \
+ int __ret = \
+ bpf_prog_attach((prog), (target), (type), (flags)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("prog_attach(" #type ")"); \
+ __ret; \
+ })
+
+#define xbpf_prog_detach2(prog, target, type) \
+ ({ \
+ int __ret = bpf_prog_detach2((prog), (target), (type)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("prog_detach2(" #type ")"); \
+ __ret; \
+ })
+
+#define xpthread_create(thread, attr, func, arg) \
+ ({ \
+ int __ret = pthread_create((thread), (attr), (func), (arg)); \
+ errno = __ret; \
+ if (__ret) \
+ FAIL_ERRNO("pthread_create"); \
+ __ret; \
+ })
+
+#define xpthread_join(thread, retval) \
+ ({ \
+ int __ret = pthread_join((thread), (retval)); \
+ errno = __ret; \
+ if (__ret) \
+ FAIL_ERRNO("pthread_join"); \
+ __ret; \
+ })
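+
+/* Unlike the syscall wrappers above, pthread_create()/pthread_join()
+ * return an error number instead of setting errno; the errno = __ret
+ * assignment lets FAIL_ERRNO() report it all the same.
+ */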
+
+static int poll_read(int fd, unsigned int timeout_sec)
+{
+ struct timeval timeout = { .tv_sec = timeout_sec };
+ fd_set rfds;
+ int r;
+
+ FD_ZERO(&rfds);
+ FD_SET(fd, &rfds);
+
+ r = select(fd + 1, &rfds, NULL, NULL, &timeout);
+ if (r == 0)
+ errno = ETIME;
+
+ return r == 1 ? 0 : -1;
+}
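+
+/* poll_read() turns a select() timeout into -1 with errno = ETIME, so a
+ * stalled peer surfaces through the same error path as any other socket
+ * failure in the x*_nonblock() wrappers below.
+ */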
+
+static int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len,
+ unsigned int timeout_sec)
+{
+ if (poll_read(fd, timeout_sec))
+ return -1;
+
+ return accept(fd, addr, len);
+}
+
+static int recv_timeout(int fd, void *buf, size_t len, int flags,
+ unsigned int timeout_sec)
+{
+ if (poll_read(fd, timeout_sec))
+ return -1;
+
+ return recv(fd, buf, len, flags);
+}
+
+static void init_addr_loopback4(struct sockaddr_storage *ss, socklen_t *len)
+{
+ struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss));
+
+ addr4->sin_family = AF_INET;
+ addr4->sin_port = 0;
+ addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ *len = sizeof(*addr4);
+}
+
+static void init_addr_loopback6(struct sockaddr_storage *ss, socklen_t *len)
+{
+ struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss));
+
+ addr6->sin6_family = AF_INET6;
+ addr6->sin6_port = 0;
+ addr6->sin6_addr = in6addr_loopback;
+ *len = sizeof(*addr6);
+}
+
+static void init_addr_loopback(int family, struct sockaddr_storage *ss,
+ socklen_t *len)
+{
+ switch (family) {
+ case AF_INET:
+ init_addr_loopback4(ss, len);
+ return;
+ case AF_INET6:
+ init_addr_loopback6(ss, len);
+ return;
+ default:
+ FAIL("unsupported address family %d", family);
+ }
+}
+
+static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss)
+{
+ return (struct sockaddr *)ss;
+}
+
+static int enable_reuseport(int s, int progfd)
+{
+ int err, one = 1;
+
+ err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
+ if (err)
+ return -1;
+ err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd,
+ sizeof(progfd));
+ if (err)
+ return -1;
+
+ return 0;
+}
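+
+/* SO_ATTACH_REUSEPORT_EBPF takes the fd of a BPF_PROG_TYPE_SK_REUSEPORT
+ * program. It is set up here, before bind(), so the program is already
+ * in place when the socket joins (or creates) its reuseport group.
+ */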
+
+static int socket_loopback_reuseport(int family, int sotype, int progfd)
+{
+ struct sockaddr_storage addr;
+ socklen_t len;
+ int err, s;
+
+ init_addr_loopback(family, &addr, &len);
+
+ s = xsocket(family, sotype, 0);
+ if (s == -1)
+ return -1;
+
+ if (progfd >= 0)
+ enable_reuseport(s, progfd);
+
+ err = xbind(s, sockaddr(&addr), len);
+ if (err)
+ goto close;
+
+ if (sotype & SOCK_DGRAM)
+ return s;
+
+ err = xlisten(s, SOMAXCONN);
+ if (err)
+ goto close;
+
+ return s;
+close:
+ xclose(s);
+ return -1;
+}
+
+static int socket_loopback(int family, int sotype)
+{
+ return socket_loopback_reuseport(family, sotype, -1);
+}
+
+static void test_insert_invalid(int family, int sotype, int mapfd)
+{
+ u32 key = 0;
+ u64 value;
+ int err;
+
+ value = -1;
+ err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ if (!err || errno != EINVAL)
+ FAIL_ERRNO("map_update: expected EINVAL");
+
+ value = INT_MAX;
+ err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ if (!err || errno != EBADF)
+ FAIL_ERRNO("map_update: expected EBADF");
+}
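+
+/* A value of -1 is rejected as a malformed fd number (EINVAL), while
+ * INT_MAX is a well-formed fd number that refers to no open descriptor
+ * (EBADF) - hence the two distinct errnos expected above.
+ */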
+
+static void test_insert_opened(int family, int sotype, int mapfd)
+{
+ u32 key = 0;
+ u64 value;
+ int err, s;
+
+ s = xsocket(family, sotype, 0);
+ if (s == -1)
+ return;
+
+ errno = 0;
+ value = s;
+ err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ if (!err || errno != EOPNOTSUPP)
+ FAIL_ERRNO("map_update: expected EOPNOTSUPP");
+
+ xclose(s);
+}
+
+static void test_insert_bound(int family, int sotype, int mapfd)
+{
+ struct sockaddr_storage addr;
+ socklen_t len;
+ u32 key = 0;
+ u64 value;
+ int err, s;
+
+ init_addr_loopback(family, &addr, &len);
+
+ s = xsocket(family, sotype, 0);
+ if (s == -1)
+ return;
+
+ err = xbind(s, sockaddr(&addr), len);
+ if (err)
+ goto close;
+
+ errno = 0;
+ value = s;
+ err = bpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ if (!err || errno != EOPNOTSUPP)
+ FAIL_ERRNO("map_update: expected EOPNOTSUPP");
+close:
+ xclose(s);
+}
+
+static void test_insert(int family, int sotype, int mapfd)
+{
+ u64 value;
+ u32 key;
+ int s;
+
+ s = socket_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ key = 0;
+ value = s;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ xclose(s);
+}
+
+static void test_delete_after_insert(int family, int sotype, int mapfd)
+{
+ u64 value;
+ u32 key;
+ int s;
+
+ s = socket_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ key = 0;
+ value = s;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ xbpf_map_delete_elem(mapfd, &key);
+ xclose(s);
+}
+
+static void test_delete_after_close(int family, int sotype, int mapfd)
+{
+ int err, s;
+ u64 value;
+ u32 key;
+
+ s = socket_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ key = 0;
+ value = s;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+
+ xclose(s);
+
+ errno = 0;
+ err = bpf_map_delete_elem(mapfd, &key);
+ if (!err || (errno != EINVAL && errno != ENOENT))
+ /* SOCKMAP and SOCKHASH return different error codes */
+ FAIL_ERRNO("map_delete: expected EINVAL/EINVAL");
+}
+
+static void test_lookup_after_insert(int family, int sotype, int mapfd)
+{
+ u64 cookie, value;
+ socklen_t len;
+ u32 key;
+ int s;
+
+ s = socket_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ key = 0;
+ value = s;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+
+ len = sizeof(cookie);
+ xgetsockopt(s, SOL_SOCKET, SO_COOKIE, &cookie, &len);
+
+ xbpf_map_lookup_elem(mapfd, &key, &value);
+
+ if (value != cookie) {
+ FAIL("map_lookup: have %#llx, want %#llx",
+ (unsigned long long)value, (unsigned long long)cookie);
+ }
+
+ xclose(s);
+}
+
+static void test_lookup_after_delete(int family, int sotype, int mapfd)
+{
+ int err, s;
+ u64 value;
+ u32 key;
+
+ s = socket_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ key = 0;
+ value = s;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ xbpf_map_delete_elem(mapfd, &key);
+
+ errno = 0;
+ err = bpf_map_lookup_elem(mapfd, &key, &value);
+ if (!err || errno != ENOENT)
+ FAIL_ERRNO("map_lookup: expected ENOENT");
+
+ xclose(s);
+}
+
+static void test_lookup_32_bit_value(int family, int sotype, int mapfd)
+{
+ u32 key, value32;
+ int err, s;
+
+ s = socket_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ mapfd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(key),
+ sizeof(value32), 1, 0);
+ if (mapfd < 0) {
+ FAIL_ERRNO("map_create");
+ goto close;
+ }
+
+ key = 0;
+ value32 = s;
+ xbpf_map_update_elem(mapfd, &key, &value32, BPF_NOEXIST);
+
+ errno = 0;
+ err = bpf_map_lookup_elem(mapfd, &key, &value32);
+ if (!err || errno != ENOSPC)
+ FAIL_ERRNO("map_lookup: expected ENOSPC");
+
+ xclose(mapfd);
+close:
+ xclose(s);
+}
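+
+/* Lookups on sockmap/sockhash return the 64-bit socket cookie (see
+ * test_lookup_after_insert above), so a map created with a 4-byte value
+ * size cannot hold the result - hence the expected ENOSPC.
+ */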
+
+static void test_update_existing(int family, int sotype, int mapfd)
+{
+ int s1, s2;
+ u64 value;
+ u32 key;
+
+ s1 = socket_loopback(family, sotype);
+ if (s1 < 0)
+ return;
+
+ s2 = socket_loopback(family, sotype);
+ if (s2 < 0)
+ goto close_s1;
+
+ key = 0;
+ value = s1;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+
+ value = s2;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_EXIST);
+ xclose(s2);
+close_s1:
+ xclose(s1);
+}
+
+/* Exercise the code path where we destroy child sockets that never
+ * got accept()'ed, aka orphans, when the parent socket gets closed.
+ */
+static void test_destroy_orphan_child(int family, int sotype, int mapfd)
+{
+ struct sockaddr_storage addr;
+ socklen_t len;
+ int err, s, c;
+ u64 value;
+ u32 key;
+
+ s = socket_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ key = 0;
+ value = s;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+
+ c = xsocket(family, sotype, 0);
+ if (c == -1)
+ goto close_srv;
+
+ xconnect(c, sockaddr(&addr), len);
+ xclose(c);
+close_srv:
+ xclose(s);
+}
+
+/* Perform a passive open after removing the listening socket from SOCKMAP
+ * to ensure that callbacks get restored properly.
+ */
+static void test_clone_after_delete(int family, int sotype, int mapfd)
+{
+ struct sockaddr_storage addr;
+ socklen_t len;
+ int err, s, c;
+ u64 value;
+ u32 key;
+
+ s = socket_loopback(family, sotype);
+ if (s < 0)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ key = 0;
+ value = s;
+ xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);
+ xbpf_map_delete_elem(mapfd, &key);
+
+ c = xsocket(family, sotype, 0);
+ if (c < 0)
+ goto close_srv;
+
+ xconnect(c, sockaddr(&addr), len);
+ xclose(c);
+close_srv:
+ xclose(s);
+}
+
+/* Check that a child socket created while the parent was in a SOCKMAP,
+ * but accept()'ed only after the parent has been removed from the
+ * SOCKMAP, is cloned without the parent's psock state or callbacks.
+ */
+static void test_accept_after_delete(int family, int sotype, int mapfd)
+{
+ struct sockaddr_storage addr;
+ const u32 zero = 0;
+ int err, s, c, p;
+ socklen_t len;
+ u64 value;
+
+ s = socket_loopback(family, sotype | SOCK_NONBLOCK);
+ if (s == -1)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ value = s;
+ err = xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
+ if (err)
+ goto close_srv;
+
+ c = xsocket(family, sotype, 0);
+ if (c == -1)
+ goto close_srv;
+
+ /* Create child while parent is in sockmap */
+ err = xconnect(c, sockaddr(&addr), len);
+ if (err)
+ goto close_cli;
+
+ /* Remove parent from sockmap */
+ err = xbpf_map_delete_elem(mapfd, &zero);
+ if (err)
+ goto close_cli;
+
+ p = xaccept_nonblock(s, NULL, NULL);
+ if (p == -1)
+ goto close_cli;
+
+ /* Check that child sk_user_data is not set */
+ value = p;
+ xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
+
+ xclose(p);
+close_cli:
+ xclose(c);
+close_srv:
+ xclose(s);
+}
+
+/* Check that a child socket created and accepted while the parent was in
+ * a SOCKMAP is cloned without the parent's psock state or callbacks.
+ */
+static void test_accept_before_delete(int family, int sotype, int mapfd)
+{
+ struct sockaddr_storage addr;
+ const u32 zero = 0, one = 1;
+ int err, s, c, p;
+ socklen_t len;
+ u64 value;
+
+ s = socket_loopback(family, sotype | SOCK_NONBLOCK);
+ if (s == -1)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ value = s;
+ err = xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
+ if (err)
+ goto close_srv;
+
+ c = xsocket(family, sotype, 0);
+ if (c == -1)
+ goto close_srv;
+
+ /* Create & accept child while parent is in sockmap */
+ err = xconnect(c, sockaddr(&addr), len);
+ if (err)
+ goto close_cli;
+
+ p = xaccept_nonblock(s, NULL, NULL);
+ if (p == -1)
+ goto close_cli;
+
+ /* Check that child sk_user_data is not set */
+ value = p;
+ xbpf_map_update_elem(mapfd, &one, &value, BPF_NOEXIST);
+
+ xclose(p);
+close_cli:
+ xclose(c);
+close_srv:
+ xclose(s);
+}
+
+struct connect_accept_ctx {
+ int sockfd;
+ unsigned int done;
+ unsigned int nr_iter;
+};
+
+static bool is_thread_done(struct connect_accept_ctx *ctx)
+{
+ return READ_ONCE(ctx->done);
+}
+
+static void *connect_accept_thread(void *arg)
+{
+ struct connect_accept_ctx *ctx = arg;
+ struct sockaddr_storage addr;
+ int family, socktype;
+ socklen_t len;
+ int err, i, s;
+
+ s = ctx->sockfd;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto done;
+
+ len = sizeof(family);
+ err = xgetsockopt(s, SOL_SOCKET, SO_DOMAIN, &family, &len);
+ if (err)
+ goto done;
+
+ len = sizeof(socktype);
+ err = xgetsockopt(s, SOL_SOCKET, SO_TYPE, &socktype, &len);
+ if (err)
+ goto done;
+
+ for (i = 0; i < ctx->nr_iter; i++) {
+ int c, p;
+
+ c = xsocket(family, socktype, 0);
+ if (c < 0)
+ break;
+
+ err = xconnect(c, (struct sockaddr *)&addr, sizeof(addr));
+ if (err) {
+ xclose(c);
+ break;
+ }
+
+ p = xaccept_nonblock(s, NULL, NULL);
+ if (p < 0) {
+ xclose(c);
+ break;
+ }
+
+ xclose(p);
+ xclose(c);
+ }
+done:
+ WRITE_ONCE(ctx->done, 1);
+ return NULL;
+}
+
+static void test_syn_recv_insert_delete(int family, int sotype, int mapfd)
+{
+ struct connect_accept_ctx ctx = { 0 };
+ struct sockaddr_storage addr;
+ socklen_t len;
+ u32 zero = 0;
+ pthread_t t;
+ int err, s;
+ u64 value;
+
+ s = socket_loopback(family, sotype | SOCK_NONBLOCK);
+ if (s < 0)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close;
+
+ ctx.sockfd = s;
+ ctx.nr_iter = 1000;
+
+ err = xpthread_create(&t, NULL, connect_accept_thread, &ctx);
+ if (err)
+ goto close;
+
+ value = s;
+ while (!is_thread_done(&ctx)) {
+ err = xbpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
+ if (err)
+ break;
+
+ err = xbpf_map_delete_elem(mapfd, &zero);
+ if (err)
+ break;
+ }
+
+ xpthread_join(t, NULL);
+close:
+ xclose(s);
+}
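+
+/* The update/delete loop above runs concurrently with the connect/accept
+ * thread so that map operations hit the listener while child sockets are
+ * being cloned from it in SYN_RECV state.
+ */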
+
+static void *listen_thread(void *arg)
+{
+ struct sockaddr unspec = { AF_UNSPEC };
+ struct connect_accept_ctx *ctx = arg;
+ int err, i, s;
+
+ s = ctx->sockfd;
+
+ for (i = 0; i < ctx->nr_iter; i++) {
+ err = xlisten(s, 1);
+ if (err)
+ break;
+ err = xconnect(s, &unspec, sizeof(unspec));
+ if (err)
+ break;
+ }
+
+ WRITE_ONCE(ctx->done, 1);
+ return NULL;
+}
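+
+/* connect(AF_UNSPEC) returns the socket to TCP_CLOSE, allowing the next
+ * iteration to listen() on it again; the map updates in
+ * test_race_insert_listen() below race against these state transitions.
+ */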
+
+static void test_race_insert_listen(int family, int socktype, int mapfd)
+{
+ struct connect_accept_ctx ctx = { 0 };
+ const u32 zero = 0;
+ const int one = 1;
+ pthread_t t;
+ int err, s;
+ u64 value;
+
+ s = xsocket(family, socktype, 0);
+ if (s < 0)
+ return;
+
+ err = xsetsockopt(s, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
+ if (err)
+ goto close;
+
+ ctx.sockfd = s;
+ ctx.nr_iter = 10000;
+
+ err = pthread_create(&t, NULL, listen_thread, &ctx);
+ if (err)
+ goto close;
+
+ value = s;
+ while (!is_thread_done(&ctx)) {
+ err = bpf_map_update_elem(mapfd, &zero, &value, BPF_NOEXIST);
+ /* Expecting EOPNOTSUPP before listen() */
+ if (err && errno != EOPNOTSUPP) {
+ FAIL_ERRNO("map_update");
+ break;
+ }
+
+ err = bpf_map_delete_elem(mapfd, &zero);
+ /* Expecting no entry after unhash on connect(AF_UNSPEC) */
+ if (err && errno != EINVAL && errno != ENOENT) {
+ FAIL_ERRNO("map_delete");
+ break;
+ }
+ }
+
+ xpthread_join(t, NULL);
+close:
+ xclose(s);
+}
+
+static void zero_verdict_count(int mapfd)
+{
+ unsigned int zero = 0;
+ int key;
+
+ key = SK_DROP;
+ xbpf_map_update_elem(mapfd, &key, &zero, BPF_ANY);
+ key = SK_PASS;
+ xbpf_map_update_elem(mapfd, &key, &zero, BPF_ANY);
+}
+
+enum redir_mode {
+ REDIR_INGRESS,
+ REDIR_EGRESS,
+};
+
+static const char *redir_mode_str(enum redir_mode mode)
+{
+ switch (mode) {
+ case REDIR_INGRESS:
+ return "ingress";
+ case REDIR_EGRESS:
+ return "egress";
+ default:
+ return "unknown";
+ }
+}
+
+static void redir_to_connected(int family, int sotype, int sock_mapfd,
+ int verd_mapfd, enum redir_mode mode)
+{
+ const char *log_prefix = redir_mode_str(mode);
+ struct sockaddr_storage addr;
+ int s, c0, c1, p0, p1;
+ unsigned int pass;
+ socklen_t len;
+ int err, n;
+ u64 value;
+ u32 key;
+ char b;
+
+ zero_verdict_count(verd_mapfd);
+
+ s = socket_loopback(family, sotype | SOCK_NONBLOCK);
+ if (s < 0)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ c0 = xsocket(family, sotype, 0);
+ if (c0 < 0)
+ goto close_srv;
+ err = xconnect(c0, sockaddr(&addr), len);
+ if (err)
+ goto close_cli0;
+
+ p0 = xaccept_nonblock(s, NULL, NULL);
+ if (p0 < 0)
+ goto close_cli0;
+
+ c1 = xsocket(family, sotype, 0);
+ if (c1 < 0)
+ goto close_peer0;
+ err = xconnect(c1, sockaddr(&addr), len);
+ if (err)
+ goto close_cli1;
+
+ p1 = xaccept_nonblock(s, NULL, NULL);
+ if (p1 < 0)
+ goto close_cli1;
+
+ key = 0;
+ value = p0;
+ err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_peer1;
+
+ key = 1;
+ value = p1;
+ err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_peer1;
+
+ n = write(mode == REDIR_INGRESS ? c1 : p1, "a", 1);
+ if (n < 0)
+ FAIL_ERRNO("%s: write", log_prefix);
+ if (n == 0)
+ FAIL("%s: incomplete write", log_prefix);
+ if (n < 1)
+ goto close_peer1;
+
+ key = SK_PASS;
+ err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
+ if (err)
+ goto close_peer1;
+ if (pass != 1)
+ FAIL("%s: want pass count 1, have %d", log_prefix, pass);
+
+ n = read(c0, &b, 1);
+ if (n < 0)
+ FAIL_ERRNO("%s: read", log_prefix);
+ if (n == 0)
+ FAIL("%s: incomplete read", log_prefix);
+
+close_peer1:
+ xclose(p1);
+close_cli1:
+ xclose(c1);
+close_peer0:
+ xclose(p0);
+close_cli0:
+ xclose(c0);
+close_srv:
+ xclose(s);
+}
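+
+/* Data path under test: the byte enters via p1 (sock_map[1]) - on
+ * ingress after arriving from c1, on egress as a direct send - is
+ * redirected by the verdict program to p0 (sock_map[0]), and is read
+ * back on p0's remote end, c0.
+ */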
+
+static void test_skb_redir_to_connected(struct test_sockmap_listen *skel,
+ struct bpf_map *inner_map, int family,
+ int sotype)
+{
+ int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+ int parser = bpf_program__fd(skel->progs.prog_skb_parser);
+ int verdict_map = bpf_map__fd(skel->maps.verdict_map);
+ int sock_map = bpf_map__fd(inner_map);
+ int err;
+
+ err = xbpf_prog_attach(parser, sock_map, BPF_SK_SKB_STREAM_PARSER, 0);
+ if (err)
+ return;
+ err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (err)
+ goto detach;
+
+ redir_to_connected(family, sotype, sock_map, verdict_map,
+ REDIR_INGRESS);
+
+ xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT);
+detach:
+ xbpf_prog_detach2(parser, sock_map, BPF_SK_SKB_STREAM_PARSER);
+}
+
+static void test_msg_redir_to_connected(struct test_sockmap_listen *skel,
+ struct bpf_map *inner_map, int family,
+ int sotype)
+{
+ int verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
+ int verdict_map = bpf_map__fd(skel->maps.verdict_map);
+ int sock_map = bpf_map__fd(inner_map);
+ int err;
+
+ err = xbpf_prog_attach(verdict, sock_map, BPF_SK_MSG_VERDICT, 0);
+ if (err)
+ return;
+
+ redir_to_connected(family, sotype, sock_map, verdict_map, REDIR_EGRESS);
+
+ xbpf_prog_detach2(verdict, sock_map, BPF_SK_MSG_VERDICT);
+}
+
+static void redir_to_listening(int family, int sotype, int sock_mapfd,
+ int verd_mapfd, enum redir_mode mode)
+{
+ const char *log_prefix = redir_mode_str(mode);
+ struct sockaddr_storage addr;
+ int s, c, p, err, n;
+ unsigned int drop;
+ socklen_t len;
+ u64 value;
+ u32 key;
+
+ zero_verdict_count(verd_mapfd);
+
+ s = socket_loopback(family, sotype | SOCK_NONBLOCK);
+ if (s < 0)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ c = xsocket(family, sotype, 0);
+ if (c < 0)
+ goto close_srv;
+ err = xconnect(c, sockaddr(&addr), len);
+ if (err)
+ goto close_cli;
+
+ p = xaccept_nonblock(s, NULL, NULL);
+ if (p < 0)
+ goto close_cli;
+
+ key = 0;
+ value = s;
+ err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_peer;
+
+ key = 1;
+ value = p;
+ err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_peer;
+
+ n = write(mode == REDIR_INGRESS ? c : p, "a", 1);
+ if (n < 0 && errno != EACCES)
+ FAIL_ERRNO("%s: write", log_prefix);
+ if (n == 0)
+ FAIL("%s: incomplete write", log_prefix);
+ if (n < 1)
+ goto close_peer;
+
+ key = SK_DROP;
+ err = xbpf_map_lookup_elem(verd_mapfd, &key, &drop);
+ if (err)
+ goto close_peer;
+ if (drop != 1)
+ FAIL("%s: want drop count 1, have %d", log_prefix, drop);
+
+close_peer:
+ xclose(p);
+close_cli:
+ xclose(c);
+close_srv:
+ xclose(s);
+}
+
+static void test_skb_redir_to_listening(struct test_sockmap_listen *skel,
+ struct bpf_map *inner_map, int family,
+ int sotype)
+{
+ int verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+ int parser = bpf_program__fd(skel->progs.prog_skb_parser);
+ int verdict_map = bpf_map__fd(skel->maps.verdict_map);
+ int sock_map = bpf_map__fd(inner_map);
+ int err;
+
+ err = xbpf_prog_attach(parser, sock_map, BPF_SK_SKB_STREAM_PARSER, 0);
+ if (err)
+ return;
+ err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (err)
+ goto detach;
+
+ redir_to_listening(family, sotype, sock_map, verdict_map,
+ REDIR_INGRESS);
+
+ xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_STREAM_VERDICT);
+detach:
+ xbpf_prog_detach2(parser, sock_map, BPF_SK_SKB_STREAM_PARSER);
+}
+
+static void test_msg_redir_to_listening(struct test_sockmap_listen *skel,
+ struct bpf_map *inner_map, int family,
+ int sotype)
+{
+ int verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
+ int verdict_map = bpf_map__fd(skel->maps.verdict_map);
+ int sock_map = bpf_map__fd(inner_map);
+ int err;
+
+ err = xbpf_prog_attach(verdict, sock_map, BPF_SK_MSG_VERDICT, 0);
+ if (err)
+ return;
+
+ redir_to_listening(family, sotype, sock_map, verdict_map, REDIR_EGRESS);
+
+ xbpf_prog_detach2(verdict, sock_map, BPF_SK_MSG_VERDICT);
+}
+
+static void test_reuseport_select_listening(int family, int sotype,
+ int sock_map, int verd_map,
+ int reuseport_prog)
+{
+ struct sockaddr_storage addr;
+ unsigned int pass;
+ int s, c, err;
+ socklen_t len;
+ u64 value;
+ u32 key;
+
+ zero_verdict_count(verd_map);
+
+ s = socket_loopback_reuseport(family, sotype | SOCK_NONBLOCK,
+ reuseport_prog);
+ if (s < 0)
+ return;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ key = 0;
+ value = s;
+ err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_srv;
+
+ c = xsocket(family, sotype, 0);
+ if (c < 0)
+ goto close_srv;
+ err = xconnect(c, sockaddr(&addr), len);
+ if (err)
+ goto close_cli;
+
+ if (sotype == SOCK_STREAM) {
+ int p;
+
+ p = xaccept_nonblock(s, NULL, NULL);
+ if (p < 0)
+ goto close_cli;
+ xclose(p);
+ } else {
+ char b = 'a';
+ ssize_t n;
+
+ n = xsend(c, &b, sizeof(b), 0);
+ if (n == -1)
+ goto close_cli;
+
+ n = xrecv_nonblock(s, &b, sizeof(b), 0);
+ if (n == -1)
+ goto close_cli;
+ }
+
+ key = SK_PASS;
+ err = xbpf_map_lookup_elem(verd_map, &key, &pass);
+ if (err)
+ goto close_cli;
+ if (pass != 1)
+ FAIL("want pass count 1, have %d", pass);
+
+close_cli:
+ xclose(c);
+close_srv:
+ xclose(s);
+}
+
+static void test_reuseport_select_connected(int family, int sotype,
+ int sock_map, int verd_map,
+ int reuseport_prog)
+{
+ struct sockaddr_storage addr;
+ int s, c0, c1, p0, err;
+ unsigned int drop;
+ socklen_t len;
+ u64 value;
+ u32 key;
+
+ zero_verdict_count(verd_map);
+
+ s = socket_loopback_reuseport(family, sotype, reuseport_prog);
+ if (s < 0)
+ return;
+
+ /* Populate sock_map[0] to avoid ENOENT on first connection */
+ key = 0;
+ value = s;
+ err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_srv;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv;
+
+ c0 = xsocket(family, sotype, 0);
+ if (c0 < 0)
+ goto close_srv;
+
+ err = xconnect(c0, sockaddr(&addr), len);
+ if (err)
+ goto close_cli0;
+
+ if (sotype == SOCK_STREAM) {
+ p0 = xaccept_nonblock(s, NULL, NULL);
+ if (p0 < 0)
+ goto close_cli0;
+ } else {
+ p0 = xsocket(family, sotype, 0);
+ if (p0 < 0)
+ goto close_cli0;
+
+ len = sizeof(addr);
+ err = xgetsockname(c0, sockaddr(&addr), &len);
+ if (err)
+ goto close_cli0;
+
+ err = xconnect(p0, sockaddr(&addr), len);
+ if (err)
+ goto close_cli0;
+ }
+
+ /* Update sock_map[0] to redirect to a connected socket */
+ key = 0;
+ value = p0;
+ err = xbpf_map_update_elem(sock_map, &key, &value, BPF_EXIST);
+ if (err)
+ goto close_peer0;
+
+ c1 = xsocket(family, sotype, 0);
+ if (c1 < 0)
+ goto close_peer0;
+
+ len = sizeof(addr);
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ goto close_cli1;
+
+ errno = 0;
+ err = connect(c1, sockaddr(&addr), len);
+ if (sotype == SOCK_DGRAM) {
+ char b = 'a';
+ ssize_t n;
+
+ n = xsend(c1, &b, sizeof(b), 0);
+ if (n == -1)
+ goto close_cli1;
+
+ n = recv_timeout(c1, &b, sizeof(b), 0, IO_TIMEOUT_SEC);
+ err = n == -1;
+ }
+ if (!err || errno != ECONNREFUSED)
+ FAIL_ERRNO("connect: expected ECONNREFUSED");
+
+ key = SK_DROP;
+ err = xbpf_map_lookup_elem(verd_map, &key, &drop);
+ if (err)
+ goto close_cli1;
+ if (drop != 1)
+ FAIL("want drop count 1, have %d", drop);
+
+close_cli1:
+ xclose(c1);
+close_peer0:
+ xclose(p0);
+close_cli0:
+ xclose(c0);
+close_srv:
+ xclose(s);
+}
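+
+/* Once sock_map[0] holds a connected socket, the reuseport program's
+ * bpf_sk_select_reuseport() can no longer pick a valid listening target,
+ * so the second connection attempt is dropped - observed above as
+ * ECONNREFUSED plus a bumped drop count.
+ */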
+
+/* Check that redirecting across reuseport groups is not allowed. */
+static void test_reuseport_mixed_groups(int family, int sotype, int sock_map,
+ int verd_map, int reuseport_prog)
+{
+ struct sockaddr_storage addr;
+ int s1, s2, c, err;
+ unsigned int drop;
+ socklen_t len;
+ u64 value;
+ u32 key;
+
+ zero_verdict_count(verd_map);
+
+ /* Create two listeners, each in its own reuseport group */
+ s1 = socket_loopback_reuseport(family, sotype, reuseport_prog);
+ if (s1 < 0)
+ return;
+
+ s2 = socket_loopback_reuseport(family, sotype, reuseport_prog);
+ if (s2 < 0)
+ goto close_srv1;
+
+ key = 0;
+ value = s1;
+ err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_srv2;
+
+ key = 1;
+ value = s2;
+ err = xbpf_map_update_elem(sock_map, &key, &value, BPF_NOEXIST);
+ if (err)
+ goto close_srv2;
+
+ /* Connect to s2, reuseport BPF selects s1 via sock_map[0] */
+ len = sizeof(addr);
+ err = xgetsockname(s2, sockaddr(&addr), &len);
+ if (err)
+ goto close_srv2;
+
+ c = xsocket(family, sotype, 0);
+ if (c < 0)
+ goto close_srv2;
+
+ err = connect(c, sockaddr(&addr), len);
+ if (sotype == SOCK_DGRAM) {
+ char b = 'a';
+ ssize_t n;
+
+ n = xsend(c, &b, sizeof(b), 0);
+ if (n == -1)
+ goto close_cli;
+
+ n = recv_timeout(c, &b, sizeof(b), 0, IO_TIMEOUT_SEC);
+ err = n == -1;
+ }
+ if (!err || errno != ECONNREFUSED) {
+ FAIL_ERRNO("connect: expected ECONNREFUSED");
+ goto close_cli;
+ }
+
+ /* Expect drop, can't redirect outside of reuseport group */
+ key = SK_DROP;
+ err = xbpf_map_lookup_elem(verd_map, &key, &drop);
+ if (err)
+ goto close_cli;
+ if (drop != 1)
+ FAIL("want drop count 1, have %d", drop);
+
+close_cli:
+ xclose(c);
+close_srv2:
+ xclose(s2);
+close_srv1:
+ xclose(s1);
+}
+
+#define TEST(fn, ...) \
+ { \
+ fn, #fn, __VA_ARGS__ \
+ }
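+
+/* TEST(test_insert) expands to { test_insert, "test_insert" }, leaving
+ * .sotype zero-initialized, i.e. "run for every socket type";
+ * TEST(test_insert_bound, SOCK_STREAM) restricts the test to TCP.
+ */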
+
+static void test_ops_cleanup(const struct bpf_map *map)
+{
+ const struct bpf_map_def *def;
+ int err, mapfd;
+ u32 key;
+
+ def = bpf_map__def(map);
+ mapfd = bpf_map__fd(map);
+
+ for (key = 0; key < def->max_entries; key++) {
+ err = bpf_map_delete_elem(mapfd, &key);
+ if (err && errno != EINVAL && errno != ENOENT)
+ FAIL_ERRNO("map_delete: expected EINVAL/ENOENT");
+ }
+}
+
+static const char *family_str(sa_family_t family)
+{
+ switch (family) {
+ case AF_INET:
+ return "IPv4";
+ case AF_INET6:
+ return "IPv6";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *map_type_str(const struct bpf_map *map)
+{
+ const struct bpf_map_def *def;
+
+ def = bpf_map__def(map);
+ if (IS_ERR(def))
+ return "invalid";
+
+ switch (def->type) {
+ case BPF_MAP_TYPE_SOCKMAP:
+ return "sockmap";
+ case BPF_MAP_TYPE_SOCKHASH:
+ return "sockhash";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *sotype_str(int sotype)
+{
+ switch (sotype) {
+ case SOCK_DGRAM:
+ return "UDP";
+ case SOCK_STREAM:
+ return "TCP";
+ default:
+ return "unknown";
+ }
+}
+
+static void test_ops(struct test_sockmap_listen *skel, struct bpf_map *map,
+ int family, int sotype)
+{
+ const struct op_test {
+ void (*fn)(int family, int sotype, int mapfd);
+ const char *name;
+ int sotype;
+ } tests[] = {
+ /* insert */
+ TEST(test_insert_invalid),
+ TEST(test_insert_opened),
+ TEST(test_insert_bound, SOCK_STREAM),
+ TEST(test_insert),
+ /* delete */
+ TEST(test_delete_after_insert),
+ TEST(test_delete_after_close),
+ /* lookup */
+ TEST(test_lookup_after_insert),
+ TEST(test_lookup_after_delete),
+ TEST(test_lookup_32_bit_value),
+ /* update */
+ TEST(test_update_existing),
+ /* races with insert/delete */
+ TEST(test_destroy_orphan_child, SOCK_STREAM),
+ TEST(test_syn_recv_insert_delete, SOCK_STREAM),
+ TEST(test_race_insert_listen, SOCK_STREAM),
+ /* child clone */
+ TEST(test_clone_after_delete, SOCK_STREAM),
+ TEST(test_accept_after_delete, SOCK_STREAM),
+ TEST(test_accept_before_delete, SOCK_STREAM),
+ };
+ const char *family_name, *map_name, *sotype_name;
+ const struct op_test *t;
+ char s[MAX_TEST_NAME];
+ int map_fd;
+
+ family_name = family_str(family);
+ map_name = map_type_str(map);
+ sotype_name = sotype_str(sotype);
+ map_fd = bpf_map__fd(map);
+
+ for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+ snprintf(s, sizeof(s), "%s %s %s %s", map_name, family_name,
+ sotype_name, t->name);
+
+ if (t->sotype != 0 && t->sotype != sotype)
+ continue;
+
+ if (!test__start_subtest(s))
+ continue;
+
+ t->fn(family, sotype, map_fd);
+ test_ops_cleanup(map);
+ }
+}
+
+static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map,
+ int family, int sotype)
+{
+ const struct redir_test {
+ void (*fn)(struct test_sockmap_listen *skel,
+ struct bpf_map *map, int family, int sotype);
+ const char *name;
+ } tests[] = {
+ TEST(test_skb_redir_to_connected),
+ TEST(test_skb_redir_to_listening),
+ TEST(test_msg_redir_to_connected),
+ TEST(test_msg_redir_to_listening),
+ };
+ const char *family_name, *map_name;
+ const struct redir_test *t;
+ char s[MAX_TEST_NAME];
+
+ family_name = family_str(family);
+ map_name = map_type_str(map);
+
+ for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+ snprintf(s, sizeof(s), "%s %s %s", map_name, family_name,
+ t->name);
+
+ if (!test__start_subtest(s))
+ continue;
+
+ t->fn(skel, map, family, sotype);
+ }
+}
+
+static void test_reuseport(struct test_sockmap_listen *skel,
+ struct bpf_map *map, int family, int sotype)
+{
+ const struct reuseport_test {
+ void (*fn)(int family, int sotype, int socket_map,
+ int verdict_map, int reuseport_prog);
+ const char *name;
+ int sotype;
+ } tests[] = {
+ TEST(test_reuseport_select_listening),
+ TEST(test_reuseport_select_connected),
+ TEST(test_reuseport_mixed_groups),
+ };
+ int socket_map, verdict_map, reuseport_prog;
+ const char *family_name, *map_name, *sotype_name;
+ const struct reuseport_test *t;
+ char s[MAX_TEST_NAME];
+
+ family_name = family_str(family);
+ map_name = map_type_str(map);
+ sotype_name = sotype_str(sotype);
+
+ socket_map = bpf_map__fd(map);
+ verdict_map = bpf_map__fd(skel->maps.verdict_map);
+ reuseport_prog = bpf_program__fd(skel->progs.prog_reuseport);
+
+ for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+ snprintf(s, sizeof(s), "%s %s %s %s", map_name, family_name,
+ sotype_name, t->name);
+
+ if (t->sotype != 0 && t->sotype != sotype)
+ continue;
+
+ if (!test__start_subtest(s))
+ continue;
+
+ t->fn(family, sotype, socket_map, verdict_map, reuseport_prog);
+ }
+}
+
+static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map,
+ int family)
+{
+ test_ops(skel, map, family, SOCK_STREAM);
+ test_ops(skel, map, family, SOCK_DGRAM);
+ test_redir(skel, map, family, SOCK_STREAM);
+ test_reuseport(skel, map, family, SOCK_STREAM);
+ test_reuseport(skel, map, family, SOCK_DGRAM);
+}
+
+void test_sockmap_listen(void)
+{
+ struct test_sockmap_listen *skel;
+
+ skel = test_sockmap_listen__open_and_load();
+ if (!skel) {
+ FAIL("skeleton open/load failed");
+ return;
+ }
+
+ skel->bss->test_sockmap = true;
+ run_tests(skel, skel->maps.sock_map, AF_INET);
+ run_tests(skel, skel->maps.sock_map, AF_INET6);
+
+ skel->bss->test_sockmap = false;
+ run_tests(skel, skel->maps.sock_hash, AF_INET);
+ run_tests(skel, skel->maps.sock_hash, AF_INET6);
+
+ test_sockmap_listen__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
index f4cd60d6fba2..e56b52ab41da 100644
--- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
@@ -188,7 +188,7 @@ static int start_server(void)
};
int fd;
- fd = socket(AF_INET, SOCK_STREAM, 0);
+ fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
if (fd < 0) {
log_err("Failed to create server socket");
return -1;
@@ -205,6 +205,7 @@ static int start_server(void)
static pthread_mutex_t server_started_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t server_started = PTHREAD_COND_INITIALIZER;
+static volatile bool server_done = false;
static void *server_thread(void *arg)
{
@@ -222,23 +223,24 @@ static void *server_thread(void *arg)
if (CHECK_FAIL(err < 0)) {
perror("Failed to listed on socket");
- return NULL;
+ return ERR_PTR(err);
}
- client_fd = accept(fd, (struct sockaddr *)&addr, &len);
+ while (true) {
+ client_fd = accept(fd, (struct sockaddr *)&addr, &len);
+ if (client_fd == -1 && errno == EAGAIN) {
+ usleep(50);
+ continue;
+ }
+ break;
+ }
if (CHECK_FAIL(client_fd < 0)) {
perror("Failed to accept client");
- return NULL;
+ return ERR_PTR(-errno);
}
- /* Wait for the next connection (that never arrives)
- * to keep this thread alive to prevent calling
- * close() on client_fd.
- */
- if (CHECK_FAIL(accept(fd, (struct sockaddr *)&addr, &len) >= 0)) {
- perror("Unexpected success in second accept");
- return NULL;
- }
+ while (!server_done)
+ usleep(50);
close(client_fd);
@@ -249,6 +251,7 @@ void test_tcp_rtt(void)
{
int server_fd, cgroup_fd;
pthread_t tid;
+ void *server_res;
cgroup_fd = test__join_cgroup("/tcp_rtt");
if (CHECK_FAIL(cgroup_fd < 0))
@@ -267,6 +270,11 @@ void test_tcp_rtt(void)
pthread_mutex_unlock(&server_started_mtx);
CHECK_FAIL(run_test(cgroup_fd, server_fd));
+
+ server_done = true;
+ CHECK_FAIL(pthread_join(tid, &server_res));
+ CHECK_FAIL(IS_ERR(server_res));
+
close_server_fd:
close(server_fd);
close_cgroup_fd:
diff --git a/tools/testing/selftests/bpf/prog_tests/test_lsm.c b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
new file mode 100644
index 000000000000..1e4c258de09d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2020 Google LLC.
+ */
+
+#include <test_progs.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <malloc.h>
+#include <stdlib.h>
+
+#include "lsm.skel.h"
+
+char *CMD_ARGS[] = {"true", NULL};
+
+int heap_mprotect(void)
+{
+ void *buf;
+ long sz;
+ int ret;
+
+ sz = sysconf(_SC_PAGESIZE);
+ if (sz < 0)
+ return sz;
+
+ buf = memalign(sz, 2 * sz);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ ret = mprotect(buf, sz, PROT_READ | PROT_WRITE | PROT_EXEC);
+ free(buf);
+ return ret;
+}
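+
+/* Expected to fail with EPERM once the LSM skeleton is attached: the
+ * lsm/file_mprotect program in progs/lsm.c rejects mprotect() on heap
+ * pages for the monitored pid.
+ */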
+
+int exec_cmd(int *monitored_pid)
+{
+ int child_pid, child_status;
+
+ child_pid = fork();
+ if (child_pid == 0) {
+ *monitored_pid = getpid();
+ execvp(CMD_ARGS[0], CMD_ARGS);
+ return -EINVAL;
+ } else if (child_pid > 0) {
+ waitpid(child_pid, &child_status, 0);
+ return child_status;
+ }
+
+ return -EINVAL;
+}
+
+void test_test_lsm(void)
+{
+ struct lsm *skel = NULL;
+ int err, duration = 0;
+
+ skel = lsm__open_and_load();
+ if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
+ goto close_prog;
+
+ err = lsm__attach(skel);
+ if (CHECK(err, "attach", "lsm attach failed: %d\n", err))
+ goto close_prog;
+
+ err = exec_cmd(&skel->bss->monitored_pid);
+ if (CHECK(err < 0, "exec_cmd", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ CHECK(skel->bss->bprm_count != 1, "bprm_count", "bprm_count = %d\n",
+ skel->bss->bprm_count);
+
+ skel->bss->monitored_pid = getpid();
+
+ err = heap_mprotect();
+ if (CHECK(errno != EPERM, "heap_mprotect", "want errno=EPERM, got %d\n",
+ errno))
+ goto close_prog;
+
+ CHECK(skel->bss->mprotect_count != 1, "mprotect_count",
+ "mprotect_count = %d\n", skel->bss->mprotect_count);
+
+close_prog:
+ lsm__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
index 1f6ccdaed1ac..781c8d11604b 100644
--- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
+++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
@@ -55,31 +55,40 @@ void test_trampoline_count(void)
/* attach 'allowed' 40 trampoline programs */
for (i = 0; i < MAX_TRAMP_PROGS; i++) {
obj = bpf_object__open_file(object, NULL);
- if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj)))
+ if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) {
+ obj = NULL;
goto cleanup;
+ }
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
goto cleanup;
inst[i].obj = obj;
+ obj = NULL;
if (rand() % 2) {
- link = load(obj, fentry_name);
- if (CHECK(IS_ERR(link), "attach prog", "err %ld\n", PTR_ERR(link)))
+ link = load(inst[i].obj, fentry_name);
+ if (CHECK(IS_ERR(link), "attach prog", "err %ld\n", PTR_ERR(link))) {
+ link = NULL;
goto cleanup;
+ }
inst[i].link_fentry = link;
} else {
- link = load(obj, fexit_name);
- if (CHECK(IS_ERR(link), "attach prog", "err %ld\n", PTR_ERR(link)))
+ link = load(inst[i].obj, fexit_name);
+ if (CHECK(IS_ERR(link), "attach prog", "err %ld\n", PTR_ERR(link))) {
+ link = NULL;
goto cleanup;
+ }
inst[i].link_fexit = link;
}
}
/* and try 1 extra.. */
obj = bpf_object__open_file(object, NULL);
- if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj)))
+ if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) {
+ obj = NULL;
goto cleanup;
+ }
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
@@ -104,7 +113,9 @@ void test_trampoline_count(void)
cleanup_extra:
bpf_object__close(obj);
cleanup:
- while (--i) {
+ if (i >= MAX_TRAMP_PROGS)
+ i = MAX_TRAMP_PROGS - 1;
+ for (; i >= 0; i--) {
bpf_link__destroy(inst[i].link_fentry);
bpf_link__destroy(inst[i].link_fexit);
bpf_object__close(inst[i].obj);
diff --git a/tools/testing/selftests/bpf/prog_tests/vmlinux.c b/tools/testing/selftests/bpf/prog_tests/vmlinux.c
new file mode 100644
index 000000000000..72310cfc6474
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/vmlinux.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <test_progs.h>
+#include <time.h>
+#include "test_vmlinux.skel.h"
+
+#define MY_TV_NSEC 1337
+
+static void nsleep(void)
+{
+ struct timespec ts = { .tv_nsec = MY_TV_NSEC };
+
+ (void)syscall(__NR_nanosleep, &ts, NULL);
+}
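+
+/* A single nanosleep syscall is enough to fire every attachment type
+ * checked below: the tracepoint, raw tracepoint, BTF-typed tracepoint,
+ * kprobe, and fentry programs all hook the same syscall path.
+ */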
+
+void test_vmlinux(void)
+{
+ int duration = 0, err;
+ struct test_vmlinux *skel;
+ struct test_vmlinux__bss *bss;
+
+ skel = test_vmlinux__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ return;
+ bss = skel->bss;
+
+ err = test_vmlinux__attach(skel);
+ if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ goto cleanup;
+
+ /* trigger everything */
+ nsleep();
+
+ CHECK(!bss->tp_called, "tp", "not called\n");
+ CHECK(!bss->raw_tp_called, "raw_tp", "not called\n");
+ CHECK(!bss->tp_btf_called, "tp_btf", "not called\n");
+ CHECK(!bss->kprobe_called, "kprobe", "not called\n");
+ CHECK(!bss->fentry_called, "fentry", "not called\n");
+
+cleanup:
+ test_vmlinux__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c
new file mode 100644
index 000000000000..05b294d6b923
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#define IFINDEX_LO 1
+#define XDP_FLAGS_REPLACE (1U << 4)
+
+void test_xdp_attach(void)
+{
+ struct bpf_object *obj1, *obj2, *obj3;
+ const char *file = "./test_xdp.o";
+ int err, fd1, fd2, fd3;
+ __u32 duration = 0;
+ DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts,
+ .old_fd = -1);
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj1, &fd1);
+ if (CHECK_FAIL(err))
+ return;
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj2, &fd2);
+ if (CHECK_FAIL(err))
+ goto out_1;
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj3, &fd3);
+ if (CHECK_FAIL(err))
+ goto out_2;
+
+ err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd1, XDP_FLAGS_REPLACE,
+ &opts);
+ if (CHECK(err, "load_ok", "initial load failed"))
+ goto out_close;
+
+ err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, XDP_FLAGS_REPLACE,
+ &opts);
+ if (CHECK(!err, "load_fail", "load with expected id didn't fail"))
+ goto out;
+
+ opts.old_fd = fd1;
+ err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, 0, &opts);
+ if (CHECK(err, "replace_ok", "replace valid old_fd failed"))
+ goto out;
+
+ err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd3, 0, &opts);
+ if (CHECK(!err, "replace_fail", "replace invalid old_fd didn't fail"))
+ goto out;
+
+ err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, 0, &opts);
+ if (CHECK(!err, "remove_fail", "remove invalid old_fd didn't fail"))
+ goto out;
+
+ opts.old_fd = fd2;
+ err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, 0, &opts);
+ if (CHECK(err, "remove_ok", "remove valid old_fd failed"))
+ goto out;
+
+out:
+ bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0);
+out_close:
+ bpf_object__close(obj3);
+out_2:
+ bpf_object__close(obj2);
+out_1:
+ bpf_object__close(obj1);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
index 6b56bdc73ebc..a0f688c37023 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
@@ -4,17 +4,51 @@
#include "test_xdp.skel.h"
#include "test_xdp_bpf2bpf.skel.h"
+struct meta {
+ int ifindex;
+ int pkt_len;
+};
+
+static void on_sample(void *ctx, int cpu, void *data, __u32 size)
+{
+ int duration = 0;
+ struct meta *meta = (struct meta *)data;
+ struct ipv4_packet *trace_pkt_v4 = data + sizeof(*meta);
+
+ if (CHECK(size < sizeof(pkt_v4) + sizeof(*meta),
+ "check_size", "size %u < %zu\n",
+ size, sizeof(pkt_v4) + sizeof(*meta)))
+ return;
+
+ if (CHECK(meta->ifindex != if_nametoindex("lo"), "check_meta_ifindex",
+ "meta->ifindex = %d\n", meta->ifindex))
+ return;
+
+ if (CHECK(meta->pkt_len != sizeof(pkt_v4), "check_meta_pkt_len",
+ "meta->pkt_len = %zd\n", sizeof(pkt_v4)))
+ return;
+
+ if (CHECK(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)),
+ "check_packet_content", "content not the same\n"))
+ return;
+
+ *(bool *)ctx = true;
+}
+
void test_xdp_bpf2bpf(void)
{
__u32 duration = 0, retval, size;
char buf[128];
int err, pkt_fd, map_fd;
+ bool passed = false;
struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
struct iptnl_info value4 = {.family = AF_INET};
struct test_xdp *pkt_skel = NULL;
struct test_xdp_bpf2bpf *ftrace_skel = NULL;
struct vip key4 = {.protocol = 6, .family = AF_INET};
- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct bpf_program *prog;
+ struct perf_buffer *pb = NULL;
+ struct perf_buffer_opts pb_opts = {};
/* Load XDP program to introspect */
pkt_skel = test_xdp__open_and_load();
@@ -27,11 +61,21 @@ void test_xdp_bpf2bpf(void)
bpf_map_update_elem(map_fd, &key4, &value4, 0);
/* Load trace program */
- opts.attach_prog_fd = pkt_fd,
- ftrace_skel = test_xdp_bpf2bpf__open_opts(&opts);
+ ftrace_skel = test_xdp_bpf2bpf__open();
if (CHECK(!ftrace_skel, "__open", "ftrace skeleton failed\n"))
goto out;
+ /* Demonstrate the bpf_program__set_attach_target() API rather than
+ * the load with options, i.e. opts.attach_prog_fd.
+ */
+ prog = ftrace_skel->progs.trace_on_entry;
+ bpf_program__set_expected_attach_type(prog, BPF_TRACE_FENTRY);
+ bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");
+
+ prog = ftrace_skel->progs.trace_on_exit;
+ bpf_program__set_expected_attach_type(prog, BPF_TRACE_FEXIT);
+ bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");
+
err = test_xdp_bpf2bpf__load(ftrace_skel);
if (CHECK(err, "__load", "ftrace skeleton failed\n"))
goto out;
@@ -40,6 +84,14 @@ void test_xdp_bpf2bpf(void)
if (CHECK(err, "ftrace_attach", "ftrace attach failed: %d\n", err))
goto out;
+ /* Set up perf buffer */
+ pb_opts.sample_cb = on_sample;
+ pb_opts.ctx = &passed;
+ pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map),
+ 1, &pb_opts);
+ if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
+ goto out;
+
/* Run test program */
err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4),
buf, &size, &retval, &duration);
@@ -50,6 +102,15 @@ void test_xdp_bpf2bpf(void)
err, errno, retval, size))
goto out;
+ /* Make sure bpf_xdp_output() was triggered and it sent the expected
+ * data to the perf ring buffer.
+ */
+ err = perf_buffer__poll(pb, 100);
+ if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
+ goto out;
+
+ CHECK_FAIL(!passed);
+
/* Verify test results */
if (CHECK(ftrace_skel->bss->test_result_fentry != if_nametoindex("lo"),
"result", "fentry failed err %llu\n",
@@ -60,6 +121,8 @@ void test_xdp_bpf2bpf(void)
"fexit failed err %llu\n", ftrace_skel->bss->test_result_fexit);
out:
+ if (pb)
+ perf_buffer__free(pb);
test_xdp__destroy(pkt_skel);
test_xdp_bpf2bpf__destroy(ftrace_skel);
}
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
index b631fb5032d2..3fb4260570b1 100644
--- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c
+++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
@@ -6,14 +6,24 @@
* the kernel BPF logic.
*/
+#include <stddef.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
#include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "GPL";
+int stg_result = 0;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} sk_stg_map SEC(".maps");
+
#define DCTCP_MAX_ALPHA 1024U
struct dctcp {
@@ -43,12 +53,18 @@ void BPF_PROG(dctcp_init, struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct dctcp *ca = inet_csk_ca(sk);
+ int *stg;
ca->prior_rcv_nxt = tp->rcv_nxt;
ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
ca->loss_cwnd = 0;
ca->ce_state = 0;
+ stg = bpf_sk_storage_get(&sk_stg_map, (void *)tp, NULL, 0);
+ if (stg) {
+ stg_result = *stg;
+ bpf_sk_storage_delete(&sk_stg_map, (void *)tp);
+ }
dctcp_reset(tp, ca);
}
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
index d4a02fe44a12..31975c96e2c9 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
@@ -13,7 +13,7 @@ enum e1 {
enum e2 {
C = 100,
- D = -100,
+ D = 4294967295,
E = 0,
};
diff --git a/tools/testing/selftests/bpf/progs/fentry_test.c b/tools/testing/selftests/bpf/progs/fentry_test.c
index 38d3a82144ca..9365b686f84b 100644
--- a/tools/testing/selftests/bpf/progs/fentry_test.c
+++ b/tools/testing/selftests/bpf/progs/fentry_test.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2019 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
index c329fccf9842..98e1efe14549 100644
--- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
@@ -5,7 +5,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
struct sk_buff {
unsigned int len;
diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c
index 92f3fa47cf40..85c0b516d6ee 100644
--- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c
+++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf_simple.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2019 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
struct sk_buff {
unsigned int len;
diff --git a/tools/testing/selftests/bpf/progs/fexit_test.c b/tools/testing/selftests/bpf/progs/fexit_test.c
index 348109b9ea07..bd1e17d8024c 100644
--- a/tools/testing/selftests/bpf/progs/fexit_test.c
+++ b/tools/testing/selftests/bpf/progs/fexit_test.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2019 Facebook */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c
index 8f48a909f079..a46a264ce24e 100644
--- a/tools/testing/selftests/bpf/progs/kfree_skb.c
+++ b/tools/testing/selftests/bpf/progs/kfree_skb.c
@@ -4,7 +4,7 @@
#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
struct {
diff --git a/tools/testing/selftests/bpf/progs/lsm.c b/tools/testing/selftests/bpf/progs/lsm.c
new file mode 100644
index 000000000000..a4e3c223028d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lsm.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020 Google LLC.
+ */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+
+char _license[] SEC("license") = "GPL";
+
+int monitored_pid = 0;
+int mprotect_count = 0;
+int bprm_count = 0;
+
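+/* For int-returning LSM hooks, the trailing "ret" argument carries the
+ * current hook return value; returning -EPERM below makes mprotect()
+ * fail for heap mappings of the monitored pid.
+ */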
+SEC("lsm/file_mprotect")
+int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot, int ret)
+{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
+ int is_heap = 0;
+
+ if (ret != 0)
+ return ret;
+
+ is_heap = (vma->vm_start >= vma->vm_mm->start_brk &&
+ vma->vm_end <= vma->vm_mm->brk);
+
+ if (is_heap && monitored_pid == pid) {
+ mprotect_count++;
+ ret = -EPERM;
+ }
+
+ return ret;
+}
+
+SEC("lsm/bprm_committed_creds")
+int BPF_PROG(test_void_hook, struct linux_binprm *bprm)
+{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (monitored_pid == pid)
+ bprm_count++;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/modify_return.c b/tools/testing/selftests/bpf/progs/modify_return.c
new file mode 100644
index 000000000000..8b7466a15c6b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/modify_return.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020 Google LLC.
+ */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+static int sequence = 0;
+__s32 input_retval = 0;
+
+__u64 fentry_result = 0;
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(fentry_test, int a, __u64 b)
+{
+ sequence++;
+ fentry_result = (sequence == 1);
+ return 0;
+}
+
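+/* fmod_ret programs run between fentry and the traced function's body;
+ * a non-zero return value here replaces the return value of
+ * bpf_modify_return_test() and skips its body.
+ */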
+__u64 fmod_ret_result = 0;
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(fmod_ret_test, int a, int *b, int ret)
+{
+ sequence++;
+ /* This is the first fmod_ret program, so the ret passed in should be 0 */
+ fmod_ret_result = (sequence == 2 && ret == 0);
+ return input_retval;
+}
+
+__u64 fexit_result = 0;
+SEC("fexit/bpf_modify_return_test")
+int BPF_PROG(fexit_test, int a, __u64 b, int ret)
+{
+ sequence++;
+ /* If input_retval is non-zero, a successful return value modification
+ * should have occurred.
+ */
+ if (input_retval)
+ fexit_result = (sequence == 3 && ret == input_retval);
+ else
+ fexit_result = (sequence == 3 && ret == 4);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
index a5c6d5903b22..ca283af80d4e 100644
--- a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
@@ -12,7 +12,6 @@ int bpf_prog1(struct __sk_buff *skb)
__u32 lport = skb->local_port;
__u32 rport = skb->remote_port;
__u8 *d = data;
- __u32 len = (__u32) data_end - (__u32) data;
int err;
if (data + 10 > data_end) {
diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c
index dd8fae6660ab..8056a4c6d918 100644
--- a/tools/testing/selftests/bpf/progs/test_attach_probe.c
+++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c
@@ -4,6 +4,7 @@
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
int kprobe_res = 0;
int kretprobe_res = 0;
@@ -18,7 +19,7 @@ int handle_kprobe(struct pt_regs *ctx)
}
SEC("kretprobe/sys_nanosleep")
-int handle_kretprobe(struct pt_regs *ctx)
+int BPF_KRETPROBE(handle_kretprobe)
{
kretprobe_res = 2;
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_cgroup_link.c b/tools/testing/selftests/bpf/progs/test_cgroup_link.c
new file mode 100644
index 000000000000..77e47b9e4446
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_cgroup_link.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+int calls = 0;
+int alt_calls = 0;
+
+SEC("cgroup_skb/egress1")
+int egress(struct __sk_buff *skb)
+{
+ __sync_fetch_and_add(&calls, 1);
+ return 1;
+}
+
+SEC("cgroup_skb/egress2")
+int egress_alt(struct __sk_buff *skb)
+{
+ __sync_fetch_and_add(&alt_calls, 1);
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
+
diff --git a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c
new file mode 100644
index 000000000000..8941a41c2a55
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp_err.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+#define MAX_STACK_RAWTP 10
+
+SEC("raw_tracepoint/sys_enter")
+int bpf_prog2(void *ctx)
+{
+ __u64 stack[MAX_STACK_RAWTP];
+ int error;
+
+ /* set all the flags which should return -EINVAL */
+ error = bpf_get_stack(ctx, stack, 0, -1);
+ if (error < 0)
+ goto loop;
+
+ return error;
+loop:
+ while (1) {
+ error++;
+ }
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_global_data.c b/tools/testing/selftests/bpf/progs/test_global_data.c
index dd7a4d3dbc0d..1319be1c54ba 100644
--- a/tools/testing/selftests/bpf/progs/test_global_data.c
+++ b/tools/testing/selftests/bpf/progs/test_global_data.c
@@ -68,7 +68,7 @@ static struct foo struct3 = {
bpf_map_update_elem(&result_##map, &key, var, 0); \
} while (0)
-SEC("static_data_load")
+SEC("classifier/static_data_load")
int load_static_data(struct __sk_buff *skb)
{
static const __u64 bar = ~0;
diff --git a/tools/testing/selftests/bpf/progs/test_link_pinning.c b/tools/testing/selftests/bpf/progs/test_link_pinning.c
new file mode 100644
index 000000000000..bbf2a5264dc0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_link_pinning.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+int in = 0;
+int out = 0;
+
+SEC("raw_tp/sys_enter")
+int raw_tp_prog(const void *ctx)
+{
+ out = in;
+ return 0;
+}
+
+SEC("tp_btf/sys_enter")
+int tp_btf_prog(const void *ctx)
+{
+ out = in;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
new file mode 100644
index 000000000000..1dca70a6de2f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Carlos Neira cneirabustos@gmail.com */
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include <bpf/bpf_helpers.h>
+
+static volatile struct {
+ __u64 dev;
+ __u64 ino;
+ __u64 pid_tgid;
+ __u64 user_pid_tgid;
+} res;
+
+SEC("raw_tracepoint/sys_enter")
+int trace(void *ctx)
+{
+ __u64 ns_pid_tgid, expected_pid;
+ struct bpf_pidns_info nsdata;
+ __u32 key = 0;
+
+ if (bpf_get_ns_current_pid_tgid(res.dev, res.ino, &nsdata,
+ sizeof(struct bpf_pidns_info)))
+ return 0;
+
+ ns_pid_tgid = (__u64)nsdata.tgid << 32 | nsdata.pid;
+ expected_pid = res.user_pid_tgid;
+
+ if (expected_pid != ns_pid_tgid)
+ return 0;
+
+ res.pid_tgid = ns_pid_tgid;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_overhead.c b/tools/testing/selftests/bpf/progs/test_overhead.c
index bfe9fbcb9684..56a50b25cd33 100644
--- a/tools/testing/selftests/bpf/progs/test_overhead.c
+++ b/tools/testing/selftests/bpf/progs/test_overhead.c
@@ -6,7 +6,6 @@
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "bpf_trace_helpers.h"
struct task_struct;
@@ -17,11 +16,9 @@ int BPF_KPROBE(prog1, struct task_struct *tsk, const char *buf, bool exec)
}
SEC("kretprobe/__set_task_comm")
-int BPF_KRETPROBE(prog2,
- struct task_struct *tsk, const char *buf, bool exec,
- int ret)
+int BPF_KRETPROBE(prog2, int ret)
{
- return !PT_REGS_PARM1(ctx) && ret;
+ return ret;
}
SEC("raw_tp/task_rename")
diff --git a/tools/testing/selftests/bpf/progs/test_perf_branches.c b/tools/testing/selftests/bpf/progs/test_perf_branches.c
new file mode 100644
index 000000000000..a1ccc831c882
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_perf_branches.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <stddef.h>
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int valid = 0;
+int required_size_out = 0;
+int written_stack_out = 0;
+int written_global_out = 0;
+
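+/* Global buffer sized like struct perf_branch_entry (three 64-bit
+ * words per record), with room for up to 30 branch records.
+ */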
+struct {
+ __u64 _a;
+ __u64 _b;
+ __u64 _c;
+} fpbe[30] = {0};
+
+SEC("perf_event")
+int perf_branches(void *ctx)
+{
+ __u64 entries[4 * 3] = {0};
+ int required_size, written_stack, written_global;
+
+ /* write to stack */
+ written_stack = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
+ /* ignore spurious events */
+ if (!written_stack)
+ return 1;
+
+ /* get required size */
+ required_size = bpf_read_branch_records(ctx, NULL, 0,
+ BPF_F_GET_BRANCH_RECORDS_SIZE);
+
+ written_global = bpf_read_branch_records(ctx, fpbe, sizeof(fpbe), 0);
+ /* ignore spurious events */
+ if (!written_global)
+ return 1;
+
+ required_size_out = required_size;
+ written_stack_out = written_stack;
+ written_global_out = written_global;
+ valid = 1;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
index ebfcc9f50c35..ad59c4c9aba8 100644
--- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c
+++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c
@@ -4,7 +4,7 @@
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_tracing.h>
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c
index d556b1572cc6..89b3532ccc75 100644
--- a/tools/testing/selftests/bpf/progs/test_probe_user.c
+++ b/tools/testing/selftests/bpf/progs/test_probe_user.c
@@ -7,7 +7,6 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "bpf_trace_helpers.h"
static struct sockaddr_in old;
diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c
new file mode 100644
index 000000000000..8f530843b4da
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Cloudflare Ltd.
+// Copyright (c) 2020 Isovalent, Inc.
+
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/pkt_cls.h>
+#include <linux/tcp.h>
+#include <sys/socket.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+int _version SEC("version") = 1;
+char _license[] SEC("license") = "GPL";
+
+/* Fill 'tuple' with L3 info, and attempt to find L4. On fail, return NULL. */
+static inline struct bpf_sock_tuple *
+get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+ struct bpf_sock_tuple *result;
+ struct ethhdr *eth;
+ __u64 tuple_len;
+ __u8 proto = 0;
+ __u64 ihl_len;
+
+ eth = (struct ethhdr *)(data);
+ if (eth + 1 > data_end)
+ return NULL;
+
+ if (eth->h_proto == bpf_htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)(data + sizeof(*eth));
+
+ if (iph + 1 > data_end)
+ return NULL;
+ if (iph->ihl != 5)
+ /* Options are not supported */
+ return NULL;
+ ihl_len = iph->ihl * 4;
+ proto = iph->protocol;
+ *ipv4 = true;
+ result = (struct bpf_sock_tuple *)&iph->saddr;
+ } else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + sizeof(*eth));
+
+ if (ip6h + 1 > data_end)
+ return NULL;
+ ihl_len = sizeof(*ip6h);
+ proto = ip6h->nexthdr;
+ *ipv4 = false;
+ result = (struct bpf_sock_tuple *)&ip6h->saddr;
+ } else {
+ return (struct bpf_sock_tuple *)data;
+ }
+
+ if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
+ return NULL;
+
+ *tcp = (proto == IPPROTO_TCP);
+ return result;
+}
+
+static inline int
+handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
+{
+ struct bpf_sock_tuple ln = {0};
+ struct bpf_sock *sk;
+ size_t tuple_len;
+ int ret;
+
+ tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
+ if ((void *)tuple + tuple_len > (void *)(long)skb->data_end)
+ return TC_ACT_SHOT;
+
+ sk = bpf_sk_lookup_udp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
+ if (sk)
+ goto assign;
+
+ if (ipv4) {
+ if (tuple->ipv4.dport != bpf_htons(4321))
+ return TC_ACT_OK;
+
+ ln.ipv4.daddr = bpf_htonl(0x7f000001);
+ ln.ipv4.dport = bpf_htons(1234);
+
+ sk = bpf_sk_lookup_udp(skb, &ln, sizeof(ln.ipv4),
+ BPF_F_CURRENT_NETNS, 0);
+ } else {
+ if (tuple->ipv6.dport != bpf_htons(4321))
+ return TC_ACT_OK;
+
+ /* Upper parts of daddr are already zero. */
+ ln.ipv6.daddr[3] = bpf_htonl(0x1);
+ ln.ipv6.dport = bpf_htons(1234);
+
+ sk = bpf_sk_lookup_udp(skb, &ln, sizeof(ln.ipv6),
+ BPF_F_CURRENT_NETNS, 0);
+ }
+
+ /* workaround: We can't do a single socket lookup here, because then
+ * the compiler will likely spill tuple_len to the stack. This makes it
+ * lose all bounds information in the verifier, which then rejects the
+ * call as unsafe.
+ */
+ if (!sk)
+ return TC_ACT_SHOT;
+
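+ /* bpf_sk_assign() steers the skb to the selected socket; the reference
+ * taken by the lookup must still be released afterwards. */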
+assign:
+ ret = bpf_sk_assign(skb, sk, 0);
+ bpf_sk_release(sk);
+ return ret;
+}
+
+static inline int
+handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
+{
+ struct bpf_sock_tuple ln = {0};
+ struct bpf_sock *sk;
+ size_t tuple_len;
+ int ret;
+
+ tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
+ if ((void *)tuple + tuple_len > (void *)(long)skb->data_end)
+ return TC_ACT_SHOT;
+
+ sk = bpf_skc_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
+ if (sk) {
+ if (sk->state != BPF_TCP_LISTEN)
+ goto assign;
+ bpf_sk_release(sk);
+ }
+
+ if (ipv4) {
+ if (tuple->ipv4.dport != bpf_htons(4321))
+ return TC_ACT_OK;
+
+ ln.ipv4.daddr = bpf_htonl(0x7f000001);
+ ln.ipv4.dport = bpf_htons(1234);
+
+ sk = bpf_skc_lookup_tcp(skb, &ln, sizeof(ln.ipv4),
+ BPF_F_CURRENT_NETNS, 0);
+ } else {
+ if (tuple->ipv6.dport != bpf_htons(4321))
+ return TC_ACT_OK;
+
+ /* Upper parts of daddr are already zero. */
+ ln.ipv6.daddr[3] = bpf_htonl(0x1);
+ ln.ipv6.dport = bpf_htons(1234);
+
+ sk = bpf_skc_lookup_tcp(skb, &ln, sizeof(ln.ipv6),
+ BPF_F_CURRENT_NETNS, 0);
+ }
+
+ /* workaround: We can't do a single socket lookup here, because then
+ * the compiler will likely spill tuple_len to the stack. This makes it
+ * lose all bounds information in the verifier, which then rejects the
+ * call as unsafe.
+ */
+ if (!sk)
+ return TC_ACT_SHOT;
+
+ if (sk->state != BPF_TCP_LISTEN) {
+ bpf_sk_release(sk);
+ return TC_ACT_SHOT;
+ }
+
+assign:
+ ret = bpf_sk_assign(skb, sk, 0);
+ bpf_sk_release(sk);
+ return ret;
+}
+
+SEC("classifier/sk_assign_test")
+int bpf_sk_assign_test(struct __sk_buff *skb)
+{
+ struct bpf_sock_tuple *tuple, ln = {0};
+ bool ipv4 = false;
+ bool tcp = false;
+ int tuple_len;
+ int ret = 0;
+
+ tuple = get_tuple(skb, &ipv4, &tcp);
+ if (!tuple)
+ return TC_ACT_SHOT;
+
+ /* Note that the verifier socket return type for bpf_skc_lookup_tcp()
+ * differs from bpf_sk_lookup_udp(), so even though the C-level type is
+ * the same here, if we try to share the implementations they will
+ * fail to verify because we're crossing pointer types.
+ */
+ if (tcp)
+ ret = handle_tcp(skb, tuple, ipv4);
+ else
+ ret = handle_udp(skb, tuple, ipv4);
+
+ return ret == 0 ? TC_ACT_OK : TC_ACT_SHOT;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
index 202de3938494..b02ea589ce7e 100644
--- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
@@ -23,6 +23,8 @@ int process(struct __sk_buff *skb)
return 1;
if (skb->gso_segs != 8)
return 1;
+ if (skb->gso_size != 10)
+ return 1;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
new file mode 100644
index 000000000000..a3a366c57ce1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Cloudflare
+
+#include <errno.h>
+#include <stdbool.h>
+#include <linux/bpf.h>
+
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u64);
+} sock_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKHASH);
+ __uint(max_entries, 2);
+ __type(key, __u32);
+ __type(value, __u64);
+} sock_hash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, int);
+ __type(value, unsigned int);
+} verdict_map SEC(".maps");
+
+static volatile bool test_sockmap; /* toggled by user-space */
+
+SEC("sk_skb/stream_parser")
+int prog_skb_parser(struct __sk_buff *skb)
+{
+ return skb->len;
+}
+
+SEC("sk_skb/stream_verdict")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ unsigned int *count;
+ __u32 zero = 0;
+ int verdict;
+
+ if (test_sockmap)
+ verdict = bpf_sk_redirect_map(skb, &sock_map, zero, 0);
+ else
+ verdict = bpf_sk_redirect_hash(skb, &sock_hash, &zero, 0);
+
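+ /* Tally verdicts, using the verdict value itself (SK_PASS/SK_DROP)
+ * as the array index. */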
+ count = bpf_map_lookup_elem(&verdict_map, &verdict);
+ if (count)
+ (*count)++;
+
+ return verdict;
+}
+
+SEC("sk_msg")
+int prog_msg_verdict(struct sk_msg_md *msg)
+{
+ unsigned int *count;
+ __u32 zero = 0;
+ int verdict;
+
+ if (test_sockmap)
+ verdict = bpf_msg_redirect_map(msg, &sock_map, zero, 0);
+ else
+ verdict = bpf_msg_redirect_hash(msg, &sock_hash, &zero, 0);
+
+ count = bpf_map_lookup_elem(&verdict_map, &verdict);
+ if (count)
+ (*count)++;
+
+ return verdict;
+}
+
+SEC("sk_reuseport")
+int prog_reuseport(struct sk_reuseport_md *reuse)
+{
+ unsigned int *count;
+ int err, verdict;
+ __u32 zero = 0;
+
+ if (test_sockmap)
+ err = bpf_sk_select_reuseport(reuse, &sock_map, &zero, 0);
+ else
+ err = bpf_sk_select_reuseport(reuse, &sock_hash, &zero, 0);
+ verdict = err ? SK_DROP : SK_PASS;
+
+ count = bpf_map_lookup_elem(&verdict_map, &verdict);
+ if (count)
+ (*count)++;
+
+ return verdict;
+}
+
+int _version SEC("version") = 1;
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_trampoline_count.c b/tools/testing/selftests/bpf/progs/test_trampoline_count.c
index e51e6e3a81c2..f030e469d05b 100644
--- a/tools/testing/selftests/bpf/progs/test_trampoline_count.c
+++ b/tools/testing/selftests/bpf/progs/test_trampoline_count.c
@@ -2,7 +2,8 @@
#include <stdbool.h>
#include <stddef.h>
#include <linux/bpf.h>
-#include "bpf_trace_helpers.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
struct task_struct;
diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c
new file mode 100644
index 000000000000..5611b564d3b1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include "vmlinux.h"
+#include <asm/unistd.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+#define MY_TV_NSEC 1337
+
+bool tp_called = false;
+bool raw_tp_called = false;
+bool tp_btf_called = false;
+bool kprobe_called = false;
+bool fentry_called = false;
+
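+/* Five attach flavors for the same nanosleep path. BPF_CORE_READ() and
+ * the PT_REGS_*_CORE() macros relocate field offsets against the running
+ * kernel's BTF, so the accesses keep working across kernel versions.
+ */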
+SEC("tp/syscalls/sys_enter_nanosleep")
+int handle__tp(struct trace_event_raw_sys_enter *args)
+{
+ struct __kernel_timespec *ts;
+
+ if (args->id != __NR_nanosleep)
+ return 0;
+
+ ts = (void *)args->args[0];
+ if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
+ return 0;
+
+ tp_called = true;
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id)
+{
+ struct __kernel_timespec *ts;
+
+ if (id != __NR_nanosleep)
+ return 0;
+
+ ts = (void *)PT_REGS_PARM1_CORE(regs);
+ if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
+ return 0;
+
+ raw_tp_called = true;
+ return 0;
+}
+
+SEC("tp_btf/sys_enter")
+int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
+{
+ struct __kernel_timespec *ts;
+
+ if (id != __NR_nanosleep)
+ return 0;
+
+ ts = (void *)PT_REGS_PARM1_CORE(regs);
+ if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
+ return 0;
+
+ tp_btf_called = true;
+ return 0;
+}
+
+SEC("kprobe/hrtimer_nanosleep")
+int BPF_KPROBE(handle__kprobe,
+ ktime_t rqtp, enum hrtimer_mode mode, clockid_t clockid)
+{
+ if (rqtp == MY_TV_NSEC)
+ kprobe_called = true;
+ return 0;
+}
+
+SEC("fentry/hrtimer_nanosleep")
+int BPF_PROG(handle__fentry,
+ ktime_t rqtp, enum hrtimer_mode mode, clockid_t clockid)
+{
+ if (rqtp == MY_TV_NSEC)
+ fentry_called = true;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
index cb8a04ab7a78..a038e827f850 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
+#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
-#include "bpf_trace_helpers.h"
+
+char _license[] SEC("license") = "GPL";
struct net_device {
/* Structure does not need to contain all entries,
@@ -27,16 +29,40 @@ struct xdp_buff {
struct xdp_rxq_info *rxq;
} __attribute__((preserve_access_index));
+struct meta {
+ int ifindex;
+ int pkt_len;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+} perf_buf_map SEC(".maps");
+
__u64 test_result_fentry = 0;
-SEC("fentry/_xdp_tx_iptunnel")
+SEC("fentry/FUNC")
int BPF_PROG(trace_on_entry, struct xdp_buff *xdp)
{
+ struct meta meta;
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+
+ meta.ifindex = xdp->rxq->dev->ifindex;
+ meta.pkt_len = data_end - data;
+ bpf_xdp_output(xdp, &perf_buf_map,
+ ((__u64) meta.pkt_len << 32) |
+ BPF_F_CURRENT_CPU,
+ &meta, sizeof(meta));
+
test_result_fentry = xdp->rxq->dev->ifindex;
return 0;
}
__u64 test_result_fexit = 0;
-SEC("fexit/_xdp_tx_iptunnel")
+SEC("fexit/FUNC")
int BPF_PROG(trace_on_exit, struct xdp_buff *xdp, int ret)
{
test_result_fexit = ret;
diff --git a/tools/testing/selftests/bpf/test_bpftool.py b/tools/testing/selftests/bpf/test_bpftool.py
new file mode 100644
index 000000000000..4fed2dc25c0a
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_bpftool.py
@@ -0,0 +1,180 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2020 SUSE LLC.
+
+import collections
+import functools
+import json
+import os
+import socket
+import subprocess
+import unittest
+
+
+# Add the source tree of bpftool and /usr/local/sbin to PATH
+cur_dir = os.path.dirname(os.path.realpath(__file__))
+bpftool_dir = os.path.abspath(os.path.join(cur_dir, "..", "..", "..", "..",
+ "tools", "bpf", "bpftool"))
+os.environ["PATH"] = bpftool_dir + ":/usr/local/sbin:" + os.environ["PATH"]
+
+
+class IfaceNotFoundError(Exception):
+ pass
+
+
+class UnprivilegedUserError(Exception):
+ pass
+
+
+def _bpftool(args, json=True):
+ _args = ["bpftool"]
+ if json:
+ _args.append("-j")
+ _args.extend(args)
+
+ return subprocess.check_output(_args)
+
+
+def bpftool(args):
+ return _bpftool(args, json=False).decode("utf-8")
+
+
+def bpftool_json(args):
+ res = _bpftool(args)
+ return json.loads(res)
+
+
+def get_default_iface():
+ for iface in socket.if_nameindex():
+ if iface[1] != "lo":
+ return iface[1]
+ raise IfaceNotFoundError("Could not find any network interface to probe")
+
+
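+# Decorator that resolves the first non-loopback interface and appends it
+# to the wrapped test's positional arguments.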
+def default_iface(f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ iface = get_default_iface()
+ return f(*args, iface, **kwargs)
+ return wrapper
+
+
+class TestBpftool(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ if os.getuid() != 0:
+ raise UnprivilegedUserError(
+ "This test suite needs root privileges")
+
+ @default_iface
+ def test_feature_dev_json(self, iface):
+ unexpected_helpers = [
+ "bpf_probe_write_user",
+ "bpf_trace_printk",
+ ]
+ expected_keys = [
+ "syscall_config",
+ "program_types",
+ "map_types",
+ "helpers",
+ "misc",
+ ]
+
+ res = bpftool_json(["feature", "probe", "dev", iface])
+ # Check if the result has all expected keys.
+ self.assertCountEqual(res.keys(), expected_keys)
+ # Check if unexpected helpers are not included in helpers probes
+ # result.
+ for helpers in res["helpers"].values():
+ for unexpected_helper in unexpected_helpers:
+ self.assertNotIn(unexpected_helper, helpers)
+
+ def test_feature_kernel(self):
+ test_cases = [
+ bpftool_json(["feature", "probe", "kernel"]),
+ bpftool_json(["feature", "probe"]),
+ bpftool_json(["feature"]),
+ ]
+ unexpected_helpers = [
+ "bpf_probe_write_user",
+ "bpf_trace_printk",
+ ]
+ expected_keys = [
+ "syscall_config",
+ "system_config",
+ "program_types",
+ "map_types",
+ "helpers",
+ "misc",
+ ]
+
+ for tc in test_cases:
+ # Check if the result has all expected keys.
+ self.assertCountEqual(tc.keys(), expected_keys)
+ # Check if unexpected helpers are not included in helpers probes
+ # result.
+ for helpers in tc["helpers"].values():
+ for unexpected_helper in unexpected_helpers:
+ self.assertNotIn(unexpected_helper, helpers)
+
+ def test_feature_kernel_full(self):
+ test_cases = [
+ bpftool_json(["feature", "probe", "kernel", "full"]),
+ bpftool_json(["feature", "probe", "full"]),
+ ]
+ expected_helpers = [
+ "bpf_probe_write_user",
+ "bpf_trace_printk",
+ ]
+
+ for tc in test_cases:
+ # Check if expected helpers are included at least once in any
+ # helpers list for any program type. Unfortunately we cannot assume
+ # that they will be included in all program types or a specific
+ # subset of programs. It depends on the kernel version and
+ # configuration.
+ found_helpers = False
+
+ for helpers in tc["helpers"].values():
+ if all(expected_helper in helpers
+ for expected_helper in expected_helpers):
+ found_helpers = True
+ break
+
+ self.assertTrue(found_helpers)
+
+ def test_feature_kernel_full_vs_not_full(self):
+ full_res = bpftool_json(["feature", "probe", "full"])
+ not_full_res = bpftool_json(["feature", "probe"])
+ not_full_set = set()
+ full_set = set()
+
+ for helpers in full_res["helpers"].values():
+ for helper in helpers:
+ full_set.add(helper)
+
+ for helpers in not_full_res["helpers"].values():
+ for helper in helpers:
+ not_full_set.add(helper)
+
+ self.assertCountEqual(full_set - not_full_set,
+ {"bpf_probe_write_user", "bpf_trace_printk"})
+ self.assertCountEqual(not_full_set - full_set, set())
+
+ def test_feature_macros(self):
+ expected_patterns = [
+ r"/\*\*\* System call availability \*\*\*/",
+ r"#define HAVE_BPF_SYSCALL",
+ r"/\*\*\* eBPF program types \*\*\*/",
+ r"#define HAVE.*PROG_TYPE",
+ r"/\*\*\* eBPF map types \*\*\*/",
+ r"#define HAVE.*MAP_TYPE",
+ r"/\*\*\* eBPF helper functions \*\*\*/",
+ r"#define HAVE.*HELPER",
+ r"/\*\*\* eBPF misc features \*\*\*/",
+ ]
+
+ res = bpftool(["feature", "probe", "macros"])
+ for pattern in expected_patterns:
+ self.assertRegex(res, pattern)
diff --git a/tools/testing/selftests/bpf/test_bpftool.sh b/tools/testing/selftests/bpf/test_bpftool.sh
new file mode 100755
index 000000000000..66690778e36d
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_bpftool.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2020 SUSE LLC.
+
+python3 -m unittest -v test_bpftool.TestBpftool
diff --git a/tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c b/tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c
new file mode 100644
index 000000000000..ed253f252cd0
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Carlos Neira cneirabustos@gmail.com */
+#define _GNU_SOURCE
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sched.h>
+#include <sys/wait.h>
+#include <sys/mount.h>
+#include "test_progs.h"
+
+#define CHECK_NEWNS(condition, tag, format...) ({ \
+ int __ret = !!(condition); \
+ if (__ret) { \
+ printf("%s:FAIL:%s ", __func__, tag); \
+ printf(format); \
+ } else { \
+ printf("%s:PASS:%s\n", __func__, tag); \
+ } \
+ __ret; \
+})
+
+struct bss {
+ __u64 dev;
+ __u64 ino;
+ __u64 pid_tgid;
+ __u64 user_pid_tgid;
+};
+
+int main(int argc, char **argv)
+{
+ pid_t pid;
+ int exit_code = 1;
+ struct stat st;
+
+ printf("Testing bpf_get_ns_current_pid_tgid helper in new ns\n");
+
+ if (stat("/proc/self/ns/pid", &st)) {
+ perror("stat failed on /proc/self/ns/pid ns\n");
+ printf("%s:FAILED\n", argv[0]);
+ return exit_code;
+ }
+
+ if (CHECK_NEWNS(unshare(CLONE_NEWPID | CLONE_NEWNS),
+ "unshare CLONE_NEWPID | CLONE_NEWNS", "error errno=%d\n", errno))
+ return exit_code;
+
+ pid = fork();
+ if (pid == -1) {
+ perror("Fork() failed\n");
+ printf("%s:FAILED\n", argv[0]);
+ return exit_code;
+ }
+
+ if (pid > 0) {
+ int status;
+
+ usleep(5);
+ waitpid(pid, &status, 0);
+ return 0;
+ } else {
+
+ pid = fork();
+ if (pid == -1) {
+ perror("Fork() failed\n");
+ printf("%s:FAILED\n", argv[0]);
+ return exit_code;
+ }
+
+ if (pid > 0) {
+ int status;
+ waitpid(pid, &status, 0);
+ return 0;
+ } else {
+ if (CHECK_NEWNS(mount("none", "/proc", NULL, MS_PRIVATE|MS_REC, NULL),
+ "Unmounting proc", "Cannot umount proc! errno=%d\n", errno))
+ return exit_code;
+
+ if (CHECK_NEWNS(mount("proc", "/proc", "proc", MS_NOSUID|MS_NOEXEC|MS_NODEV, NULL),
+ "Mounting proc", "Cannot mount proc! errno=%d\n", errno))
+ return exit_code;
+
+ const char *probe_name = "raw_tracepoint/sys_enter";
+ const char *file = "test_ns_current_pid_tgid.o";
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ struct bpf_map *bss_map;
+ struct bpf_object *obj;
+ int exit_code = 1;
+ int err, key = 0;
+ struct bss bss;
+ struct stat st;
+ __u64 id;
+
+ obj = bpf_object__open_file(file, NULL);
+ if (CHECK_NEWNS(IS_ERR(obj), "obj_open", "err %ld\n", PTR_ERR(obj)))
+ return exit_code;
+
+ err = bpf_object__load(obj);
+ if (CHECK_NEWNS(err, "obj_load", "err %d errno %d\n", err, errno))
+ goto cleanup;
+
+ bss_map = bpf_object__find_map_by_name(obj, "test_ns_.bss");
+ if (CHECK_NEWNS(!bss_map, "find_bss_map", "failed\n"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_title(obj, probe_name);
+ if (CHECK_NEWNS(!prog, "find_prog", "prog '%s' not found\n",
+ probe_name))
+ goto cleanup;
+
+ memset(&bss, 0, sizeof(bss));
+ pid_t tid = syscall(SYS_gettid);
+ pid_t pid = getpid();
+
+ id = (__u64) tid << 32 | pid;
+ bss.user_pid_tgid = id;
+
+ if (CHECK_NEWNS(stat("/proc/self/ns/pid", &st),
+ "stat new ns", "Failed to stat /proc/self/ns/pid errno=%d\n", errno))
+ goto cleanup;
+
+ bss.dev = st.st_dev;
+ bss.ino = st.st_ino;
+
+ err = bpf_map_update_elem(bpf_map__fd(bss_map), &key, &bss, 0);
+ if (CHECK_NEWNS(err, "setting_bss", "failed to set bss : %d\n", err))
+ goto cleanup;
+
+ link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
+ if (CHECK_NEWNS(IS_ERR(link), "attach_raw_tp", "err %ld\n",
+ PTR_ERR(link))) {
+ link = NULL;
+ goto cleanup;
+ }
+
+ /* trigger some syscalls */
+ usleep(1);
+
+ err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &key, &bss);
+ if (CHECK_NEWNS(err, "set_bss", "failed to get bss : %d\n", err))
+ goto cleanup;
+
+ if (CHECK_NEWNS(id != bss.pid_tgid, "Compare user pid/tgid vs. bpf pid/tgid",
+ "User pid/tgid %llu BPF pid/tgid %llu\n", id, bss.pid_tgid))
+ goto cleanup;
+
+ exit_code = 0;
+ printf("%s:PASS\n", argv[0]);
+cleanup:
+ if (link) {
+ bpf_link__destroy(link);
+ link = NULL;
+ }
+ bpf_object__close(obj);
+ }
+ }
+}
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 02eae1e864c2..c6766b2cff85 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -756,11 +756,7 @@ static void test_sockmap(unsigned int tasks, void *data)
/* Test update without programs */
for (i = 0; i < 6; i++) {
err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
- if (i < 2 && !err) {
- printf("Allowed update sockmap '%i:%i' not in ESTABLISHED\n",
- i, sfd[i]);
- goto out_sockmap;
- } else if (i >= 2 && err) {
+ if (err) {
printf("Failed noprog update sockmap '%i:%i'\n",
i, sfd[i]);
goto out_sockmap;
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index bab1e6f1d8f1..b521e0a512b6 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -1,11 +1,16 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
*/
+#define _GNU_SOURCE
#include "test_progs.h"
#include "cgroup_helpers.h"
#include "bpf_rlimit.h"
#include <argp.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
#include <string.h>
+#include <execinfo.h> /* backtrace */
/* defined in test_progs.h */
struct test_env env = {};
@@ -27,6 +32,20 @@ struct prog_test_def {
int old_error_cnt;
};
+/* Override C runtime library's usleep() implementation to ensure nanosleep()
+ * is always called. usleep() is frequently used in selftests as a way to
+ * trigger kprobes and tracepoints.
+ */
+int usleep(useconds_t usec)
+{
+ struct timespec ts = {
+ .tv_sec = usec / 1000000,
+ .tv_nsec = (usec % 1000000) * 1000,
+ };
+
+ return syscall(__NR_nanosleep, &ts, NULL);
+}
+
static bool should_run(struct test_selector *sel, int num, const char *name)
{
int i;
@@ -74,6 +93,34 @@ static void skip_account(void)
}
}
+static void stdio_restore(void);
+
+/* A bunch of tests set custom affinity per-thread and/or per-process. Reset
+ * it after each test/sub-test.
+ */
+static void reset_affinity(void)
+{
+ cpu_set_t cpuset;
+ int i, err;
+
+ CPU_ZERO(&cpuset);
+ for (i = 0; i < env.nr_cpus; i++)
+ CPU_SET(i, &cpuset);
+
+ err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
+ if (err < 0) {
+ stdio_restore();
+ fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
+ exit(-1);
+ }
+ err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
+ if (err < 0) {
+ stdio_restore();
+ fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
+ exit(-1);
+ }
+}
+
void test__end_subtest()
{
struct prog_test_def *test = env.test;
@@ -91,6 +138,8 @@ void test__end_subtest()
test->test_num, test->subtest_num,
test->subtest_name, sub_error_cnt ? "FAIL" : "OK");
+ reset_affinity();
+
free(test->subtest_name);
test->subtest_name = NULL;
}
@@ -196,7 +245,7 @@ int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
map = bpf_object__find_map_by_name(obj, name);
if (!map) {
- printf("%s:FAIL:map '%s' not found\n", test, name);
+ fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
test__fail();
return -1;
}
@@ -367,7 +416,7 @@ static int libbpf_print_fn(enum libbpf_print_level level,
{
if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
return 0;
- vprintf(format, args);
+ vfprintf(stdout, format, args);
return 0;
}
@@ -408,7 +457,7 @@ err:
int parse_num_list(const char *s, struct test_selector *sel)
{
- int i, set_len = 0, num, start = 0, end = -1;
+ int i, set_len = 0, new_len, num, start = 0, end = -1;
bool *set = NULL, *tmp, parsing_end = false;
char *next;
@@ -443,18 +492,19 @@ int parse_num_list(const char *s, struct test_selector *sel)
return -EINVAL;
if (end + 1 > set_len) {
- set_len = end + 1;
- tmp = realloc(set, set_len);
+ new_len = end + 1;
+ tmp = realloc(set, new_len);
if (!tmp) {
free(set);
return -ENOMEM;
}
+ for (i = set_len; i < start; i++)
+ tmp[i] = false;
set = tmp;
+ set_len = new_len;
}
- for (i = start; i <= end; i++) {
+ for (i = start; i <= end; i++)
set[i] = true;
- }
-
}
if (!set)
@@ -613,10 +663,29 @@ int cd_flavor_subdir(const char *exec_name)
if (!flavor)
return 0;
flavor++;
- printf("Switching to flavor '%s' subdirectory...\n", flavor);
+ fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);
return chdir(flavor);
}
+#define MAX_BACKTRACE_SZ 128
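+/* On SIGSEGV, dump the current test's log and a stack trace before the
+ * default handler (re-armed via SA_RESETHAND) terminates the process. */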
+void crash_handler(int signum)
+{
+ void *bt[MAX_BACKTRACE_SZ];
+ size_t sz;
+
+ sz = backtrace(bt, ARRAY_SIZE(bt));
+
+ if (env.test)
+ dump_test_log(env.test, true);
+ if (env.stdout)
+ stdio_restore();
+
+ fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
+ backtrace_symbols_fd(bt, sz, STDERR_FILENO);
+}
+
int main(int argc, char **argv)
{
static const struct argp argp = {
@@ -624,8 +691,14 @@ int main(int argc, char **argv)
.parser = parse_arg,
.doc = argp_program_doc,
};
+ struct sigaction sigact = {
+ .sa_handler = crash_handler,
+ .sa_flags = SA_RESETHAND,
+ };
int err, i;
+ sigaction(SIGSEGV, &sigact, NULL);
+
err = argp_parse(&argp, argc, argv, 0, NULL, &env);
if (err)
return err;
@@ -639,6 +712,12 @@ int main(int argc, char **argv)
srand(time(NULL));
env.jit_enabled = is_jit_enabled();
+ env.nr_cpus = libbpf_num_possible_cpus();
+ if (env.nr_cpus < 0) {
+ fprintf(stderr, "Failed to get number of CPUs: %d!\n",
+ env.nr_cpus);
+ return -1;
+ }
stdio_hijack();
for (i = 0; i < prog_test_cnt; i++) {
@@ -669,12 +748,13 @@ int main(int argc, char **argv)
test->test_num, test->test_name,
test->error_cnt ? "FAIL" : "OK");
+ reset_affinity();
if (test->need_cgroup_cleanup)
cleanup_cgroup_environment();
}
stdio_restore();
- printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
- env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
+ fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
+ env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
free(env.test_selector.blacklist.strs);
free(env.test_selector.whitelist.strs);
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index bcfa9ef23fda..f4aff6b8284b 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -71,6 +71,7 @@ struct test_env {
FILE *stderr;
char *log_buf;
size_t log_cnt;
+ int nr_cpus;
int succ_cnt; /* successful tests */
int sub_succ_cnt; /* successful sub-tests */
@@ -109,10 +110,10 @@ extern struct ipv6_packet pkt_v6;
int __save_errno = errno; \
if (__ret) { \
test__fail(); \
- printf("%s:FAIL:%s ", __func__, tag); \
- printf(format); \
+ fprintf(stdout, "%s:FAIL:%s ", __func__, tag); \
+ fprintf(stdout, ##format); \
} else { \
- printf("%s:PASS:%s %d nsec\n", \
+ fprintf(stdout, "%s:PASS:%s %d nsec\n", \
__func__, tag, duration); \
} \
errno = __save_errno; \
@@ -124,7 +125,7 @@ extern struct ipv6_packet pkt_v6;
int __save_errno = errno; \
if (__ret) { \
test__fail(); \
- printf("%s:FAIL:%d\n", __func__, __LINE__); \
+ fprintf(stdout, "%s:FAIL:%d\n", __func__, __LINE__); \
} \
errno = __save_errno; \
__ret; \
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 7f989b3e4e22..4d0e913bbb22 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -4,12 +4,15 @@
#include <string.h>
#include <assert.h>
#include <errno.h>
+#include <fcntl.h>
#include <poll.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include <sys/mman.h>
#include "trace_helpers.h"
+#define DEBUGFS "/sys/kernel/debug/tracing/"
+
#define MAX_SYMS 300000
static struct ksym syms[MAX_SYMS];
static int sym_cnt;
@@ -86,3 +89,25 @@ long ksym_get_addr(const char *name)
return 0;
}
+
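+/* Mirror the ftrace trace_pipe to stdout for debugging; note that this
+ * loops forever and never returns. */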
+void read_trace_pipe(void)
+{
+ int trace_fd;
+
+ trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
+ if (trace_fd < 0)
+ return;
+
+ while (1) {
+ static char buf[4096];
+ ssize_t sz;
+
+ sz = read(trace_fd, buf, sizeof(buf) - 1);
+ if (sz > 0) {
+ buf[sz] = 0;
+ puts(buf);
+ }
+ }
+}
diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h
index 0383c9b8adc1..25ef597dd03f 100644
--- a/tools/testing/selftests/bpf/trace_helpers.h
+++ b/tools/testing/selftests/bpf/trace_helpers.h
@@ -12,5 +12,6 @@ struct ksym {
int load_kallsyms(void);
struct ksym *ksym_search(long key);
long ksym_get_addr(const char *name);
+void read_trace_pipe(void);
#endif
diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
index d55f476f2237..4d0d09574bf4 100644
--- a/tools/testing/selftests/bpf/verifier/bounds.c
+++ b/tools/testing/selftests/bpf/verifier/bounds.c
@@ -257,17 +257,15 @@
* [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
*/
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* no-op or OOB pointer computation */
+ /* error on OOB pointer computation */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
/* not actually fully unbounded, but the bound is very high */
- .errstr = "R0 unbounded memory access",
+ .errstr = "value 72057594021150720 makes map_value pointer be out of bounds",
.result = REJECT
},
{
@@ -299,17 +297,15 @@
* [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
*/
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* no-op or OOB pointer computation */
+ /* error on OOB pointer computation */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
/* not actually fully unbounded, but the bound is very high */
- .errstr = "R0 unbounded memory access",
+ .errstr = "value 72057594021150720 makes map_value pointer be out of bounds",
.result = REJECT
},
{
@@ -411,16 +407,14 @@
BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
/* r1 = 0xffff'fffe (NOT 0!) */
BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
- /* computes OOB pointer */
+ /* error on computing OOB pointer */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access",
+ .errstr = "math between map_value pointer and 4294967294 is not allowed",
.result = REJECT,
},
{
@@ -506,3 +500,42 @@
.errstr = "map_value pointer and 1000000000000",
.result = REJECT
},
+{
+ "bounds check mixed 32bit and 64bit arithmatic. test1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ /* r1 = 0xffffFFFF00000001 */
+ BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 1, 3),
+ /* check ALU64 op keeps 32bit bounds */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+ BPF_JMP_A(1),
+ /* invalid ldx if bounds are lost above */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT
+},
+{
+ "bounds check mixed 32bit and 64bit arithmatic. test2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ /* r1 = 0xffffFFFF00000001 */
+ BPF_MOV64_IMM(BPF_REG_2, 3),
+ /* r1 = 0x2 */
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
+ /* check ALU32 op zero extends 64bit bounds */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 1),
+ BPF_JMP_A(1),
+ /* invalid ldx if bounds are lost above */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT
+},
diff --git a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
index f24d50f09dbe..69b048cf46d9 100644
--- a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
+++ b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
@@ -9,17 +9,17 @@
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2),
BPF_MOV64_IMM(BPF_REG_4, 256),
BPF_EMIT_CALL(BPF_FUNC_get_stack),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
- BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_1, BPF_REG_8, 16),
BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
@@ -29,7 +29,7 @@
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c
index 92762c08f5e3..93d6b1641481 100644
--- a/tools/testing/selftests/bpf/verifier/ctx.c
+++ b/tools/testing/selftests/bpf/verifier/ctx.c
@@ -91,3 +91,112 @@
.result = REJECT,
.errstr = "variable ctx access var_off=(0x0; 0x4)",
},
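+/* bpf_get_netns_cookie() accepts either the program ctx or NULL, while
+ * bpf_get_socket_cookie() requires a real ctx; the verifier must reject
+ * anything else.
+ */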
+{
+ "pass ctx or null check, 1: ctx",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_netns_cookie),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ .expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
+ .result = ACCEPT,
+},
+{
+ "pass ctx or null check, 2: null",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_netns_cookie),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ .expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
+ .result = ACCEPT,
+},
+{
+ "pass ctx or null check, 3: 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_netns_cookie),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ .expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
+ .result = REJECT,
+ .errstr = "R1 type=inv expected=ctx",
+},
+{
+ "pass ctx or null check, 4: ctx - const",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_netns_cookie),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ .expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+},
+{
+ "pass ctx or null check, 5: null (connect)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_netns_cookie),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ .expected_attach_type = BPF_CGROUP_INET4_CONNECT,
+ .result = ACCEPT,
+},
+{
+ "pass ctx or null check, 6: null (bind)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_netns_cookie),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .result = ACCEPT,
+},
+{
+ "pass ctx or null check, 7: ctx (bind)",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_socket_cookie),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .result = ACCEPT,
+},
+{
+ "pass ctx or null check, 8: null (bind)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_socket_cookie),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+ .expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
+ .result = REJECT,
+ .errstr = "R1 type=inv expected=ctx",
+},
diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
index d438193804b2..2e16b8e268f2 100644
--- a/tools/testing/selftests/bpf/verifier/ctx_skb.c
+++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
@@ -1011,6 +1011,53 @@
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "read gso_size from CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_size)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "read gso_size from CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_size)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "write gso_size from CGROUP_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, gso_size)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ .errstr = "invalid bpf_context access off=176 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "read gso_size from CLS",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_size)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
"check wire_len is not readable by sockets",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
diff --git a/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh b/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh
index 5ba5bef44d5b..bdffe698e1d1 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh
@@ -45,6 +45,9 @@ ALL_TESTS="
blackhole_ipv6
"
NUM_NETIFS=4
+: ${TIMEOUT:=20000} # ms
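+# Offload checks below poll for up to TIMEOUT ms; override TIMEOUT in the
+# environment to tune this.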
source $lib_dir/tc_common.sh
source $lib_dir/lib.sh
@@ -123,7 +124,7 @@ blackhole_ipv4()
skip_hw dst_ip 198.51.100.1 src_ip 192.0.2.1 ip_proto icmp \
action pass
- ip -4 route show 198.51.100.0/30 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload ip -4 route show 198.51.100.0/30
check_err $? "route not marked as offloaded when should"
ping_do $h1 198.51.100.1
@@ -147,7 +148,7 @@ blackhole_ipv6()
skip_hw dst_ip 2001:db8:2::1 src_ip 2001:db8:1::1 \
ip_proto icmpv6 action pass
- ip -6 route show 2001:db8:2::/120 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload ip -6 route show 2001:db8:2::/120
check_err $? "route not marked as offloaded when should"
ping6_do $h1 2001:db8:2::1
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_acl_drops.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_acl_drops.sh
new file mode 100755
index 000000000000..26044e397157
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_acl_drops.sh
@@ -0,0 +1,154 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test devlink-trap ACL drops functionality over mlxsw.
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ ingress_flow_action_drop_test
+ egress_flow_action_drop_test
+"
+NUM_NETIFS=4
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+
+h1_create()
+{
+ simple_if_init $h1
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1
+}
+
+h2_create()
+{
+ simple_if_init $h2
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2
+}
+
+switch_create()
+{
+ ip link add dev br0 type bridge vlan_filtering 1 mcast_snooping 0
+
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 master br0
+
+ ip link set dev br0 up
+ ip link set dev $swp1 up
+ ip link set dev $swp2 up
+
+ tc qdisc add dev $swp1 clsact
+ tc qdisc add dev $swp2 clsact
+}
+
+switch_destroy()
+{
+ tc qdisc del dev $swp2 clsact
+ tc qdisc del dev $swp1 clsact
+
+ ip link set dev $swp2 down
+ ip link set dev $swp1 down
+
+ ip link del dev br0
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ h1mac=$(mac_get $h1)
+ h2mac=$(mac_get $h2)
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ingress_flow_action_drop_test()
+{
+ local mz_pid
+
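+ # The egress "pass" rule counts packets that were forwarded, so
+ # devlink_trap_drop_test can use its counter (handle 101) to verify
+ # that dropped packets never reach $h2.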
+ tc filter add dev $swp2 egress protocol ip pref 1 handle 101 \
+ flower src_mac $h1mac action pass
+
+ tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 \
+ flower dst_ip 192.0.2.2 action drop
+
+ $MZ $h1 -c 0 -p 100 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -d 1msec -q &
+ mz_pid=$!
+
+ RET=0
+
+ devlink_trap_drop_test ingress_flow_action_drop acl_drops $swp2 101
+
+ log_test "ingress_flow_action_drop"
+
+ tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower
+
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip 1 101
+}
+
+egress_flow_action_drop_test()
+{
+ local mz_pid
+
+ tc filter add dev $swp2 egress protocol ip pref 2 handle 102 \
+ flower src_mac $h1mac action pass
+
+ tc filter add dev $swp2 egress protocol ip pref 1 handle 101 \
+ flower dst_ip 192.0.2.2 action drop
+
+ $MZ $h1 -c 0 -p 100 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -d 1msec -q &
+ mz_pid=$!
+
+ RET=0
+
+ devlink_trap_drop_test egress_flow_action_drop acl_drops $swp2 102
+
+ log_test "egress_flow_action_drop"
+
+ tc filter del dev $swp2 egress protocol ip pref 1 handle 101 flower
+
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip 2 102
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh
index 58cdbfb608e9..e7aecb065409 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh
@@ -107,11 +107,11 @@ source_mac_is_multicast_test()
RET=0
- devlink_trap_drop_test $trap_name $group_name $swp2
+ devlink_trap_drop_test $trap_name $group_name $swp2 101
log_test "Source MAC is multicast"
- devlink_trap_drop_cleanup $mz_pid $swp2 ip
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip 1 101
}
__vlan_tag_mismatch_test()
@@ -132,7 +132,7 @@ __vlan_tag_mismatch_test()
$MZ $h1 "$opt" -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $swp2
+ devlink_trap_drop_test $trap_name $group_name $swp2 101
# Add PVID and make sure packets are no longer dropped.
bridge vlan add vid 1 dev $swp1 pvid untagged master
@@ -148,7 +148,7 @@ __vlan_tag_mismatch_test()
devlink_trap_action_set $trap_name "drop"
- devlink_trap_drop_cleanup $mz_pid $swp2 ip
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip 1 101
}
vlan_tag_mismatch_untagged_test()
@@ -193,7 +193,7 @@ ingress_vlan_filter_test()
$MZ $h1 -Q $vid -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $swp2
+ devlink_trap_drop_test $trap_name $group_name $swp2 101
# Add the VLAN on the bridge port and make sure packets are no longer
# dropped.
@@ -212,7 +212,7 @@ ingress_vlan_filter_test()
log_test "Ingress VLAN filter"
- devlink_trap_drop_cleanup $mz_pid $swp2 ip
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip 1 101
bridge vlan del vid $vid dev $swp1 master
bridge vlan del vid $vid dev $swp2 master
@@ -237,7 +237,7 @@ __ingress_stp_filter_test()
$MZ $h1 -Q $vid -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $swp2
+ devlink_trap_drop_test $trap_name $group_name $swp2 101
# Change STP state to forwarding and make sure packets are no longer
# dropped.
@@ -254,7 +254,7 @@ __ingress_stp_filter_test()
devlink_trap_action_set $trap_name "drop"
- devlink_trap_drop_cleanup $mz_pid $swp2 ip
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip 1 101
bridge vlan del vid $vid dev $swp1 master
bridge vlan del vid $vid dev $swp2 master
@@ -308,7 +308,7 @@ port_list_is_empty_uc_test()
$MZ $h1 -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $swp2
+ devlink_trap_drop_test $trap_name $group_name $swp2 101
# Allow packets to be flooded to one port.
ip link set dev $swp2 type bridge_slave flood on
@@ -326,7 +326,7 @@ port_list_is_empty_uc_test()
log_test "Port list is empty - unicast"
- devlink_trap_drop_cleanup $mz_pid $swp2 ip
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip 1 101
ip link set dev $swp1 type bridge_slave flood on
}
@@ -354,7 +354,7 @@ port_list_is_empty_mc_test()
$MZ $h1 -c 0 -p 100 -a own -b $dmac -t ip -B $dip -d 1msec -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $swp2
+ devlink_trap_drop_test $trap_name $group_name $swp2 101
# Allow packets to be flooded to one port.
ip link set dev $swp2 type bridge_slave mcast_flood on
@@ -372,7 +372,7 @@ port_list_is_empty_mc_test()
log_test "Port list is empty - multicast"
- devlink_trap_drop_cleanup $mz_pid $swp2 ip
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip 1 101
ip link set dev $swp1 type bridge_slave mcast_flood on
}
@@ -401,7 +401,7 @@ port_loopback_filter_uc_test()
$MZ $h1 -c 0 -p 100 -a own -b $dmac -t ip -d 1msec -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $swp2
+ devlink_trap_drop_test $trap_name $group_name $swp2 101
# Allow packets to be flooded.
ip link set dev $swp2 type bridge_slave flood on
@@ -419,7 +419,7 @@ port_loopback_filter_uc_test()
log_test "Port loopback filter - unicast"
- devlink_trap_drop_cleanup $mz_pid $swp2 ip
+ devlink_trap_drop_cleanup $mz_pid $swp2 ip 1 101
}
port_loopback_filter_test()
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
index d88d8e47d11b..616f47d86a61 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh
@@ -176,11 +176,11 @@ non_ip_test()
00:00 de:ad:be:ef" &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $rp2
+ devlink_trap_drop_test $trap_name $group_name $rp2 101
log_test "Non IP"
- devlink_trap_drop_cleanup $mz_pid $rp2 "ip"
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ip" 1 101
}
__uc_dip_over_mc_dmac_test()
@@ -206,11 +206,11 @@ __uc_dip_over_mc_dmac_test()
-B $dip -d 1msec -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $rp2
+ devlink_trap_drop_test $trap_name $group_name $rp2 101
log_test "Unicast destination IP over multicast destination MAC: $desc"
- devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto 1 101
}
uc_dip_over_mc_dmac_test()
@@ -242,11 +242,11 @@ __sip_is_loopback_test()
-b $rp1mac -B $dip -d 1msec -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $rp2
+ devlink_trap_drop_test $trap_name $group_name $rp2 101
log_test "Source IP is loopback address: $desc"
- devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto 1 101
}
sip_is_loopback_test()
@@ -277,11 +277,11 @@ __dip_is_loopback_test()
-B $dip -d 1msec -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $rp2
+ devlink_trap_drop_test $trap_name $group_name $rp2 101
log_test "Destination IP is loopback address: $desc"
- devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto 1 101
}
dip_is_loopback_test()
@@ -313,11 +313,11 @@ __sip_is_mc_test()
-b $rp1mac -B $dip -d 1msec -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $rp2
+ devlink_trap_drop_test $trap_name $group_name $rp2 101
log_test "Source IP is multicast: $desc"
- devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto 1 101
}
sip_is_mc_test()
@@ -345,11 +345,11 @@ ipv4_sip_is_limited_bc_test()
-B $h2_ipv4 -d 1msec -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $rp2
+ devlink_trap_drop_test $trap_name $group_name $rp2 101
log_test "IPv4 source IP is limited broadcast"
- devlink_trap_drop_cleanup $mz_pid $rp2 "ip"
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ip" 1 101
}
ipv4_payload_get()
@@ -399,11 +399,11 @@ __ipv4_header_corrupted_test()
$MZ $h1 -c 0 -d 1msec -a $h1mac -b $rp1mac -q p=$payload &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $rp2
+ devlink_trap_drop_test $trap_name $group_name $rp2 101
log_test "IP header corrupted: $desc: IPv4"
- devlink_trap_drop_cleanup $mz_pid $rp2 "ip"
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ip" 1 101
}
ipv6_payload_get()
@@ -446,11 +446,11 @@ __ipv6_header_corrupted_test()
$MZ $h1 -c 0 -d 1msec -a $h1mac -b $rp1mac -q p=$payload &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $rp2
+ devlink_trap_drop_test $trap_name $group_name $rp2 101
log_test "IP header corrupted: $desc: IPv6"
- devlink_trap_drop_cleanup $mz_pid $rp2 "ip"
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ip" 1 101
}
ip_header_corrupted_test()
@@ -485,11 +485,11 @@ ipv6_mc_dip_reserved_scope_test()
"33:33:00:00:00:00" -B $dip -d 1msec -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $rp2
+ devlink_trap_drop_test $trap_name $group_name $rp2 101
log_test "IPv6 multicast destination IP reserved scope"
- devlink_trap_drop_cleanup $mz_pid $rp2 "ipv6"
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ipv6" 1 101
}
ipv6_mc_dip_interface_local_scope_test()
@@ -511,11 +511,11 @@ ipv6_mc_dip_interface_local_scope_test()
"33:33:00:00:00:00" -B $dip -d 1msec -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $rp2
+ devlink_trap_drop_test $trap_name $group_name $rp2 101
log_test "IPv6 multicast destination IP interface-local scope"
- devlink_trap_drop_cleanup $mz_pid $rp2 "ipv6"
+ devlink_trap_drop_cleanup $mz_pid $rp2 "ipv6" 1 101
}
__blackhole_route_test()
@@ -542,10 +542,10 @@ __blackhole_route_test()
-B $dip -d 1msec -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $rp2
+ devlink_trap_drop_test $trap_name $group_name $rp2 101
log_test "Blackhole route: IPv$flags"
- devlink_trap_drop_cleanup $mz_pid $rp2 $proto
+ devlink_trap_drop_cleanup $mz_pid $rp2 $proto 1 101
ip -$flags route del blackhole $subnet
}
@@ -641,13 +641,9 @@ erif_disabled_test()
mz_pid=$!
sleep 5
- # In order to see this trap we need a route that points to disabled RIF.
- # When ipv6 address is flushed, there is a delay and the routes are
- # deleted before the RIF and we cannot get state that we have route
- # to disabled RIF.
- # Delete IPv6 address first and then check this trap with flushing IPv4.
- ip -6 add flush dev br0
- ip -4 add flush dev br0
+ # Unlinking the port from the bridge will disable the RIF associated
+ # with br0 as it is no longer an upper of any mlxsw port.
+ ip link set dev $rp1 nomaster
t1_packets=$(devlink_trap_rx_packets_get $trap_name)
t1_bytes=$(devlink_trap_rx_bytes_get $trap_name)
@@ -659,7 +655,6 @@ erif_disabled_test()
log_test "Egress RIF disabled"
kill $mz_pid && wait $mz_pid &> /dev/null
- ip link set dev $rp1 nomaster
__addr_add_del $rp1 add 192.0.2.2/24 2001:db8:1::2/64
ip link del dev br0 type bridge
devlink_trap_action_set $trap_name "drop"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh
new file mode 100755
index 000000000000..47edf099a17e
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh
@@ -0,0 +1,384 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test devlink-trap policer functionality over mlxsw.
+
+# +---------------------------------+
+# | H1 (vrf) |
+# | + $h1 |
+# | | 192.0.2.1/24 |
+# | | |
+# | | default via 192.0.2.2 |
+# +----|----------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | SW | |
+# | + $rp1 |
+# | 192.0.2.2/24 |
+# | |
+# | 198.51.100.2/24 |
+# | + $rp2 |
+# | | |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|----------------------------+
+# | | default via 198.51.100.2 |
+# | | |
+# | | 198.51.100.1/24 |
+# | + $h2 |
+# | H2 (vrf) |
+# +---------------------------------+
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ rate_limits_test
+ burst_limits_test
+ rate_test
+ burst_test
+"
+NUM_NETIFS=4
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24
+ mtu_set $h1 10000
+
+ ip -4 route add default vrf v$h1 nexthop via 192.0.2.2
+}
+
+h1_destroy()
+{
+ ip -4 route del default vrf v$h1 nexthop via 192.0.2.2
+
+ mtu_restore $h1
+ simple_if_fini $h1 192.0.2.1/24
+}
+
+h2_create()
+{
+ simple_if_init $h2 198.51.100.1/24
+ mtu_set $h2 10000
+
+ ip -4 route add default vrf v$h2 nexthop via 198.51.100.2
+}
+
+h2_destroy()
+{
+ ip -4 route del default vrf v$h2 nexthop via 198.51.100.2
+
+ mtu_restore $h2
+ simple_if_fini $h2 198.51.100.1/24
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link set dev $rp2 up
+
+ __addr_add_del $rp1 add 192.0.2.2/24
+ __addr_add_del $rp2 add 198.51.100.2/24
+ mtu_set $rp1 10000
+ mtu_set $rp2 10000
+
+ ip -4 route add blackhole 198.51.100.100
+
+ devlink trap set $DEVLINK_DEV trap blackhole_route action trap
+}
+
+router_destroy()
+{
+ devlink trap set $DEVLINK_DEV trap blackhole_route action drop
+
+ ip -4 route del blackhole 198.51.100.100
+
+ mtu_restore $rp2
+ mtu_restore $rp1
+ __addr_add_del $rp2 del 198.51.100.2/24
+ __addr_add_del $rp1 del 192.0.2.2/24
+
+ ip link set dev $rp2 down
+ ip link set dev $rp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp1_mac=$(mac_get $rp1)
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+
+ router_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ router_destroy
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+
+ # Reload to ensure devlink-trap settings are back to default.
+ devlink_reload
+}
+
+rate_limits_test()
+{
+ RET=0
+
+ devlink trap policer set $DEVLINK_DEV policer 1 rate 0 &> /dev/null
+ check_fail $? "Policer rate was changed to rate lower than limit"
+ devlink trap policer set $DEVLINK_DEV policer 1 \
+ rate 2000000001 &> /dev/null
+ check_fail $? "Policer rate was changed to rate higher than limit"
+
+ devlink trap policer set $DEVLINK_DEV policer 1 rate 1
+ check_err $? "Failed to set policer rate to minimum"
+ devlink trap policer set $DEVLINK_DEV policer 1 rate 2000000000
+ check_err $? "Failed to set policer rate to maximum"
+
+ log_test "Trap policer rate limits"
+}
+
+burst_limits_test()
+{
+ RET=0
+
+ devlink trap policer set $DEVLINK_DEV policer 1 burst 0 &> /dev/null
+ check_fail $? "Policer burst size was changed to 0"
+ devlink trap policer set $DEVLINK_DEV policer 1 burst 17 &> /dev/null
+	check_fail $? "Policer burst size was changed to burst size that is not a power of 2"
+ devlink trap policer set $DEVLINK_DEV policer 1 burst 8 &> /dev/null
+ check_fail $? "Policer burst size was changed to burst size lower than limit"
+ devlink trap policer set $DEVLINK_DEV policer 1 \
+ burst $((2**25)) &> /dev/null
+ check_fail $? "Policer burst size was changed to burst size higher than limit"
+
+ devlink trap policer set $DEVLINK_DEV policer 1 burst 16
+ check_err $? "Failed to set policer burst size to minimum"
+ devlink trap policer set $DEVLINK_DEV policer 1 burst $((2**24))
+ check_err $? "Failed to set policer burst size to maximum"
+
+ log_test "Trap policer burst size limits"
+}
+
+trap_rate_get()
+{
+ local t0 t1
+
+ t0=$(devlink_trap_rx_packets_get blackhole_route)
+ sleep 10
+ t1=$(devlink_trap_rx_packets_get blackhole_route)
+
+ echo $(((t1 - t0) / 10))
+}
+
+policer_drop_rate_get()
+{
+ local id=$1; shift
+ local t0 t1
+
+ t0=$(devlink_trap_policer_rx_dropped_get $id)
+ sleep 10
+ t1=$(devlink_trap_policer_rx_dropped_get $id)
+
+ echo $(((t1 - t0) / 10))
+}
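+
+# A worked example of the sampling above (illustrative numbers, not measured
+# values): if the trap counter reads 5000 packets at t0 and 15000 packets ten
+# seconds later, trap_rate_get reports (15000 - 5000) / 10 = 1000 pps. Bash
+# integer arithmetic truncates, so fractional pps are rounded down.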
+
+__rate_test()
+{
+ local rate pct drop_rate
+ local id=$1; shift
+
+ RET=0
+
+ devlink trap policer set $DEVLINK_DEV policer $id rate 1000 burst 16
+ devlink trap group set $DEVLINK_DEV group l3_drops policer $id
+
+	# Send packets at the highest possible rate and make sure they are
+	# dropped by the policer. Make sure the measured received rate is
+	# about 1000 pps.
+ log_info "=== Tx rate: Highest, Policer rate: 1000 pps ==="
+
+ start_traffic $h1 192.0.2.1 198.51.100.100 $rp1_mac
+
+ sleep 5 # Take measurements when rate is stable
+
+ rate=$(trap_rate_get)
+ pct=$((100 * (rate - 1000) / 1000))
+ ((-5 <= pct && pct <= 5))
+ check_err $? "Expected rate 1000 pps, got $rate pps, which is $pct% off. Required accuracy is +-5%"
+ log_info "Expected rate 1000 pps, measured rate $rate pps"
+
+ drop_rate=$(policer_drop_rate_get $id)
+ (( drop_rate > 0 ))
+ check_err $? "Expected non-zero policer drop rate, got 0"
+ log_info "Measured policer drop rate of $drop_rate pps"
+
+ stop_traffic
+
+ # Send packets at a rate of 1000 pps and make sure they are not dropped
+ # by the policer
+ log_info "=== Tx rate: 1000 pps, Policer rate: 1000 pps ==="
+
+ start_traffic $h1 192.0.2.1 198.51.100.100 $rp1_mac -d 1msec
+
+ sleep 5 # Take measurements when rate is stable
+
+ drop_rate=$(policer_drop_rate_get $id)
+ (( drop_rate == 0 ))
+ check_err $? "Expected zero policer drop rate, got a drop rate of $drop_rate pps"
+ log_info "Measured policer drop rate of $drop_rate pps"
+
+ stop_traffic
+
+	# Unbind the policer and send packets at the highest possible rate.
+	# Make sure they are not dropped by the policer and that the measured
+	# received rate is higher than 1000 pps.
+ log_info "=== Tx rate: Highest, Policer rate: No policer ==="
+
+ devlink trap group set $DEVLINK_DEV group l3_drops nopolicer
+
+ start_traffic $h1 192.0.2.1 198.51.100.100 $rp1_mac
+
+ rate=$(trap_rate_get)
+ (( rate > 1000 ))
+ check_err $? "Expected rate higher than 1000 pps, got $rate pps"
+ log_info "Measured rate $rate pps"
+
+ drop_rate=$(policer_drop_rate_get $id)
+ (( drop_rate == 0 ))
+ check_err $? "Expected zero policer drop rate, got a drop rate of $drop_rate pps"
+ log_info "Measured policer drop rate of $drop_rate pps"
+
+ stop_traffic
+
+ log_test "Trap policer rate"
+}
+
+rate_test()
+{
+ local id
+
+ for id in $(devlink_trap_policer_ids_get); do
+ echo
+ log_info "Running rate test for policer $id"
+ __rate_test $id
+ done
+}
+
+__burst_test()
+{
+	local t0_rx t0_drop t1_rx t1_drop rx drop pct
+ local id=$1; shift
+
+ RET=0
+
+ devlink trap policer set $DEVLINK_DEV policer $id rate 1000 burst 32
+ devlink trap group set $DEVLINK_DEV group l3_drops policer $id
+
+ # Send a burst of 64 packets and make sure that about 32 are received
+ # and the rest are dropped by the policer
+	log_info "=== Tx burst size: 64, Policer burst size: 32 packets ==="
+
+ t0_rx=$(devlink_trap_rx_packets_get blackhole_route)
+ t0_drop=$(devlink_trap_policer_rx_dropped_get $id)
+
+ start_traffic $h1 192.0.2.1 198.51.100.100 $rp1_mac -c 64
+
+ t1_rx=$(devlink_trap_rx_packets_get blackhole_route)
+ t1_drop=$(devlink_trap_policer_rx_dropped_get $id)
+
+ rx=$((t1_rx - t0_rx))
+ pct=$((100 * (rx - 32) / 32))
+ ((-20 <= pct && pct <= 20))
+ check_err $? "Expected burst size of 32 packets, got $rx packets, which is $pct% off. Required accuracy is +-20%"
+ log_info "Expected burst size of 32 packets, measured burst size of $rx packets"
+
+ drop=$((t1_drop - t0_drop))
+ (( drop > 0 ))
+ check_err $? "Expected non-zero policer drops, got 0"
+ log_info "Measured policer drops of $drop packets"
+
+ # Send a burst of 16 packets and make sure that 16 are received
+ # and that none are dropped by the policer
+	log_info "=== Tx burst size: 16, Policer burst size: 32 packets ==="
+
+ t0_rx=$(devlink_trap_rx_packets_get blackhole_route)
+ t0_drop=$(devlink_trap_policer_rx_dropped_get $id)
+
+ start_traffic $h1 192.0.2.1 198.51.100.100 $rp1_mac -c 16
+
+ t1_rx=$(devlink_trap_rx_packets_get blackhole_route)
+ t1_drop=$(devlink_trap_policer_rx_dropped_get $id)
+
+ rx=$((t1_rx - t0_rx))
+ (( rx == 16 ))
+ check_err $? "Expected burst size of 16 packets, got $rx packets"
+ log_info "Expected burst size of 16 packets, measured burst size of $rx packets"
+
+ drop=$((t1_drop - t0_drop))
+ (( drop == 0 ))
+ check_err $? "Expected zero policer drops, got $drop"
+ log_info "Measured policer drops of $drop packets"
+
+ # Unbind the policer and send a burst of 64 packets. Make sure that
+ # 64 packets are received and that none are dropped by the policer
+ log_info "=== Tx burst size: 64, Policer burst size: No policer ==="
+
+ devlink trap group set $DEVLINK_DEV group l3_drops nopolicer
+
+ t0_rx=$(devlink_trap_rx_packets_get blackhole_route)
+ t0_drop=$(devlink_trap_policer_rx_dropped_get $id)
+
+ start_traffic $h1 192.0.2.1 198.51.100.100 $rp1_mac -c 64
+
+ t1_rx=$(devlink_trap_rx_packets_get blackhole_route)
+ t1_drop=$(devlink_trap_policer_rx_dropped_get $id)
+
+ rx=$((t1_rx - t0_rx))
+ (( rx == 64 ))
+ check_err $? "Expected burst size of 64 packets, got $rx packets"
+ log_info "Expected burst size of 64 packets, measured burst size of $rx packets"
+
+ drop=$((t1_drop - t0_drop))
+ (( drop == 0 ))
+ check_err $? "Expected zero policer drops, got $drop"
+ log_info "Measured policer drops of $drop packets"
+
+ log_test "Trap policer burst size"
+}
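+
+# Tolerance example for the burst check above (illustrative numbers): with an
+# expected burst of 32 packets, rx=36 gives pct = 100 * (36 - 32) / 32 = 12,
+# which is within the +-20% bound, while rx=40 gives pct = 25 and would fail.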
+
+burst_test()
+{
+ local id
+
+ for id in $(devlink_trap_policer_ids_get); do
+ echo
+ log_info "Running burst size test for policer $id"
+ __burst_test $id
+ done
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan.sh
index fd19161dd4ec..e11a416323cf 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan.sh
@@ -314,11 +314,11 @@ overlay_smac_is_mc_test()
-B 192.0.2.17 -t udp sp=12345,dp=$VXPORT,p=$payload -q &
mz_pid=$!
- devlink_trap_drop_test $trap_name $group_name $swp1
+ devlink_trap_drop_test $trap_name $group_name $swp1 101
log_test "Overlay source MAC is multicast"
- devlink_trap_drop_cleanup $mz_pid $swp1 "ip"
+ devlink_trap_drop_cleanup $mz_pid $swp1 "ip" 1 101
}
trap cleanup EXIT
diff --git a/tools/testing/selftests/drivers/net/mlxsw/extack.sh b/tools/testing/selftests/drivers/net/mlxsw/extack.sh
index d72d8488a3b2..7a0a99c1d22f 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/extack.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/extack.sh
@@ -8,7 +8,8 @@ lib_dir=$(dirname $0)/../../../net/forwarding
ALL_TESTS="
netdev_pre_up_test
vxlan_vlan_add_test
- port_vlan_add_test
+ vxlan_bridge_create_test
+ bridge_create_test
"
NUM_NETIFS=2
source $lib_dir/lib.sh
@@ -106,32 +107,56 @@ vxlan_vlan_add_test()
ip link del dev br1
}
-port_vlan_add_test()
+vxlan_bridge_create_test()
{
RET=0
- ip link add name br1 up type bridge vlan_filtering 1 mcast_snooping 0
-
# Unsupported configuration: mlxsw demands VXLAN with "noudpcsum".
ip link add name vx1 up type vxlan id 1000 \
local 192.0.2.17 remote 192.0.2.18 \
dstport 4789 tos inherit ttl 100
- ip link set dev $swp1 master br1
- check_err $?
-
- bridge vlan del dev $swp1 vid 1
+ # Test with VLAN-aware bridge.
+ ip link add name br1 up type bridge vlan_filtering 1 mcast_snooping 0
ip link set dev vx1 master br1
+
+ ip link set dev $swp1 master br1 2>&1 > /dev/null \
+ | grep -q mlxsw_spectrum
check_err $?
- bridge vlan add dev $swp1 vid 1 pvid untagged 2>&1 >/dev/null \
+ # Test with VLAN-unaware bridge.
+ ip link set dev br1 type bridge vlan_filtering 0
+
+ ip link set dev $swp1 master br1 2>&1 > /dev/null \
| grep -q mlxsw_spectrum
check_err $?
- log_test "extack - map VLAN at port"
+ log_test "extack - bridge creation with VXLAN"
+ ip link del dev br1
ip link del dev vx1
+}
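+
+# A note on the "2>&1 > /dev/null | grep" pattern used above: the pipe is set
+# up first and redirections are then applied left to right, so stderr (where
+# ip prints the extack message) goes into the pipe while stdout is discarded.
+# grep -q mlxsw_spectrum then verifies the failure was reported by the driver.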
+
+bridge_create_test()
+{
+ RET=0
+
+ ip link add name br1 up type bridge vlan_filtering 1
+ ip link add name br2 up type bridge vlan_filtering 1
+
+ ip link set dev $swp1 master br1
+ check_err $?
+
+ # Only one VLAN-aware bridge is supported, so this should fail with
+ # an extack.
+ ip link set dev $swp2 master br2 2>&1 > /dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $?
+
+ log_test "extack - multiple VLAN-aware bridges creation"
+
+ ip link del dev br2
ip link del dev br1
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh b/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh
new file mode 100644
index 000000000000..cbe50f260a40
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+##############################################################################
+# Defines
+
+if [[ ! -v MLXSW_CHIP ]]; then
+ MLXSW_CHIP=$(devlink -j dev info $DEVLINK_DEV | jq -r '.[][]["driver"]')
+ if [ -z "$MLXSW_CHIP" ]; then
+ echo "SKIP: Device $DEVLINK_DEV doesn't support devlink info command"
+ exit 1
+ fi
+fi
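+
+# For reference, a sketch of what the query above returns (the device name
+# and output here are illustrative, not taken from a real system):
+#
+#   $ devlink -j dev info pci/0000:03:00.0 | jq -r '.[][]["driver"]'
+#   mlxsw_spectrum
+#
+# MLXSW_CHIP can also be pre-set in the environment to skip the query.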
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh
index eff6393ce974..71066bc4b886 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh
@@ -114,23 +114,12 @@ ping_ipv4()
ping_test $h1 192.0.2.2
}
-wait_for_packets()
-{
- local t0=$1; shift
- local prio_observe=$1; shift
-
- local t1=$(ethtool_stats_get $swp1 rx_frames_prio_$prio_observe)
- local delta=$((t1 - t0))
- echo $delta
- ((delta >= 10))
-}
-
__test_defprio()
{
local prio_install=$1; shift
local prio_observe=$1; shift
- local delta
local key
+ local t1
local i
RET=0
@@ -139,9 +128,10 @@ __test_defprio()
local t0=$(ethtool_stats_get $swp1 rx_frames_prio_$prio_observe)
mausezahn -q $h1 -d 100m -c 10 -t arp reply
- delta=$(busywait "$HIT_TIMEOUT" wait_for_packets $t0 $prio_observe)
+ t1=$(busywait "$HIT_TIMEOUT" until_counter_is ">= $((t0 + 10))" \
+ ethtool_stats_get $swp1 rx_frames_prio_$prio_observe)
- check_err $? "Default priority $prio_install/$prio_observe: Expected to capture 10 packets, got $delta."
+ check_err $? "Default priority $prio_install/$prio_observe: Expected to capture 10 packets, got $((t1 - t0))."
log_test "Default priority $prio_install/$prio_observe"
defprio_uninstall $swp1 $prio_install
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
index c745ce3befee..4cb2aa65278a 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
@@ -31,6 +31,7 @@ ALL_TESTS="
ping_ipv4
test_update
test_no_update
+ test_pedit_norewrite
test_dscp_leftover
"
@@ -56,6 +57,11 @@ zero()
echo 0
}
+three()
+{
+ echo 3
+}
+
h1_create()
{
simple_if_init $h1 192.0.2.1/28
@@ -103,6 +109,9 @@ switch_create()
simple_if_init $swp1 192.0.2.2/28
__simple_if_init $swp2 v$swp1 192.0.2.17/28
+ tc qdisc add dev $swp1 clsact
+ tc qdisc add dev $swp2 clsact
+
lldptool -T -i $swp1 -V APP $(dscp_map 0) >/dev/null
lldptool -T -i $swp2 -V APP $(dscp_map 0) >/dev/null
lldpad_app_wait_set $swp1
@@ -115,6 +124,9 @@ switch_destroy()
lldptool -T -i $swp1 -V APP -d $(dscp_map 0) >/dev/null
lldpad_app_wait_del
+ tc qdisc del dev $swp2 clsact
+ tc qdisc del dev $swp1 clsact
+
__simple_if_fini $swp2 192.0.2.17/28
simple_if_fini $swp1 192.0.2.2/28
}
@@ -223,18 +235,36 @@ __test_update()
test_update()
{
+ echo "Test net.ipv4.ip_forward_update_priority=1"
__test_update 1 reprioritize
}
test_no_update()
{
+ echo "Test net.ipv4.ip_forward_update_priority=0"
__test_update 0 echo
}
+# Test that when DSCP is updated by pedit, the DSCP rewrite is turned off.
+test_pedit_norewrite()
+{
+ echo "Test no DSCP rewrite after DSCP is updated by pedit"
+
+ tc filter add dev $swp1 ingress handle 101 pref 1 prot ip flower \
+ action pedit ex munge ip dsfield set $((3 << 2)) retain 0xfc \
+ action skbedit priority 3
+
+ __test_update 0 three
+
+ tc filter del dev $swp1 ingress pref 1
+}
+
# Test that when the last APP rule is removed, the prio->DSCP map is properly
# set to zeroes, and that the last APP rule does not stay active in the ASIC.
test_dscp_leftover()
{
+ echo "Test that last removed DSCP rule is deconfigured correctly"
+
lldptool -T -i $swp2 -V APP -d $(dscp_map 0) >/dev/null
lldpad_app_wait_del
diff --git a/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh
index d231649b4f01..e93878d42596 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/router_scale.sh
@@ -2,16 +2,15 @@
# SPDX-License-Identifier: GPL-2.0
ROUTER_NUM_NETIFS=4
+: ${TIMEOUT:=20000} # ms
router_h1_create()
{
simple_if_init $h1 192.0.1.1/24
- ip route add 193.0.0.0/8 via 192.0.1.2 dev $h1
}
router_h1_destroy()
{
- ip route del 193.0.0.0/8 via 192.0.1.2 dev $h1
simple_if_fini $h1 192.0.1.1/24
}
@@ -64,13 +63,15 @@ router_setup_prepare()
router_create
}
-router_offload_validate()
+wait_for_routes()
{
- local route_count=$1
- local offloaded_count
+ local t0=$1; shift
+ local route_count=$1; shift
- offloaded_count=$(ip route | grep -o 'offload' | wc -l)
- [[ $offloaded_count -ge $route_count ]]
+ local t1=$(ip route | grep -o 'offload' | wc -l)
+ local delta=$((t1 - t0))
+ echo $delta
+ [[ $delta -ge $route_count ]]
}
router_routes_create()
@@ -90,8 +91,8 @@ router_routes_create()
break 3
fi
- echo route add 193.${i}.${j}.${k}/32 via \
- 192.0.2.1 dev $rp2 >> $ROUTE_FILE
+ echo route add 193.${i}.${j}.${k}/32 dev $rp2 \
+ >> $ROUTE_FILE
((count++))
done
done
@@ -111,45 +112,19 @@ router_test()
{
local route_count=$1
local should_fail=$2
- local count=0
+ local delta
RET=0
+ local t0=$(ip route | grep -o 'offload' | wc -l)
router_routes_create $route_count
+ delta=$(busywait "$TIMEOUT" wait_for_routes $t0 $route_count)
- router_offload_validate $route_count
- check_err_fail $should_fail $? "Offload of $route_count routes"
+ check_err_fail $should_fail $? "Offload routes: Expected $route_count, got $delta."
if [[ $RET -ne 0 ]] || [[ $should_fail -eq 1 ]]; then
return
fi
- tc filter add dev $h2 ingress protocol ip pref 1 flower \
- skip_sw dst_ip 193.0.0.0/8 action drop
-
- for i in {0..255}
- do
- for j in {0..255}
- do
- for k in {0..255}
- do
- if [[ $count -eq $route_count ]]; then
- break 3
- fi
-
- $MZ $h1 -c 1 -p 64 -a $h1mac -b $rp1mac \
- -A 192.0.1.1 -B 193.${i}.${j}.${k} \
- -t ip -q
- ((count++))
- done
- done
- done
-
- tc_check_packets "dev $h2 ingress" 1 $route_count
- check_err $? "Offload mismatch"
-
- tc filter del dev $h2 ingress protocol ip pref 1 flower \
- skip_sw dst_ip 193.0.0.0/8 action drop
-
router_routes_destroy
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
index 5c39e5f6a480..f4031002d5e9 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
@@ -32,6 +32,7 @@ ALL_TESTS="
devlink_reload_test
"
NUM_NETIFS=2
+: ${TIMEOUT:=20000} # ms
source $lib_dir/lib.sh
source $lib_dir/devlink_lib.sh
@@ -360,20 +361,24 @@ vlan_rif_refcount_test()
ip link add link br0 name br0.10 up type vlan id 10
ip -6 address add 2001:db8:1::1/64 dev br0.10
- ip -6 route get fibmatch 2001:db8:1::2 dev br0.10 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip -6 route get fibmatch 2001:db8:1::2 dev br0.10
check_err $? "vlan rif was not created before adding port to vlan"
bridge vlan add vid 10 dev $swp1
- ip -6 route get fibmatch 2001:db8:1::2 dev br0.10 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip -6 route get fibmatch 2001:db8:1::2 dev br0.10
check_err $? "vlan rif was destroyed after adding port to vlan"
bridge vlan del vid 10 dev $swp1
- ip -6 route get fibmatch 2001:db8:1::2 dev br0.10 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip -6 route get fibmatch 2001:db8:1::2 dev br0.10
check_err $? "vlan rif was destroyed after removing port from vlan"
ip link set dev $swp1 nomaster
- ip -6 route get fibmatch 2001:db8:1::2 dev br0.10 | grep -q offload
- check_fail $? "vlan rif was not destroyed after unlinking port from bridge"
+ busywait "$TIMEOUT" not wait_for_offload \
+ ip -6 route get fibmatch 2001:db8:1::2 dev br0.10
+ check_err $? "vlan rif was not destroyed after unlinking port from bridge"
log_test "vlan rif refcount"
@@ -401,22 +406,28 @@ subport_rif_refcount_test()
ip -6 address add 2001:db8:1::1/64 dev bond1
ip -6 address add 2001:db8:2::1/64 dev bond1.10
- ip -6 route get fibmatch 2001:db8:1::2 dev bond1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip -6 route get fibmatch 2001:db8:1::2 dev bond1
check_err $? "subport rif was not created on lag device"
- ip -6 route get fibmatch 2001:db8:2::2 dev bond1.10 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip -6 route get fibmatch 2001:db8:2::2 dev bond1.10
check_err $? "subport rif was not created on vlan device"
ip link set dev $swp1 nomaster
- ip -6 route get fibmatch 2001:db8:1::2 dev bond1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip -6 route get fibmatch 2001:db8:1::2 dev bond1
check_err $? "subport rif of lag device was destroyed when should not"
- ip -6 route get fibmatch 2001:db8:2::2 dev bond1.10 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip -6 route get fibmatch 2001:db8:2::2 dev bond1.10
check_err $? "subport rif of vlan device was destroyed when should not"
ip link set dev $swp2 nomaster
- ip -6 route get fibmatch 2001:db8:1::2 dev bond1 | grep -q offload
- check_fail $? "subport rif of lag device was not destroyed when should"
- ip -6 route get fibmatch 2001:db8:2::2 dev bond1.10 | grep -q offload
- check_fail $? "subport rif of vlan device was not destroyed when should"
+ busywait "$TIMEOUT" not wait_for_offload \
+ ip -6 route get fibmatch 2001:db8:1::2 dev bond1
+ check_err $? "subport rif of lag device was not destroyed when should"
+ busywait "$TIMEOUT" not wait_for_offload \
+ ip -6 route get fibmatch 2001:db8:2::2 dev bond1.10
+ check_err $? "subport rif of vlan device was not destroyed when should"
log_test "subport rif refcount"
@@ -575,7 +586,8 @@ bridge_extern_learn_test()
bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn
- bridge fdb show brport $swp1 | grep de:ad:be:ef:13:37 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ bridge fdb show brport $swp1 de:ad:be:ef:13:37
check_err $? "fdb entry not marked as offloaded when should"
log_test "externally learned fdb entry"
@@ -595,9 +607,11 @@ neigh_offload_test()
ip -6 neigh add 2001:db8:1::2 lladdr de:ad:be:ef:13:37 nud perm \
dev $swp1
- ip -4 neigh show dev $swp1 | grep 192.0.2.2 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip -4 neigh show dev $swp1 192.0.2.2
check_err $? "ipv4 neigh entry not marked as offloaded when should"
- ip -6 neigh show dev $swp1 | grep 2001:db8:1::2 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip -6 neigh show dev $swp1 2001:db8:1::2
check_err $? "ipv6 neigh entry not marked as offloaded when should"
log_test "neighbour offload indication"
@@ -623,25 +637,31 @@ nexthop_offload_test()
ip -6 route add 2001:db8:2::/64 vrf v$swp1 \
nexthop via 2001:db8:1::2 dev $swp1
- ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip -4 route show 198.51.100.0/24 vrf v$swp1
check_err $? "ipv4 nexthop not marked as offloaded when should"
- ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip -6 route show 2001:db8:2::/64 vrf v$swp1
check_err $? "ipv6 nexthop not marked as offloaded when should"
ip link set dev $swp2 down
sleep 1
- ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload
- check_fail $? "ipv4 nexthop marked as offloaded when should not"
- ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload
- check_fail $? "ipv6 nexthop marked as offloaded when should not"
+ busywait "$TIMEOUT" not wait_for_offload \
+ ip -4 route show 198.51.100.0/24 vrf v$swp1
+ check_err $? "ipv4 nexthop marked as offloaded when should not"
+ busywait "$TIMEOUT" not wait_for_offload \
+ ip -6 route show 2001:db8:2::/64 vrf v$swp1
+ check_err $? "ipv6 nexthop marked as offloaded when should not"
ip link set dev $swp2 up
setup_wait
- ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip -4 route show 198.51.100.0/24 vrf v$swp1
check_err $? "ipv4 nexthop not marked as offloaded after neigh add"
- ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip -6 route show 2001:db8:2::/64 vrf v$swp1
check_err $? "ipv6 nexthop not marked as offloaded after neigh add"
log_test "nexthop offload indication"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh
index c9fc4d4885c1..94c37124a840 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh
@@ -56,11 +56,19 @@ switch_destroy()
}
# Callback from sch_ets_tests.sh
-get_stats()
+collect_stats()
{
- local band=$1; shift
+ local -a streams=("$@")
+ local stream
- ethtool_stats_get "$h2" rx_octets_prio_$band
+ # Wait for qdisc counter update so that we don't get it mid-way through.
+ busywait_for_counter 1000 +1 \
+ qdisc_parent_stats_get $swp2 10:$((${streams[0]} + 1)) .bytes \
+ > /dev/null
+
+ for stream in ${streams[@]}; do
+ qdisc_parent_stats_get $swp2 10:$((stream + 1)) .bytes
+ done
}
bail_on_lldpad
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
new file mode 100644
index 000000000000..0d347d48c112
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
@@ -0,0 +1,533 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# This test sends a >1Gbps stream of traffic from H1 to the switch, which
+# forwards it to a 1Gbps port. This 1Gbps stream is then looped back to the
+# switch and forwarded to the port under test, $swp3, which is also 1Gbps.
+#
+# This way, $swp3 should be 100% filled with traffic without any of it spilling
+# to the backlog. Any extra packets sent should go to the backlog at an almost
+# 1:1 ratio. That is what H2 is used for: it sends the extra traffic that
+# creates the backlog.
+#
+# A RED Qdisc is installed on $swp3. The configuration is such that the minimum
+# and maximum size are 1 byte apart, so there is a very clear border under which
+# no marking or dropping takes place, and above which everything is marked or
+# dropped.
+#
+# The test uses the buffer build-up behavior to test the installed RED.
+#
+# In order to test WRED, $swp3 actually contains RED under PRIO (or ETS), with
+# two different configurations. Traffic is prioritized using 802.1p and relies
+# on the implicit mlxsw configuration, where packet priority is taken 1:1 from
+# the 802.1p marking.
+#
+# +--------------------------+ +--------------------------+
+# | H1 | | H2 |
+# | + $h1.10 | | + $h2.10 |
+# | | 192.0.2.1/28 | | | 192.0.2.2/28 |
+# | | | | | |
+# | | $h1.11 + | | | $h2.11 + |
+# | | 192.0.2.17/28 | | | | 192.0.2.18/28 | |
+# | | | | | | | |
+# | \______ ______/ | | \______ ______/ |
+# | \ / | | \ / |
+# | + $h1 | | + $h2 |
+# +-------------|------------+ +-------------|------------+
+# | >1Gbps |
+# +-------------|------------------------------------------------|------------+
+# | SW + $swp1 + $swp2 |
+# | _______/ \___________ ___________/ \_______ |
+# | / \ / \ |
+# | +-|-----------------+ | +-|-----------------+ | |
+# | | + $swp1.10 | | | + $swp2.10 | | |
+# | | | | .-------------+ $swp5.10 | | |
+# | | BR1_10 | | | | | | |
+# | | | | | | BR2_10 | | |
+# | | + $swp2.10 | | | | | | |
+# | +-|-----------------+ | | | + $swp3.10 | | |
+# | | | | +-|-----------------+ | |
+# | | +-----------------|-+ | | +-----------------|-+ |
+# | | | $swp1.11 + | | | | $swp2.11 + | |
+# | | | | | .-----------------+ $swp5.11 | |
+# | | | BR1_11 | | | | | | |
+# | | | | | | | | BR2_11 | |
+# | | | $swp2.11 + | | | | | | |
+# | | +-----------------|-+ | | | | $swp3.11 + | |
+# | | | | | | +-----------------|-+ |
+# | \_______ ___________/ | | \___________ _______/ |
+# | \ / \ / \ / |
+# | + $swp4 + $swp5 + $swp3 |
+# +-------------|----------------------|-------------------------|------------+
+# | | | 1Gbps
+# \________1Gbps_________/ |
+# +----------------------------|------------+
+# | H3 + $h3 |
+# | _____________________/ \_______ |
+# | / \ |
+# | | | |
+# | + $h3.10 $h3.11 + |
+# | 192.0.2.3/28 192.0.2.19/28 |
+# +-----------------------------------------+
+
+NUM_NETIFS=8
+CHECK_TC="yes"
+lib_dir=$(dirname $0)/../../../net/forwarding
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+source qos_lib.sh
+
+ipaddr()
+{
+ local host=$1; shift
+ local vlan=$1; shift
+
+ echo 192.0.2.$((16 * (vlan - 10) + host))
+}
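+
+# Worked examples of the address scheme above, matching the diagram:
+#   ipaddr 1 10 -> 192.0.2.$((16 * 0 + 1)) -> 192.0.2.1
+#   ipaddr 3 11 -> 192.0.2.$((16 * 1 + 3)) -> 192.0.2.19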
+
+host_create()
+{
+ local dev=$1; shift
+ local host=$1; shift
+
+ simple_if_init $dev
+ mtu_set $dev 10000
+
+ vlan_create $dev 10 v$dev $(ipaddr $host 10)/28
+ ip link set dev $dev.10 type vlan egress 0:0
+
+ vlan_create $dev 11 v$dev $(ipaddr $host 11)/28
+ ip link set dev $dev.11 type vlan egress 0:1
+}
+
+host_destroy()
+{
+ local dev=$1; shift
+
+ vlan_destroy $dev 11
+ vlan_destroy $dev 10
+ mtu_restore $dev
+ simple_if_fini $dev
+}
+
+h1_create()
+{
+ host_create $h1 1
+}
+
+h1_destroy()
+{
+ host_destroy $h1
+}
+
+h2_create()
+{
+ host_create $h2 2
+
+ # Some of the tests in this suite use multicast traffic. As this traffic
+	# enters BR2_10 or BR2_11, it is flooded to all other ports. Thus
+ # e.g. traffic ingressing through $swp2 is flooded to $swp3 (the
+ # intended destination) and $swp5 (which is intended as ingress for
+ # another stream of traffic).
+ #
+ # This is generally not a problem, but if the $swp5 throughput is lower
+ # than $swp2 throughput, there will be a build-up at $swp5. That may
+ # cause packets to fail to queue up at $swp3 due to shared buffer
+ # quotas, and the test to spuriously fail.
+ #
+ # Prevent this by setting the speed of $h2 to 1Gbps.
+
+ ethtool -s $h2 speed 1000 autoneg off
+}
+
+h2_destroy()
+{
+ ethtool -s $h2 autoneg on
+ host_destroy $h2
+}
+
+h3_create()
+{
+ host_create $h3 3
+ ethtool -s $h3 speed 1000 autoneg off
+}
+
+h3_destroy()
+{
+ ethtool -s $h3 autoneg on
+ host_destroy $h3
+}
+
+switch_create()
+{
+ local intf
+ local vlan
+
+ ip link add dev br1_10 type bridge
+ ip link add dev br1_11 type bridge
+
+ ip link add dev br2_10 type bridge
+ ip link add dev br2_11 type bridge
+
+ for intf in $swp1 $swp2 $swp3 $swp4 $swp5; do
+ ip link set dev $intf up
+ mtu_set $intf 10000
+ done
+
+ for intf in $swp1 $swp4; do
+ for vlan in 10 11; do
+ vlan_create $intf $vlan
+ ip link set dev $intf.$vlan master br1_$vlan
+ ip link set dev $intf.$vlan up
+ done
+ done
+
+ for intf in $swp2 $swp3 $swp5; do
+ for vlan in 10 11; do
+ vlan_create $intf $vlan
+ ip link set dev $intf.$vlan master br2_$vlan
+ ip link set dev $intf.$vlan up
+ done
+ done
+
+ ip link set dev $swp4.10 type vlan egress 0:0
+ ip link set dev $swp4.11 type vlan egress 0:1
+ for intf in $swp1 $swp2 $swp5; do
+ for vlan in 10 11; do
+ ip link set dev $intf.$vlan type vlan ingress 0:0 1:1
+ done
+ done
+
+ for intf in $swp2 $swp3 $swp4 $swp5; do
+ ethtool -s $intf speed 1000 autoneg off
+ done
+
+ ip link set dev br1_10 up
+ ip link set dev br1_11 up
+ ip link set dev br2_10 up
+ ip link set dev br2_11 up
+
+ local size=$(devlink_pool_size_thtype 0 | cut -d' ' -f 1)
+ devlink_port_pool_th_set $swp3 8 $size
+}
+
+switch_destroy()
+{
+ local intf
+ local vlan
+
+ devlink_port_pool_th_restore $swp3 8
+
+ tc qdisc del dev $swp3 root 2>/dev/null
+
+ ip link set dev br2_11 down
+ ip link set dev br2_10 down
+ ip link set dev br1_11 down
+ ip link set dev br1_10 down
+
+ for intf in $swp5 $swp4 $swp3 $swp2; do
+ ethtool -s $intf autoneg on
+ done
+
+ for intf in $swp5 $swp3 $swp2 $swp4 $swp1; do
+ for vlan in 11 10; do
+ ip link set dev $intf.$vlan down
+ ip link set dev $intf.$vlan nomaster
+ vlan_destroy $intf $vlan
+ done
+
+ mtu_restore $intf
+ ip link set dev $intf down
+ done
+
+ ip link del dev br2_11
+ ip link del dev br2_10
+ ip link del dev br1_11
+ ip link del dev br1_10
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ swp4=${NETIFS[p7]}
+ swp5=${NETIFS[p8]}
+
+ h3_mac=$(mac_get $h3)
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ h3_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h3_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1.10 $(ipaddr 3 10) " from host 1, vlan 10"
+ ping_test $h1.11 $(ipaddr 3 11) " from host 1, vlan 11"
+ ping_test $h2.10 $(ipaddr 3 10) " from host 2, vlan 10"
+ ping_test $h2.11 $(ipaddr 3 11) " from host 2, vlan 11"
+}
+
+get_tc()
+{
+ local vlan=$1; shift
+
+ echo $((vlan - 10))
+}
+
+get_qdisc_handle()
+{
+ local vlan=$1; shift
+
+ local tc=$(get_tc $vlan)
+ local band=$((8 - tc))
+
+ # Handle is 107: for TC1, 108: for TC0.
+ echo "10$band:"
+}
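+
+# For example, vlan 10 maps to TC 0 and band 8 - 0 = 8, hence handle "108:";
+# vlan 11 maps to TC 1 and band 7, hence handle "107:". This matches the RED
+# child qdiscs that the callers of this file install under handle 10:.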
+
+get_qdisc_backlog()
+{
+ local vlan=$1; shift
+
+ qdisc_stats_get $swp3 $(get_qdisc_handle $vlan) .backlog
+}
+
+get_mc_transmit_queue()
+{
+ local vlan=$1; shift
+
+ local tc=$(($(get_tc $vlan) + 8))
+ ethtool_stats_get $swp3 tc_transmit_queue_tc_$tc
+}
+
+get_nmarked()
+{
+ local vlan=$1; shift
+
+ ethtool_stats_get $swp3 ecn_marked
+}
+
+get_qdisc_npackets()
+{
+ local vlan=$1; shift
+
+ busywait_for_counter 1100 +1 \
+ qdisc_stats_get $swp3 $(get_qdisc_handle $vlan) .packets
+}
+
+# This sends traffic in an attempt to build a backlog of $size. Returns 0 on
+# success. After 10 failed attempts it bails out and returns 1. It dumps the
+# backlog size to stdout.
+build_backlog()
+{
+ local vlan=$1; shift
+ local size=$1; shift
+ local proto=$1; shift
+
+ local tc=$((vlan - 10))
+ local band=$((8 - tc))
+ local cur=-1
+ local i=0
+
+ while :; do
+ local cur=$(busywait 1100 until_counter_is "> $cur" \
+ get_qdisc_backlog $vlan)
+ local diff=$((size - cur))
+ local pkts=$(((diff + 7999) / 8000))
+
+ if ((cur >= size)); then
+ echo $cur
+ return 0
+ elif ((i++ > 10)); then
+ echo $cur
+ return 1
+ fi
+
+ $MZ $h2.$vlan -p 8000 -a own -b $h3_mac \
+ -A $(ipaddr 2 $vlan) -B $(ipaddr 3 $vlan) \
+ -t $proto -q -c $pkts "$@"
+ done
+}
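+
+# Illustration of the packet-count arithmetic above (numbers are made up):
+# with size=200000 and a measured cur=120000, diff is 80000 and the ceiling
+# division (80000 + 7999) / 8000 yields 10, i.e. ten more 8000-byte packets,
+# just enough to cover the remaining backlog.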
+
+check_marking()
+{
+ local vlan=$1; shift
+ local cond=$1; shift
+
+ local npackets_0=$(get_qdisc_npackets $vlan)
+ local nmarked_0=$(get_nmarked $vlan)
+ sleep 5
+ local npackets_1=$(get_qdisc_npackets $vlan)
+ local nmarked_1=$(get_nmarked $vlan)
+
+ local nmarked_d=$((nmarked_1 - nmarked_0))
+ local npackets_d=$((npackets_1 - npackets_0))
+ local pct=$((100 * nmarked_d / npackets_d))
+
+ echo $pct
+ ((pct $cond))
+}
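+
+# check_marking prints the marking percentage to stdout and returns the
+# result of evaluating it against the caller-supplied condition, e.g.:
+#
+#   pct=$(check_marking 10 ">= 95")
+#   check_err $? "Got $pct% marked packets, expected >= 95."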
+
+ecn_test_common()
+{
+ local name=$1; shift
+ local vlan=$1; shift
+ local limit=$1; shift
+ local backlog
+ local pct
+
+	# Build the below-the-limit backlog using UDP. We could use TCP just
+	# fine, but this way we get proof that UDP is accepted when the queue
+	# length is below the limit. The main stream is using TCP, and if the
+	# limit is misconfigured, we would see this traffic being ECN marked.
+ RET=0
+ backlog=$(build_backlog $vlan $((2 * limit / 3)) udp)
+ check_err $? "Could not build the requested backlog"
+ pct=$(check_marking $vlan "== 0")
+	check_err $? "backlog $backlog / $limit: Got $pct% marked packets, expected == 0."
+ log_test "TC $((vlan - 10)): $name backlog < limit"
+
+ # Now push TCP, because non-TCP traffic would be early-dropped after the
+ # backlog crosses the limit, and we want to make sure that the backlog
+ # is above the limit.
+ RET=0
+ backlog=$(build_backlog $vlan $((3 * limit / 2)) tcp tos=0x01)
+ check_err $? "Could not build the requested backlog"
+ pct=$(check_marking $vlan ">= 95")
+	check_err $? "backlog $backlog / $limit: Got $pct% marked packets, expected >= 95."
+ log_test "TC $((vlan - 10)): $name backlog > limit"
+}
+
+do_ecn_test()
+{
+ local vlan=$1; shift
+ local limit=$1; shift
+ local name=ECN
+
+ start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \
+ $h3_mac tos=0x01
+ sleep 1
+
+ ecn_test_common "$name" $vlan $limit
+
+	# Above we saw that UDP gets accepted when the backlog is below the
+	# limit. Now that it is above, it should all get dropped, and backlog
+	# building should fail.
+ RET=0
+ build_backlog $vlan $((2 * limit)) udp >/dev/null
+ check_fail $? "UDP traffic went into backlog instead of being early-dropped"
+ log_test "TC $((vlan - 10)): $name backlog > limit: UDP early-dropped"
+
+ stop_traffic
+ sleep 1
+}
+
+do_ecn_nodrop_test()
+{
+ local vlan=$1; shift
+ local limit=$1; shift
+ local name="ECN nodrop"
+
+ start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \
+ $h3_mac tos=0x01
+ sleep 1
+
+ ecn_test_common "$name" $vlan $limit
+
+	# Above we saw that UDP gets accepted when the backlog is below the
+	# limit. Now that it is above, in nodrop mode, make sure it goes to
+	# the backlog as well.
+ RET=0
+ build_backlog $vlan $((2 * limit)) udp >/dev/null
+ check_err $? "UDP traffic was early-dropped instead of getting into backlog"
+ log_test "TC $((vlan - 10)): $name backlog > limit: UDP not dropped"
+
+ stop_traffic
+ sleep 1
+}
+
+do_red_test()
+{
+ local vlan=$1; shift
+ local limit=$1; shift
+ local backlog
+ local pct
+
+	# is above the limit.
+ # is above limit.
+ start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) \
+ $h3_mac tos=0x01
+
+ # Pushing below the queue limit should work.
+ RET=0
+ backlog=$(build_backlog $vlan $((2 * limit / 3)) tcp tos=0x01)
+ check_err $? "Could not build the requested backlog"
+ pct=$(check_marking $vlan "== 0")
+	check_err $? "backlog $backlog / $limit: Got $pct% marked packets, expected == 0."
+ log_test "TC $((vlan - 10)): RED backlog < limit"
+
+ # Pushing above should not.
+ RET=0
+ backlog=$(build_backlog $vlan $((3 * limit / 2)) tcp tos=0x01)
+ check_fail $? "Traffic went into backlog instead of being early-dropped"
+ pct=$(check_marking $vlan "== 0")
+	check_err $? "backlog $backlog / $limit: Got $pct% marked packets, expected == 0."
+ local diff=$((limit - backlog))
+ pct=$((100 * diff / limit))
+ ((0 <= pct && pct <= 5))
+	check_err $? "backlog $backlog / $limit: expected <= 5% distance"
+ log_test "TC $((vlan - 10)): RED backlog > limit"
+
+ stop_traffic
+ sleep 1
+}
+
+do_mc_backlog_test()
+{
+ local vlan=$1; shift
+ local limit=$1; shift
+ local backlog
+ local pct
+
+ RET=0
+
+ start_tcp_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) bc
+ start_tcp_traffic $h2.$vlan $(ipaddr 2 $vlan) $(ipaddr 3 $vlan) bc
+
+	backlog=$(busywait 5000 until_counter_is ">= 500000" \
+ get_qdisc_backlog $vlan)
+ check_err $? "Could not build MC backlog"
+
+	# Verify that we actually see the backlog on the BUM TC. Do a busywait
+	# as well, since performance blips might cause a spurious failure.
+ local ebl
+ ebl=$(busywait 5000 until_counter_is ">= 500000" \
+ get_mc_transmit_queue $vlan)
+ check_err $? "MC backlog reported by qdisc not visible in ethtool"
+
+ stop_traffic
+ stop_traffic
+
+ log_test "TC $((vlan - 10)): Qdisc reports MC backlog"
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
new file mode 100755
index 000000000000..1c36c576613b
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+ ping_ipv4
+ ecn_test
+ ecn_nodrop_test
+ red_test
+ mc_backlog_test
+"
+: ${QDISC:=ets}
+source sch_red_core.sh
+
+# do_ecn_test first builds 2/3 of the requested backlog and expects no marking,
+# and then builds 3/2 of it and does expect marking. The values of $BACKLOG1 and
+# $BACKLOG2 are far enough apart not to overlap, so that we can assume that if
+# we do (or do not) see marking, it is actually due to the configuration of
+# that one TC, and not due to the configuration of the other TC leaking over.
+BACKLOG1=200000
+BACKLOG2=500000
+
+install_qdisc()
+{
+ local -a args=("$@")
+
+ tc qdisc add dev $swp3 root handle 10: $QDISC \
+ bands 8 priomap 7 6 5 4 3 2 1 0
+ tc qdisc add dev $swp3 parent 10:8 handle 108: red \
+ limit 1000000 min $BACKLOG1 max $((BACKLOG1 + 1)) \
+ probability 1.0 avpkt 8000 burst 38 "${args[@]}"
+ tc qdisc add dev $swp3 parent 10:7 handle 107: red \
+ limit 1000000 min $BACKLOG2 max $((BACKLOG2 + 1)) \
+ probability 1.0 avpkt 8000 burst 63 "${args[@]}"
+ sleep 1
+}
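+
+# With the priomap above, packet priority 0 lands in class 10:8 (RED handle
+# 108:, limits around $BACKLOG1) and priority 1 in class 10:7 (RED handle
+# 107:, $BACKLOG2). The hosts in sch_red_core.sh mark vlan 10 traffic with
+# PCP 0 and vlan 11 with PCP 1 via their VLAN egress maps, and mlxsw takes
+# packet priority 1:1 from the 802.1p field.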
+
+uninstall_qdisc()
+{
+ tc qdisc del dev $swp3 parent 10:7
+ tc qdisc del dev $swp3 parent 10:8
+ tc qdisc del dev $swp3 root
+}
+
+ecn_test()
+{
+ install_qdisc ecn
+
+ do_ecn_test 10 $BACKLOG1
+ do_ecn_test 11 $BACKLOG2
+
+ uninstall_qdisc
+}
+
+ecn_nodrop_test()
+{
+ install_qdisc ecn nodrop
+
+ do_ecn_nodrop_test 10 $BACKLOG1
+ do_ecn_nodrop_test 11 $BACKLOG2
+
+ uninstall_qdisc
+}
+
+red_test()
+{
+ install_qdisc
+
+ do_red_test 10 $BACKLOG1
+ do_red_test 11 $BACKLOG2
+
+ uninstall_qdisc
+}
+
+mc_backlog_test()
+{
+ install_qdisc
+
+ # Note that the backlog numbers here do not correspond to RED
+ # configuration, but are arbitrary.
+ do_mc_backlog_test 10 $BACKLOG1
+ do_mc_backlog_test 11 $BACKLOG2
+
+ uninstall_qdisc
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+bail_on_lldpad
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_prio.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_prio.sh
new file mode 100755
index 000000000000..76820a0e9a1b
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_prio.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+QDISC=prio
+source sch_red_ets.sh
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
new file mode 100755
index 000000000000..558667ea11ec
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+ ping_ipv4
+ ecn_test
+ ecn_nodrop_test
+ red_test
+ mc_backlog_test
+"
+source sch_red_core.sh
+
+BACKLOG=300000
+
+install_qdisc()
+{
+ local -a args=("$@")
+
+ tc qdisc add dev $swp3 root handle 108: red \
+ limit 1000000 min $BACKLOG max $((BACKLOG + 1)) \
+ probability 1.0 avpkt 8000 burst 38 "${args[@]}"
+ sleep 1
+}
+
+uninstall_qdisc()
+{
+ tc qdisc del dev $swp3 root
+}
+
+ecn_test()
+{
+ install_qdisc ecn
+ do_ecn_test 10 $BACKLOG
+ uninstall_qdisc
+}
+
+ecn_nodrop_test()
+{
+ install_qdisc ecn nodrop
+ do_ecn_nodrop_test 10 $BACKLOG
+ uninstall_qdisc
+}
+
+red_test()
+{
+ install_qdisc
+ do_red_test 10 $BACKLOG
+ uninstall_qdisc
+}
+
+mc_backlog_test()
+{
+ install_qdisc
+ # Note that the backlog value here does not correspond to RED
+ # configuration, but is arbitrary.
+ do_mc_backlog_test 10 $BACKLOG
+ uninstall_qdisc
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+bail_on_lldpad
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh
new file mode 100755
index 000000000000..58f3a05f08af
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh
@@ -0,0 +1,222 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+ port_pool_test
+ port_tc_ip_test
+ port_tc_arp_test
+"
+
+NUM_NETIFS=2
+source ../../../net/forwarding/lib.sh
+source ../../../net/forwarding/devlink_lib.sh
+source mlxsw_lib.sh
+
+SB_POOL_ING=0
+SB_POOL_EGR_CPU=10
+
+SB_ITC_CPU_IP=3
+SB_ITC_CPU_ARP=2
+SB_ITC=0
+
+h1_create()
+{
+ simple_if_init $h1 192.0.1.1/24
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 192.0.1.1/24
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.1.2/24
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 192.0.1.2/24
+}
+
+sb_occ_pool_check()
+{
+ local dl_port=$1; shift
+ local pool=$1; shift
+ local exp_max_occ=$1
+ local max_occ
+ local err=0
+
+ max_occ=$(devlink sb -j occupancy show $dl_port \
+ | jq -e ".[][][\"pool\"][\"$pool\"][\"max\"]")
+
+ if [[ "$max_occ" -ne "$exp_max_occ" ]]; then
+ err=1
+ fi
+
+ echo $max_occ
+ return $err
+}
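+
+# The jq path above indexes the JSON emitted by "devlink sb -j occupancy
+# show". A rough sketch of the expected shape (port name and values are
+# illustrative):
+#
+#   {"occupancy": {"pci/0000:03:00.0/1": {"pool": {"0": {"current": 0,
+#    "max": 288}}, "itc": {...}, "etc": {...}}}}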
+
+sb_occ_itc_check()
+{
+ local dl_port=$1; shift
+ local itc=$1; shift
+ local exp_max_occ=$1
+ local max_occ
+ local err=0
+
+ max_occ=$(devlink sb -j occupancy show $dl_port \
+ | jq -e ".[][][\"itc\"][\"$itc\"][\"max\"]")
+
+ if [[ "$max_occ" -ne "$exp_max_occ" ]]; then
+ err=1
+ fi
+
+ echo $max_occ
+ return $err
+}
+
+sb_occ_etc_check()
+{
+ local dl_port=$1; shift
+ local etc=$1; shift
+ local exp_max_occ=$1; shift
+ local max_occ
+ local err=0
+
+ max_occ=$(devlink sb -j occupancy show $dl_port \
+ | jq -e ".[][][\"etc\"][\"$etc\"][\"max\"]")
+
+ if [[ "$max_occ" -ne "$exp_max_occ" ]]; then
+ err=1
+ fi
+
+ echo $max_occ
+ return $err
+}
+
+port_pool_test()
+{
+ local exp_max_occ=288
+ local max_occ
+
+ devlink sb occupancy clearmax $DEVLINK_DEV
+
+ $MZ $h1 -c 1 -p 160 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \
+ -t ip -q
+
+ devlink sb occupancy snapshot $DEVLINK_DEV
+
+ RET=0
+ max_occ=$(sb_occ_pool_check $dl_port1 $SB_POOL_ING $exp_max_occ)
+ check_err $? "Expected iPool($SB_POOL_ING) max occupancy to be $exp_max_occ, but got $max_occ"
+ log_test "physical port's($h1) ingress pool"
+
+ RET=0
+ max_occ=$(sb_occ_pool_check $dl_port2 $SB_POOL_ING $exp_max_occ)
+ check_err $? "Expected iPool($SB_POOL_ING) max occupancy to be $exp_max_occ, but got $max_occ"
+ log_test "physical port's($h2) ingress pool"
+
+ RET=0
+ max_occ=$(sb_occ_pool_check $cpu_dl_port $SB_POOL_EGR_CPU $exp_max_occ)
+ check_err $? "Expected ePool($SB_POOL_EGR_CPU) max occupancy to be $exp_max_occ, but got $max_occ"
+ log_test "CPU port's egress pool"
+}
+
+port_tc_ip_test()
+{
+ local exp_max_occ=288
+ local max_occ
+
+ devlink sb occupancy clearmax $DEVLINK_DEV
+
+ $MZ $h1 -c 1 -p 160 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \
+ -t ip -q
+
+ devlink sb occupancy snapshot $DEVLINK_DEV
+
+ RET=0
+ max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ)
+ check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ"
+ log_test "physical port's($h1) ingress TC - IP packet"
+
+ RET=0
+ max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ)
+ check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ"
+ log_test "physical port's($h2) ingress TC - IP packet"
+
+ RET=0
+ max_occ=$(sb_occ_etc_check $cpu_dl_port $SB_ITC_CPU_IP $exp_max_occ)
+ check_err $? "Expected egress TC($SB_ITC_CPU_IP) max occupancy to be $exp_max_occ, but got $max_occ"
+ log_test "CPU port's egress TC - IP packet"
+}
+
+port_tc_arp_test()
+{
+ local exp_max_occ=96
+ local max_occ
+
+ if [[ $MLXSW_CHIP != "mlxsw_spectrum" ]]; then
+ exp_max_occ=144
+ fi
+
+ devlink sb occupancy clearmax $DEVLINK_DEV
+
+ $MZ $h1 -c 1 -p 160 -a $h1mac -A 192.0.1.1 -t arp -q
+
+ devlink sb occupancy snapshot $DEVLINK_DEV
+
+ RET=0
+ max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ)
+ check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ"
+ log_test "physical port's($h1) ingress TC - ARP packet"
+
+ RET=0
+ max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ)
+ check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ"
+ log_test "physical port's($h2) ingress TC - ARP packet"
+
+ RET=0
+ max_occ=$(sb_occ_etc_check $cpu_dl_port $SB_ITC_CPU_ARP $exp_max_occ)
+	check_err $? "Expected egress TC($SB_ITC_CPU_ARP) max occupancy to be $exp_max_occ, but got $max_occ"
+ log_test "CPU port's egress TC - ARP packet"
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ h2=${NETIFS[p2]}
+
+ h1mac=$(mac_get $h1)
+ h2mac=$(mac_get $h2)
+
+ dl_port1=$(devlink_port_by_netdev $h1)
+ dl_port2=$(devlink_port_by_netdev $h2)
+
+ cpu_dl_port=$(devlink_cpu_port_get)
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer_configuration.py b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer_configuration.py
new file mode 100755
index 000000000000..0d4b9327c9b3
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer_configuration.py
@@ -0,0 +1,416 @@
+#!/usr/bin/python
+# SPDX-License-Identifier: GPL-2.0
+
+import subprocess
+import json as j
+import random
+
+
+class SkipTest(Exception):
+ pass
+
+
+class RandomValuePicker:
+ """
+    Class for storing shared buffer configuration. Can handle 3 different
+    objects: pool, tcbind and portpool. Provides an interface to get random
+    values for a specific object type as follows:
+ 1. Pool:
+ - random size
+
+ 2. TcBind:
+ - random pool number
+ - random threshold
+
+ 3. PortPool:
+ - random threshold
+ """
+ def __init__(self, pools):
+ self._pools = []
+ for pool in pools:
+ self._pools.append(pool)
+
+ def _cell_size(self):
+ return self._pools[0]["cell_size"]
+
+ def _get_static_size(self, th):
+ # For threshold of 16, this works out to be about 12MB on Spectrum-1,
+ # and about 17MB on Spectrum-2.
+ return th * 8000 * self._cell_size()
+
+ def _get_size(self):
+ return self._get_static_size(16)
+
+ def _get_thtype(self):
+ return "static"
+
+ def _get_th(self, pool):
+ # Threshold value can be any integer between 3 and 16
+ th = random.randint(3, 16)
+ if pool["thtype"] == "dynamic":
+ return th
+ else:
+ return self._get_static_size(th)
+
+ def _get_pool(self, direction):
+ ing_pools = []
+ egr_pools = []
+ for pool in self._pools:
+ if pool["type"] == "ingress":
+ ing_pools.append(pool)
+ else:
+ egr_pools.append(pool)
+ if direction == "ingress":
+ arr = ing_pools
+ else:
+ arr = egr_pools
+ return arr[random.randint(0, len(arr) - 1)]
+
+ def get_value(self, objid):
+ if isinstance(objid, Pool):
+ if objid["pool"] in [4, 8, 9, 10]:
+ # The threshold type of pools 4, 8, 9 and 10 cannot be changed
+ raise SkipTest()
+ else:
+ return (self._get_size(), self._get_thtype())
+ if isinstance(objid, TcBind):
+ if objid["tc"] >= 8:
+ # Multicast TCs cannot be changed
+ raise SkipTest()
+ else:
+ pool = self._get_pool(objid["type"])
+ th = self._get_th(pool)
+ pool_n = pool["pool"]
+ return (pool_n, th)
+ if isinstance(objid, PortPool):
+ pool_n = objid["pool"]
+ pool = self._pools[pool_n]
+ assert pool["pool"] == pool_n
+ th = self._get_th(pool)
+ return (th,)
+
+
+class RecordValuePickerException(Exception):
+ pass
+
+
+class RecordValuePicker:
+ """
+ Class for recording existing shared buffer configuration. Can handle
+ three different objects: pool, tcbind and portpool. Provides an
+ interface to get the stored values per object type.
+ """
+ def __init__(self, objlist):
+ self._recs = []
+ for item in objlist:
+ self._recs.append({"objid": item, "value": item.var_tuple()})
+
+ def get_value(self, objid):
+ if isinstance(objid, Pool) and objid["pool"] in [4, 8, 9, 10]:
+ # The threshold type of pools 4, 8, 9 and 10 cannot be changed
+ raise SkipTest()
+ if isinstance(objid, TcBind) and objid["tc"] >= 8:
+ # Multicast TCs cannot be changed
+ raise SkipTest()
+ for rec in self._recs:
+ if rec["objid"].weak_eq(objid):
+ return rec["value"]
+ raise RecordValuePickerException()
+
+
+def run_cmd(cmd, json=False):
+ out = subprocess.check_output(cmd, shell=True)
+ if json:
+ return j.loads(out)
+ return out
+
+
+def run_json_cmd(cmd):
+ return run_cmd(cmd, json=True)
+
+
+def log_test(test_name, err_msg=None):
+ if err_msg:
+ print("\t%s" % err_msg)
+ print("TEST: %-80s [FAIL]" % test_name)
+ else:
+ print("TEST: %-80s [ OK ]" % test_name)
+
+
+class CommonItem(dict):
+ varitems = []
+
+ def var_tuple(self):
+ ret = []
+ self.varitems.sort()
+ for key in self.varitems:
+ ret.append(self[key])
+ return tuple(ret)
+
+ def weak_eq(self, other):
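+ # Items are "weakly equal" when all identifying keys match; the
+ # mutable keys listed in varitems are deliberately ignored.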
+ for key in self:
+ if key in self.varitems:
+ continue
+ if self[key] != other[key]:
+ return False
+ return True
+
+
+class CommonList(list):
+ def get_by(self, by_obj):
+ for item in self:
+ if item.weak_eq(by_obj):
+ return item
+ return None
+
+ def del_by(self, by_obj):
+ for item in self:
+ if item.weak_eq(by_obj):
+ self.remove(item)
+
+
+class Pool(CommonItem):
+ varitems = ["size", "thtype"]
+
+ def dl_set(self, dlname, size, thtype):
+ run_cmd("devlink sb pool set {} sb {} pool {} size {} thtype {}".format(dlname, self["sb"],
+ self["pool"],
+ size, thtype))
+
+
+class PoolList(CommonList):
+ pass
+
+
+def get_pools(dlname, direction=None):
+ d = run_json_cmd("devlink sb pool show -j")
+ pools = PoolList()
+ for pooldict in d["pool"][dlname]:
+ if not direction or direction == pooldict["type"]:
+ pools.append(Pool(pooldict))
+ return pools
+
+
+def do_check_pools(dlname, pools, vp):
+ for pool in pools:
+ pre_pools = get_pools(dlname)
+ try:
+ (size, thtype) = vp.get_value(pool)
+ except SkipTest:
+ continue
+ pool.dl_set(dlname, size, thtype)
+ post_pools = get_pools(dlname)
+ pool = post_pools.get_by(pool)
+
+ err_msg = None
+ if pool["size"] != size:
+ err_msg = "Incorrect pool size (got {}, expected {})".format(pool["size"], size)
+ if pool["thtype"] != thtype:
+ err_msg = "Incorrect pool threshold type (got {}, expected {})".format(pool["thtype"], thtype)
+
+ pre_pools.del_by(pool)
+ post_pools.del_by(pool)
+ if pre_pools != post_pools:
+ err_msg = "Other pool setup changed as well"
+ log_test("pool {} of sb {} set verification".format(pool["pool"],
+ pool["sb"]), err_msg)
+
+
+def check_pools(dlname, pools):
+ # Save defaults
+ record_vp = RecordValuePicker(pools)
+
+ # For each pool, set random size and static threshold type
+ do_check_pools(dlname, pools, RandomValuePicker(pools))
+
+ # Restore defaults
+ do_check_pools(dlname, pools, record_vp)
+
+
+class TcBind(CommonItem):
+ varitems = ["pool", "threshold"]
+
+ def __init__(self, port, d):
+ super(TcBind, self).__init__(d)
+ self["dlportname"] = port.name
+
+ def dl_set(self, pool, th):
+ run_cmd("devlink sb tc bind set {} sb {} tc {} type {} pool {} th {}".format(self["dlportname"],
+ self["sb"],
+ self["tc"],
+ self["type"],
+ pool, th))
+
+
+class TcBindList(CommonList):
+ pass
+
+
+def get_tcbinds(ports, verify_existence=False):
+ d = run_json_cmd("devlink sb tc bind show -j -n")
+ tcbinds = TcBindList()
+ for port in ports:
+ err_msg = None
+ if port.name not in d["tc_bind"] or len(d["tc_bind"][port.name]) == 0:
+ err_msg = "No tc bind for port"
+ else:
+ for tcbinddict in d["tc_bind"][port.name]:
+ tcbinds.append(TcBind(port, tcbinddict))
+ if verify_existence:
+ log_test("tc bind existence for port {} verification".format(port.name), err_msg)
+ return tcbinds
+
+
+def do_check_tcbind(ports, tcbinds, vp):
+ for tcbind in tcbinds:
+ pre_tcbinds = get_tcbinds(ports)
+ try:
+ (pool, th) = vp.get_value(tcbind)
+ except SkipTest:
+ continue
+ tcbind.dl_set(pool, th)
+ post_tcbinds = get_tcbinds(ports)
+ tcbind = post_tcbinds.get_by(tcbind)
+
+ err_msg = None
+ if tcbind["pool"] != pool:
+ err_msg = "Incorrect pool (got {}, expected {})".format(tcbind["pool"], pool)
+ if tcbind["threshold"] != th:
+ err_msg = "Incorrect threshold (got {}, expected {})".format(tcbind["threshold"], th)
+
+ pre_tcbinds.del_by(tcbind)
+ post_tcbinds.del_by(tcbind)
+ if pre_tcbinds != post_tcbinds:
+ err_msg = "Other tc bind setup changed as well"
+ log_test("tc bind {}-{} of sb {} set verification".format(tcbind["dlportname"],
+ tcbind["tc"],
+ tcbind["sb"]), err_msg)
+
+
+def check_tcbind(dlname, ports, pools):
+ tcbinds = get_tcbinds(ports, verify_existence=True)
+
+ # Save defaults
+ record_vp = RecordValuePicker(tcbinds)
+
+ # Bind each port and unicast TC (TCs < 8) to a random pool and a random
+ # threshold
+ do_check_tcbind(ports, tcbinds, RandomValuePicker(pools))
+
+ # Restore defaults
+ do_check_tcbind(ports, tcbinds, record_vp)
+
+
+class PortPool(CommonItem):
+ varitems = ["threshold"]
+
+ def __init__(self, port, d):
+ super(PortPool, self).__init__(d)
+ self["dlportname"] = port.name
+
+ def dl_set(self, th):
+ run_cmd("devlink sb port pool set {} sb {} pool {} th {}".format(self["dlportname"],
+ self["sb"],
+ self["pool"], th))
+
+
+class PortPoolList(CommonList):
+ pass
+
+
+def get_portpools(ports, verify_existence=False):
+ d = run_json_cmd("devlink sb port pool show -j -n")
+ portpools = PortPoolList()
+ for port in ports:
+ err_msg = None
+ if port.name not in d["port_pool"] or len(d["port_pool"][port.name]) == 0:
+ err_msg = "No port pool for port"
+ else:
+ for portpooldict in d["port_pool"][port.name]:
+ portpools.append(PortPool(port, portpooldict))
+ if verify_existence:
+ log_test("port pool existence for port {} verification".format(port.name), err_msg)
+ return portpools
+
+
+def do_check_portpool(ports, portpools, vp):
+ for portpool in portpools:
+ pre_portpools = get_portpools(ports)
+ (th,) = vp.get_value(portpool)
+ portpool.dl_set(th)
+ post_portpools = get_portpools(ports)
+ portpool = post_portpools.get_by(portpool)
+
+ err_msg = None
+ if portpool["threshold"] != th:
+ err_msg = "Incorrect threshold (got {}, expected {})".format(portpool["threshold"], th)
+
+ pre_portpools.del_by(portpool)
+ post_portpools.del_by(portpool)
+ if pre_portpools != post_portpools:
+ err_msg = "Other port pool setup changed as well"
+ log_test("port pool {}-{} of sb {} set verification".format(portpool["dlportname"],
+ portpool["pool"],
+ portpool["sb"]), err_msg)
+
+
+def check_portpool(dlname, ports, pools):
+ portpools = get_portpools(ports, verify_existence=True)
+
+ # Save defaults
+ record_vp = RecordValuePicker(portpools)
+
+ # For each port pool, set a random threshold
+ do_check_portpool(ports, portpools, RandomValuePicker(pools))
+
+ # Restore defaults
+ do_check_portpool(ports, portpools, record_vp)
+
+
+class Port:
+ def __init__(self, name):
+ self.name = name
+
+
+class PortList(list):
+ pass
+
+
+def get_ports(dlname):
+ d = run_json_cmd("devlink port show -j")
+ ports = PortList()
+ for name in d["port"]:
+ if name.find(dlname) == 0 and d["port"][name]["flavour"] == "physical":
+ ports.append(Port(name))
+ return ports
+
+
+def get_device():
+ devices_info = run_json_cmd("devlink -j dev info")["info"]
+ for d in devices_info:
+ if "mlxsw_spectrum" in devices_info[d]["driver"]:
+ return d
+ return None
+
+
+class UnavailableDevlinkNameException(Exception):
+ pass
+
+
+def test_sb_configuration():
+ # Use static seed
+ random.seed(0)
+
+ dlname = get_device()
+ if not dlname:
+ raise UnavailableDevlinkNameException()
+
+ ports = get_ports(dlname)
+ pools = get_pools(dlname)
+
+ check_pools(dlname, pools)
+ check_tcbind(dlname, ports, pools)
+ check_portpool(dlname, ports, pools)
+
+
+test_sb_configuration()
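
The script automates a set/verify/restore cycle over every pool, tcbind and portpool object. The same check can be reproduced by hand for a single pool with the devlink CLI; the device handle below is a placeholder:

    DEVLINK_DEV=pci/0000:03:00.0    # placeholder

    devlink sb pool show $DEVLINK_DEV pool 0    # record the current setting
    devlink sb pool set $DEVLINK_DEV pool 0 size 12288000 thtype static
    devlink sb pool show $DEVLINK_DEV pool 0    # expect the new size/thtype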
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
index 7b2acba82a49..fd583a171db7 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
@@ -8,8 +8,9 @@ source $lib_dir/lib.sh
source $lib_dir/tc_common.sh
source $lib_dir/devlink_lib.sh
-if [ "$DEVLINK_VIDDID" != "15b3:cf6c" ]; then
- echo "SKIP: test is tailored for Mellanox Spectrum-2"
+if [[ "$DEVLINK_VIDDID" != "15b3:cf6c" && \
+ "$DEVLINK_VIDDID" != "15b3:cf70" ]]; then
+ echo "SKIP: test is tailored for Mellanox Spectrum-2 and Spectrum-3"
exit 1
fi
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh
index a0795227216e..efd798a85931 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh
@@ -8,9 +8,9 @@ tc_flower_get_target()
# The driver associates a counter with each tc filter, which means the
# number of supported filters is bounded by the number of available
# counters.
- # Currently, the driver supports 12K (12,288) flow counters and six of
+ # Currently, the driver supports 30K (30,720) flow counters and six of
# these are used for multicast routing.
- local target=12282
+ local target=30714
if ((! should_fail)); then
echo $target
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_action_hw_stats.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_action_hw_stats.sh
new file mode 100755
index 000000000000..20ed98fe5a60
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/tc_action_hw_stats.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ default_hw_stats_test
+ immediate_hw_stats_test
+ delayed_hw_stats_test
+ disabled_hw_stats_test
+"
+NUM_NETIFS=2
+
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 192.0.2.1/24
+}
+
+switch_create()
+{
+ simple_if_init $swp1 192.0.2.2/24
+ tc qdisc add dev $swp1 clsact
+}
+
+switch_destroy()
+{
+ tc qdisc del dev $swp1 clsact
+ simple_if_fini $swp1 192.0.2.2/24
+}
+
+hw_stats_test()
+{
+ RET=0
+
+ local name=$1
+ local action_hw_stats=$2
+ local occ_delta=$3
+ local expected_packet_count=$4
+
+ local orig_occ=$(devlink_resource_get "counters" "flow" | jq '.["occ"]')
+
+ tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 flower \
+ skip_sw dst_ip 192.0.2.2 action drop $action_hw_stats
+ check_err $? "Failed to add rule with $name hw_stats"
+
+ local new_occ=$(devlink_resource_get "counters" "flow" | jq '.["occ"]')
+ local expected_occ=$((orig_occ + occ_delta))
+ [ "$new_occ" == "$expected_occ" ]
+ check_err $? "Expected occupancy of $expected_occ, got $new_occ"
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $swp1mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $swp1 ingress" 101 $expected_packet_count
+ check_err $? "Did not match incoming packet"
+
+ tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower
+
+ log_test "$name hw_stats"
+}
+
+default_hw_stats_test()
+{
+ hw_stats_test "default" "" 2 1
+}
+
+immediate_hw_stats_test()
+{
+ hw_stats_test "immediate" "hw_stats immediate" 2 1
+}
+
+delayed_hw_stats_test()
+{
+ RET=0
+
+ tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 flower \
+ skip_sw dst_ip 192.0.2.2 action drop hw_stats delayed
+ check_fail $? "Unexpected success in adding rule with delayed hw_stats"
+
+ log_test "delayed hw_stats"
+}
+
+disabled_hw_stats_test()
+{
+ hw_stats_test "disabled" "hw_stats disabled" 0 0
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ h1mac=$(mac_get $h1)
+ swp1mac=$(mac_get $swp1)
+
+ vrf_prepare
+
+ h1_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+check_tc_action_hw_stats_support
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
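
For reference, the four cases above differ only in the per-action hw_stats keyword. A sketch of the accepted spellings, one rule at a time (interface and match are placeholders):

    tc filter add dev swp1 ingress protocol ip flower skip_sw dst_ip 192.0.2.2 \
        action drop hw_stats immediate    # or: delayed, disabled; omit for the default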
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_restrictions.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_restrictions.sh
new file mode 100755
index 000000000000..68c80d0ec1ec
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_restrictions.sh
@@ -0,0 +1,186 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ shared_block_drop_test
+ egress_redirect_test
+ multi_mirror_test
+"
+NUM_NETIFS=2
+
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+
+switch_create()
+{
+ simple_if_init $swp1 192.0.2.1/24
+ simple_if_init $swp2 192.0.2.2/24
+}
+
+switch_destroy()
+{
+ simple_if_fini $swp2 192.0.2.2/24
+ simple_if_fini $swp1 192.0.2.1/24
+}
+
+shared_block_drop_test()
+{
+ RET=0
+
+ # The mlxsw driver forbids a drop rule on a shared block that is
+ # bound to both ingress and egress.
+
+ tc qdisc add dev $swp1 ingress_block 22 clsact
+ check_err $? "Failed to create clsact with ingress block"
+
+ tc filter add block 22 protocol ip pref 1 handle 101 flower \
+ skip_sw dst_ip 192.0.2.2 action drop
+ check_err $? "Failed to add drop rule to ingress bound block"
+
+ tc qdisc add dev $swp2 ingress_block 22 clsact
+ check_err $? "Failed to create another clsact with ingress shared block"
+
+ tc qdisc del dev $swp2 clsact
+
+ tc qdisc add dev $swp2 egress_block 22 clsact
+ check_fail $? "Unexpected success in creating another clsact with egress shared block"
+
+ tc filter del block 22 protocol ip pref 1 handle 101 flower
+
+ tc qdisc add dev $swp2 egress_block 22 clsact
+ check_err $? "Failed to create another clsact with egress shared block after blocker drop rule removed"
+
+ tc filter add block 22 protocol ip pref 1 handle 101 flower \
+ skip_sw dst_ip 192.0.2.2 action drop
+ check_fail $? "Unexpected success in adding drop rule to mixed-bound block"
+
+ tc qdisc del dev $swp1 clsact
+
+ tc qdisc add dev $swp1 egress_block 22 clsact
+ check_err $? "Failed to create another clsact with egress shared block"
+
+ tc filter add block 22 protocol ip pref 1 handle 101 flower \
+ skip_sw dst_ip 192.0.2.2 action drop
+ check_err $? "Failed to add drop rule to egress bound shared block"
+
+ tc filter del block 22 protocol ip pref 1 handle 101 flower
+
+ tc qdisc del dev $swp2 clsact
+ tc qdisc del dev $swp1 clsact
+
+ log_test "shared block drop"
+}
+
+egress_redirect_test()
+{
+ RET=0
+
+ # The mlxsw driver forbids a mirred redirect rule on an
+ # egress-bound block.
+
+ tc qdisc add dev $swp1 ingress_block 22 clsact
+ check_err $? "Failed to create clsact with ingress block"
+
+ tc filter add block 22 protocol ip pref 1 handle 101 flower \
+ skip_sw dst_ip 192.0.2.2 \
+ action mirred egress redirect dev $swp2
+ check_err $? "Failed to add redirect rule to ingress bound block"
+
+ tc qdisc add dev $swp2 ingress_block 22 clsact
+ check_err $? "Failed to create another clsact with ingress shared block"
+
+ tc qdisc del dev $swp2 clsact
+
+ tc qdisc add dev $swp2 egress_block 22 clsact
+ check_fail $? "Unexpected success in creating another clsact with egress shared block"
+
+ tc filter del block 22 protocol ip pref 1 handle 101 flower
+
+ tc qdisc add dev $swp2 egress_block 22 clsact
+ check_err $? "Failed to create another clsact with egress shared block after blocker redirect rule removed"
+
+ tc filter add block 22 protocol ip pref 1 handle 101 flower \
+ skip_sw dst_ip 192.0.2.2 \
+ action mirred egress redirect dev $swp2
+ check_fail $? "Unexpected success in adding redirect rule to mixed-bound block"
+
+ tc qdisc del dev $swp1 clsact
+
+ tc qdisc add dev $swp1 egress_block 22 clsact
+ check_err $? "Failed to create another clsact with egress shared block"
+
+ tc filter add block 22 protocol ip pref 1 handle 101 flower \
+ skip_sw dst_ip 192.0.2.2 \
+ action mirred egress redirect dev $swp2
+ check_fail $? "Unexpected success in adding redirect rule to egress-bound shared block"
+
+ tc qdisc del dev $swp2 clsact
+
+ tc filter add block 22 protocol ip pref 1 handle 101 flower \
+ skip_sw dst_ip 192.0.2.2 \
+ action mirred egress redirect dev $swp2
+ check_fail $? "Unexpected success in adding redirect rule to egress-bound block"
+
+ tc qdisc del dev $swp1 clsact
+
+ log_test "egress redirect"
+}
+
+multi_mirror_test()
+{
+ RET=0
+
+ # The mlxsw driver forbids multiple mirror actions in a
+ # single rule.
+
+ tc qdisc add dev $swp1 clsact
+
+ tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 flower \
+ skip_sw dst_ip 192.0.2.2 \
+ action mirred egress mirror dev $swp2
+ check_err $? "Failed to add rule with single mirror action"
+
+ tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower
+
+ tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 flower \
+ skip_sw dst_ip 192.0.2.2 \
+ action mirred egress mirror dev $swp2 \
+ action mirred egress mirror dev $swp1
+ check_fail $? "Unexpected success in adding rule with two mirror actions"
+
+ tc qdisc del dev $swp1 clsact
+
+ log_test "multi mirror"
+}
+
+setup_prepare()
+{
+ swp1=${NETIFS[p1]}
+ swp2=${NETIFS[p2]}
+
+ vrf_prepare
+
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+
+ vrf_cleanup
+}
+
+check_tc_shblock_support
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
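
All three restrictions revolve around tc shared blocks, where a single filter block is attached to several qdiscs and its rules are managed once. A minimal sketch of the mechanism itself, with placeholder device names:

    tc qdisc add dev swp1 ingress_block 22 clsact
    tc qdisc add dev swp2 ingress_block 22 clsact    # same block, second port
    tc filter add block 22 protocol ip flower dst_ip 192.0.2.2 action drop
    tc filter show block 22    # rules are listed once, not per device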
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
index a6d733d2a4b4..cc0f07e72cf2 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
@@ -2,9 +2,9 @@
# SPDX-License-Identifier: GPL-2.0
# Test for resource limit of offloaded flower rules. The test adds a given
-# number of flower matches for different IPv6 addresses, then generates traffic,
-# and ensures each was hit exactly once. This file contains functions to set up
-# a testing topology and run the test, and is meant to be sourced from a test
+# number of flower matches for different IPv6 addresses, then checks the offload
+# indication of all the tc flower rules. This file contains functions to set
+# up a testing topology and run the test, and is meant to be sourced from a test
# script that calls the testing routine with a given number of rules.
TC_FLOWER_NUM_NETIFS=2
@@ -94,22 +94,15 @@ __tc_flower_test()
tc_flower_rules_create $count $should_fail
- for ((i = 0; i < count; ++i)); do
- $MZ $h1 -q -c 1 -t ip -p 20 -b bc -6 \
- -A 2001:db8:2::1 \
- -B $(tc_flower_addr $i)
- done
-
- MISMATCHES=$(
- tc -j -s filter show dev $h2 ingress |
- jq -r '[ .[] | select(.kind == "flower") | .options |
- values as $rule | .actions[].stats.packets |
- select(. != 1) | "\(.) on \($rule.keys.dst_ip)" ] |
- join(", ")'
- )
-
- test -z "$MISMATCHES"
- check_err $? "Expected to capture 1 packet for each IP, but got $MISMATCHES"
+ offload_count=$(tc -j -s filter show dev $h2 ingress |
+ jq -r '[ .[] | select(.kind == "flower") |
+ .options | .in_hw ]' | jq .[] | wc -l)
+ [[ $((offload_count - 1)) -eq $count ]]
+ err=$?
+ if [[ $should_fail -eq 0 ]]; then
+ check_err $err "Offload mismatch"
+ else
+ check_err_fail $should_fail $err "Offloaded more rules than expected"
+ fi
}
tc_flower_test()
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
index 4632f51af7ab..729a86cc4ede 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
@@ -9,6 +9,7 @@ lib_dir=$(dirname $0)/../../../net/forwarding
ALL_TESTS="sanitization_test offload_indication_test \
sanitization_vlan_aware_test offload_indication_vlan_aware_test"
NUM_NETIFS=2
+: ${TIMEOUT:=20000} # ms
source $lib_dir/lib.sh
setup_prepare()
@@ -470,8 +471,8 @@ offload_indication_fdb_flood_test()
bridge fdb append 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.2
- bridge fdb show brport vxlan0 | grep 00:00:00:00:00:00 \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb 00:00:00:00:00:00 \
+ bridge fdb show brport vxlan0
check_err $?
bridge fdb del 00:00:00:00:00:00 dev vxlan0 self
@@ -486,11 +487,11 @@ offload_indication_fdb_bridge_test()
bridge fdb add de:ad:be:ef:13:37 dev vxlan0 self master static \
dst 198.51.100.2
- bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep self \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self bridge fdb show brport vxlan0
check_err $?
- bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep -v self \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self -v bridge fdb show brport vxlan0
check_err $?
log_test "vxlan entry offload indication - initial state"
@@ -500,9 +501,9 @@ offload_indication_fdb_bridge_test()
RET=0
bridge fdb del de:ad:be:ef:13:37 dev vxlan0 master
- bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep self \
- | grep -q offload
- check_fail $?
+ busywait "$TIMEOUT" not wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self bridge fdb show brport vxlan0
+ check_err $?
log_test "vxlan entry offload indication - after removal from bridge"
@@ -511,11 +512,11 @@ offload_indication_fdb_bridge_test()
RET=0
bridge fdb add de:ad:be:ef:13:37 dev vxlan0 master static
- bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep self \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self bridge fdb show brport vxlan0
check_err $?
- bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep -v self \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self -v bridge fdb show brport vxlan0
check_err $?
log_test "vxlan entry offload indication - after re-add to bridge"
@@ -525,9 +526,9 @@ offload_indication_fdb_bridge_test()
RET=0
bridge fdb del de:ad:be:ef:13:37 dev vxlan0 self
- bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep -v self \
- | grep -q offload
- check_fail $?
+ busywait "$TIMEOUT" not wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self -v bridge fdb show brport vxlan0
+ check_err $?
log_test "vxlan entry offload indication - after removal from vxlan"
@@ -536,11 +537,11 @@ offload_indication_fdb_bridge_test()
RET=0
bridge fdb add de:ad:be:ef:13:37 dev vxlan0 self dst 198.51.100.2
- bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep self \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self bridge fdb show brport vxlan0
check_err $?
- bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep -v self \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self -v bridge fdb show brport vxlan0
check_err $?
log_test "vxlan entry offload indication - after re-add to vxlan"
@@ -558,27 +559,32 @@ offload_indication_decap_route_test()
{
RET=0
- ip route show table local | grep 198.51.100.1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip route show table local 198.51.100.1
check_err $?
ip link set dev vxlan0 down
- ip route show table local | grep 198.51.100.1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip route show table local 198.51.100.1
check_err $?
ip link set dev vxlan1 down
- ip route show table local | grep 198.51.100.1 | grep -q offload
- check_fail $?
+ busywait "$TIMEOUT" not wait_for_offload \
+ ip route show table local 198.51.100.1
+ check_err $?
log_test "vxlan decap route - vxlan device down"
RET=0
ip link set dev vxlan1 up
- ip route show table local | grep 198.51.100.1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip route show table local 198.51.100.1
check_err $?
ip link set dev vxlan0 up
- ip route show table local | grep 198.51.100.1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip route show table local 198.51.100.1
check_err $?
log_test "vxlan decap route - vxlan device up"
@@ -586,11 +592,13 @@ offload_indication_decap_route_test()
RET=0
ip address delete 198.51.100.1/32 dev lo
- ip route show table local | grep 198.51.100.1 | grep -q offload
- check_fail $?
+ busywait "$TIMEOUT" not wait_for_offload \
+ ip route show table local 198.51.100.1
+ check_err $?
ip address add 198.51.100.1/32 dev lo
- ip route show table local | grep 198.51.100.1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip route show table local 198.51.100.1
check_err $?
log_test "vxlan decap route - add local route"
@@ -598,16 +606,19 @@ offload_indication_decap_route_test()
RET=0
ip link set dev $swp1 nomaster
- ip route show table local | grep 198.51.100.1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip route show table local 198.51.100.1
check_err $?
ip link set dev $swp2 nomaster
- ip route show table local | grep 198.51.100.1 | grep -q offload
- check_fail $?
+ busywait "$TIMEOUT" not wait_for_offload \
+ ip route show table local 198.51.100.1
+ check_err $?
ip link set dev $swp1 master br0
ip link set dev $swp2 master br1
- ip route show table local | grep 198.51.100.1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip route show table local 198.51.100.1
check_err $?
log_test "vxlan decap route - local ports enslavement"
@@ -615,12 +626,14 @@ offload_indication_decap_route_test()
RET=0
ip link del dev br0
- ip route show table local | grep 198.51.100.1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip route show table local 198.51.100.1
check_err $?
ip link del dev br1
- ip route show table local | grep 198.51.100.1 | grep -q offload
- check_fail $?
+ busywait "$TIMEOUT" not wait_for_offload \
+ ip route show table local 198.51.100.1
+ check_err $?
log_test "vxlan decap route - bridge device deletion"
@@ -632,16 +645,19 @@ offload_indication_decap_route_test()
ip link set dev $swp2 master br1
ip link set dev vxlan0 master br0
ip link set dev vxlan1 master br1
- ip route show table local | grep 198.51.100.1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip route show table local 198.51.100.1
check_err $?
ip link del dev vxlan0
- ip route show table local | grep 198.51.100.1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip route show table local 198.51.100.1
check_err $?
ip link del dev vxlan1
- ip route show table local | grep 198.51.100.1 | grep -q offload
- check_fail $?
+ busywait "$TIMEOUT" not wait_for_offload \
+ ip route show table local 198.51.100.1
+ check_err $?
log_test "vxlan decap route - vxlan device deletion"
@@ -656,12 +672,15 @@ check_fdb_offloaded()
local mac=00:11:22:33:44:55
local zmac=00:00:00:00:00:00
- bridge fdb show dev vxlan0 | grep $mac | grep self | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb $mac self \
+ bridge fdb show dev vxlan0
check_err $?
- bridge fdb show dev vxlan0 | grep $mac | grep master | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb $mac master \
+ bridge fdb show dev vxlan0
check_err $?
- bridge fdb show dev vxlan0 | grep $zmac | grep self | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb $zmac self \
+ bridge fdb show dev vxlan0
check_err $?
}
@@ -672,13 +691,15 @@ check_vxlan_fdb_not_offloaded()
bridge fdb show dev vxlan0 | grep $mac | grep -q self
check_err $?
- bridge fdb show dev vxlan0 | grep $mac | grep self | grep -q offload
- check_fail $?
+ busywait "$TIMEOUT" not wait_for_offload grep_bridge_fdb $mac self \
+ bridge fdb show dev vxlan0
+ check_err $?
bridge fdb show dev vxlan0 | grep $zmac | grep -q self
check_err $?
- bridge fdb show dev vxlan0 | grep $zmac | grep self | grep -q offload
- check_fail $?
+ busywait "$TIMEOUT" not wait_for_offload grep_bridge_fdb $zmac self \
+ bridge fdb show dev vxlan0
+ check_err $?
}
check_bridge_fdb_not_offloaded()
@@ -688,8 +709,9 @@ check_bridge_fdb_not_offloaded()
bridge fdb show dev vxlan0 | grep $mac | grep -q master
check_err $?
- bridge fdb show dev vxlan0 | grep $mac | grep master | grep -q offload
- check_fail $?
+ busywait "$TIMEOUT" not wait_for_offload grep_bridge_fdb $mac master \
+ bridge fdb show dev vxlan0
+ check_err $?
}
__offload_indication_join_vxlan_first()
@@ -771,12 +793,14 @@ __offload_indication_join_vxlan_last()
ip link set dev $swp1 master br0
- bridge fdb show dev vxlan0 | grep $zmac | grep self | grep -q offload
- check_fail $?
+ busywait "$TIMEOUT" not wait_for_offload grep_bridge_fdb $zmac self \
+ bridge fdb show dev vxlan0
+ check_err $?
ip link set dev vxlan0 master br0
- bridge fdb show dev vxlan0 | grep $zmac | grep self | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb $zmac self \
+ bridge fdb show dev vxlan0
check_err $?
log_test "offload indication - attach vxlan last"
@@ -854,20 +878,26 @@ sanitization_vlan_aware_test()
bridge vlan del vid 10 dev vxlan20
bridge vlan add vid 20 dev vxlan20 pvid untagged
- # Test that offloading of an unsupported tunnel fails when it is
- # triggered by addition of VLAN to a local port
- RET=0
+ # Test that when two VXLAN tunnels with conflicting configurations
+ # (i.e., different TTL) are enslaved to the same VLAN-aware bridge,
+ # the enslavement of a port to the bridge is denied.
- # TOS must be set to inherit
- ip link set dev vxlan10 type vxlan tos 42
+ # Use the offload indication of the local route to ensure the VXLAN
+ # configuration was correctly rolled back.
+ ip address add 198.51.100.1/32 dev lo
- ip link set dev $swp1 master br0
- bridge vlan add vid 10 dev $swp1 &> /dev/null
+ ip link set dev vxlan10 type vxlan ttl 10
+ ip link set dev $swp1 master br0 &> /dev/null
check_fail $?
- log_test "vlan-aware - failed vlan addition to a local port"
+ busywait "$TIMEOUT" not wait_for_offload \
+ ip route show table local 198.51.100.1
+ check_err $?
+
+ log_test "vlan-aware - failed enslavement to bridge due to conflict"
- ip link set dev vxlan10 type vxlan tos inherit
+ ip link set dev vxlan10 type vxlan ttl 20
+ ip address del 198.51.100.1/32 dev lo
ip link del dev vxlan20
ip link del dev vxlan10
@@ -924,11 +954,11 @@ offload_indication_vlan_aware_fdb_test()
bridge fdb add de:ad:be:ef:13:37 dev vxlan10 self master static \
dst 198.51.100.2 vlan 10
- bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep self \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self bridge fdb show brport vxlan10
check_err $?
- bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep -v self \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self -v bridge fdb show brport vxlan10
check_err $?
log_test "vxlan entry offload indication - initial state"
@@ -938,9 +968,9 @@ offload_indication_vlan_aware_fdb_test()
RET=0
bridge fdb del de:ad:be:ef:13:37 dev vxlan10 master vlan 10
- bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep self \
- | grep -q offload
- check_fail $?
+ busywait "$TIMEOUT" not wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self bridge fdb show brport vxlan10
+ check_err $?
log_test "vxlan entry offload indication - after removal from bridge"
@@ -949,11 +979,11 @@ offload_indication_vlan_aware_fdb_test()
RET=0
bridge fdb add de:ad:be:ef:13:37 dev vxlan10 master static vlan 10
- bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep self \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self bridge fdb show brport vxlan10
check_err $?
- bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep -v self \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self -v bridge fdb show brport vxlan10
check_err $?
log_test "vxlan entry offload indication - after re-add to bridge"
@@ -963,9 +993,9 @@ offload_indication_vlan_aware_fdb_test()
RET=0
bridge fdb del de:ad:be:ef:13:37 dev vxlan10 self
- bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep -v self \
- | grep -q offload
- check_fail $?
+ busywait "$TIMEOUT" not wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self -v bridge fdb show brport vxlan10
+ check_err $?
log_test "vxlan entry offload indication - after removal from vxlan"
@@ -974,11 +1004,11 @@ offload_indication_vlan_aware_fdb_test()
RET=0
bridge fdb add de:ad:be:ef:13:37 dev vxlan10 self dst 198.51.100.2
- bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep self \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self bridge fdb show brport vxlan10
check_err $?
- bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep -v self \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb \
+ de:ad:be:ef:13:37 self -v bridge fdb show brport vxlan10
check_err $?
log_test "vxlan entry offload indication - after re-add to vxlan"
@@ -990,28 +1020,31 @@ offload_indication_vlan_aware_decap_route_test()
{
RET=0
- ip route show table local | grep 198.51.100.1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip route show table local 198.51.100.1
check_err $?
# Toggle PVID flag on one VxLAN device and make sure route is still
# marked as offloaded
bridge vlan add vid 10 dev vxlan10 untagged
- ip route show table local | grep 198.51.100.1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload \
+ ip route show table local 198.51.100.1
check_err $?
# Toggle PVID flag on second VxLAN device and make sure route is no
# longer marked as offloaded
bridge vlan add vid 20 dev vxlan20 untagged
- ip route show table local | grep 198.51.100.1 | grep -q offload
- check_fail $?
+ busywait "$TIMEOUT" not wait_for_offload \
+ ip route show table local 198.51.100.1
+ check_err $?
# Toggle PVID flag back and make sure route is marked as offloaded
bridge vlan add vid 10 dev vxlan10 pvid untagged
bridge vlan add vid 20 dev vxlan20 pvid untagged
- ip route show table local | grep 198.51.100.1 | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload ip route show table local 198.51.100.1
check_err $?
log_test "vxlan decap route - vni map/unmap"
@@ -1064,35 +1097,33 @@ offload_indication_vlan_aware_l3vni_test()
ip link set dev vxlan0 master br0
bridge vlan add dev vxlan0 vid 10 pvid untagged
- # No local port or router port is member in the VLAN, so tunnel should
- # not be offloaded
- bridge fdb show brport vxlan0 | grep $zmac | grep self \
- | grep -q offload
- check_fail $? "vxlan tunnel offloaded when should not"
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb $zmac self \
+ bridge fdb show brport vxlan0
+ check_err $? "vxlan tunnel not offloaded when should"
# Configure a VLAN interface and make sure tunnel is offloaded
ip link add link br0 name br10 up type vlan id 10
sysctl_set net.ipv6.conf.br10.disable_ipv6 0
ip -6 address add 2001:db8:1::1/64 dev br10
- bridge fdb show brport vxlan0 | grep $zmac | grep self \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb $zmac self \
+ bridge fdb show brport vxlan0
check_err $? "vxlan tunnel not offloaded when should"
# Unlink the VXLAN device, make sure tunnel is no longer offloaded,
# then add it back to the bridge and make sure it is offloaded
ip link set dev vxlan0 nomaster
- bridge fdb show brport vxlan0 | grep $zmac | grep self \
- | grep -q offload
- check_fail $? "vxlan tunnel offloaded after unlinked from bridge"
+ busywait "$TIMEOUT" not wait_for_offload grep_bridge_fdb $zmac self \
+ bridge fdb show brport vxlan0
+ check_err $? "vxlan tunnel offloaded after unlinked from bridge"
ip link set dev vxlan0 master br0
- bridge fdb show brport vxlan0 | grep $zmac | grep self \
- | grep -q offload
- check_fail $? "vxlan tunnel offloaded despite no matching vid"
+ busywait "$TIMEOUT" not wait_for_offload grep_bridge_fdb $zmac self \
+ bridge fdb show brport vxlan0
+ check_err $? "vxlan tunnel offloaded despite no matching vid"
bridge vlan add dev vxlan0 vid 10 pvid untagged
- bridge fdb show brport vxlan0 | grep $zmac | grep self \
- | grep -q offload
+ busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb $zmac self \
+ bridge fdb show brport vxlan0
check_err $? "vxlan tunnel not offloaded after adding vid"
log_test "vxlan - l3 vni"
diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
index 025a84c2ab5a..9f9741444549 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
@@ -141,6 +141,16 @@ regions_test()
check_region_snapshot_count dummy post-first-delete 2
+ devlink region new $DL_HANDLE/dummy snapshot 25
+ check_err $? "Failed to create a new snapshot with id 25"
+
+ check_region_snapshot_count dummy post-first-request 3
+
+ devlink region del $DL_HANDLE/dummy snapshot 25
+ check_err $? "Failed to delete snapshot with id 25"
+
+ check_region_snapshot_count dummy post-second-delete 2
+
log_test "regions test"
}
@@ -367,6 +377,11 @@ dummy_reporter_test()
{
RET=0
+ check_reporter_info dummy healthy 0 0 0 true
+
+ devlink health set $DL_HANDLE reporter dummy auto_recover false
+ check_err $? "Failed to set dummy reporter auto_recover option"
+
check_reporter_info dummy healthy 0 0 0 false
local BREAK_MSG="foo bar"
diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink_trap.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink_trap.sh
index f101ab9441e2..dbd1e014ba17 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/devlink_trap.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/devlink_trap.sh
@@ -16,6 +16,8 @@ ALL_TESTS="
trap_group_action_test
bad_trap_group_test
trap_group_stats_test
+ trap_policer_test
+ trap_policer_bind_test
port_del_test
dev_del_test
"
@@ -23,6 +25,7 @@ NETDEVSIM_PATH=/sys/bus/netdevsim/
DEV_ADDR=1337
DEV=netdevsim${DEV_ADDR}
DEVLINK_DEV=netdevsim/${DEV}
+DEBUGFS_DIR=/sys/kernel/debug/netdevsim/$DEV/
SLEEP_TIME=1
NETDEV=""
NUM_NETIFS=0
@@ -103,6 +106,11 @@ trap_metadata_test()
for trap_name in $(devlink_traps_get); do
devlink_trap_metadata_test $trap_name "input_port"
check_err $? "Input port not reported as metadata of trap $trap_name"
+ if [ $trap_name == "ingress_flow_action_drop" ] ||
+ [ $trap_name == "egress_flow_action_drop" ]; then
+ devlink_trap_metadata_test $trap_name "flow_action_cookie"
+ check_err $? "Flow action cookie not reported as metadata of trap $trap_name"
+ fi
done
log_test "Trap metadata"
@@ -251,6 +259,119 @@ trap_group_stats_test()
log_test "Trap group statistics"
}
+trap_policer_test()
+{
+ local packets_t0
+ local packets_t1
+
+ if [ $(devlink_trap_policers_num_get) -eq 0 ]; then
+ check_err 1 "Failed to dump policers"
+ fi
+
+ devlink trap policer set $DEVLINK_DEV policer 1337 &> /dev/null
+ check_fail $? "Did not get an error for setting a non-existing policer"
+ devlink trap policer show $DEVLINK_DEV policer 1337 &> /dev/null
+ check_fail $? "Did not get an error for getting a non-existing policer"
+
+ devlink trap policer set $DEVLINK_DEV policer 1 rate 2000 burst 16
+ check_err $? "Failed to set valid parameters for a valid policer"
+ if [ $(devlink_trap_policer_rate_get 1) -ne 2000 ]; then
+ check_err 1 "Policer rate was not changed"
+ fi
+ if [ $(devlink_trap_policer_burst_get 1) -ne 16 ]; then
+ check_err 1 "Policer burst size was not changed"
+ fi
+
+ devlink trap policer set $DEVLINK_DEV policer 1 rate 0 &> /dev/null
+ check_fail $? "Policer rate was changed to rate lower than limit"
+ devlink trap policer set $DEVLINK_DEV policer 1 rate 9000 &> /dev/null
+ check_fail $? "Policer rate was changed to rate higher than limit"
+ devlink trap policer set $DEVLINK_DEV policer 1 burst 2 &> /dev/null
+ check_fail $? "Policer burst size was changed to burst size lower than limit"
+ devlink trap policer set $DEVLINK_DEV policer 1 burst 65537 &> /dev/null
+ check_fail $? "Policer burst size was changed to burst size higher than limit"
+ echo "y" > $DEBUGFS_DIR/fail_trap_policer_set
+ devlink trap policer set $DEVLINK_DEV policer 1 rate 3000 &> /dev/null
+ check_fail $? "Managed to set policer rate when should not"
+ echo "n" > $DEBUGFS_DIR/fail_trap_policer_set
+ if [ $(devlink_trap_policer_rate_get 1) -ne 2000 ]; then
+ check_err 1 "Policer rate was changed to an invalid value"
+ fi
+ if [ $(devlink_trap_policer_burst_get 1) -ne 16 ]; then
+ check_err 1 "Policer burst size was changed to an invalid value"
+ fi
+
+ packets_t0=$(devlink_trap_policer_rx_dropped_get 1)
+ sleep .5
+ packets_t1=$(devlink_trap_policer_rx_dropped_get 1)
+ if [ ! $packets_t1 -gt $packets_t0 ]; then
+ check_err 1 "Policer drop counter was not incremented"
+ fi
+
+ echo "y" > $DEBUGFS_DIR/fail_trap_policer_counter_get
+ devlink -s trap policer show $DEVLINK_DEV policer 1 &> /dev/null
+ check_fail $? "Managed to read policer drop counter when should not"
+ echo "n" > $DEBUGFS_DIR/fail_trap_policer_counter_get
+ devlink -s trap policer show $DEVLINK_DEV policer 1 &> /dev/null
+ check_err $? "Did not manage to read policer drop counter when should"
+
+ log_test "Trap policer"
+}
+
+trap_group_check_policer()
+{
+ local group_name=$1; shift
+
+ devlink -j -p trap group show $DEVLINK_DEV group $group_name \
+ | jq -e '.[][][]["policer"]' &> /dev/null
+}
+
+trap_policer_bind_test()
+{
+ devlink trap group set $DEVLINK_DEV group l2_drops policer 1
+ check_err $? "Failed to bind a valid policer"
+ if [ $(devlink_trap_group_policer_get "l2_drops") -ne 1 ]; then
+ check_err 1 "Bound policer was not changed"
+ fi
+
+ devlink trap group set $DEVLINK_DEV group l2_drops policer 1337 \
+ &> /dev/null
+ check_fail $? "Did not get an error for binding a non-existing policer"
+ if [ $(devlink_trap_group_policer_get "l2_drops") -ne 1 ]; then
+ check_err 1 "Bound policer was changed when should not"
+ fi
+
+ devlink trap group set $DEVLINK_DEV group l2_drops policer 0
+ check_err $? "Failed to unbind a policer when using ID 0"
+ trap_group_check_policer "l2_drops"
+ check_fail $? "Trap group has a policer after unbinding with ID 0"
+
+ devlink trap group set $DEVLINK_DEV group l2_drops policer 1
+ check_err $? "Failed to bind a valid policer"
+
+ devlink trap group set $DEVLINK_DEV group l2_drops nopolicer
+ check_err $? "Failed to unbind a policer when using 'nopolicer' keyword"
+ trap_group_check_policer "l2_drops"
+ check_fail $? "Trap group has a policer after unbinding with 'nopolicer' keyword"
+
+ devlink trap group set $DEVLINK_DEV group l2_drops policer 1
+ check_err $? "Failed to bind a valid policer"
+
+ echo "y" > $DEBUGFS_DIR/fail_trap_group_set
+ devlink trap group set $DEVLINK_DEV group l2_drops policer 2 \
+ &> /dev/null
+ check_fail $? "Managed to bind a policer when should not"
+ echo "n" > $DEBUGFS_DIR/fail_trap_group_set
+ devlink trap group set $DEVLINK_DEV group l2_drops policer 2
+ check_err $? "Did not manage to bind a policer when should"
+
+ devlink trap group set $DEVLINK_DEV group l2_drops action drop \
+ policer 1337 &> /dev/null
+ check_fail $? "Did not get an error for partially modified trap group"
+
+ log_test "Trap policer binding"
+}
+
port_del_test()
{
local group_name
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index ecc52d4c034d..997c65dcad68 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -23,3 +23,8 @@ so_txtime
tcp_fastopen_backup_key
nettest
fin_ack_lat
+reuseaddr_ports_exhausted
+hwtstamp_config
+rxtimestamp
+timestamping
+txtimestamp
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 4c1bd03ffa1c..3f386eb9e7d7 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -14,6 +14,8 @@ TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh
TEST_PROGS += fin_ack_lat.sh fib_nexthop_multiprefix.sh fib_nexthops.sh
TEST_PROGS += altnames.sh icmp_redirect.sh ip6_gre_headroom.sh
TEST_PROGS += route_localnet.sh
+TEST_PROGS += reuseaddr_ports_exhausted.sh
+TEST_PROGS += txtimestamp.sh
TEST_PROGS_EXTENDED := in_netns.sh
TEST_GEN_FILES = socket nettest
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
@@ -22,6 +24,8 @@ TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx ip_defrag
TEST_GEN_FILES += so_txtime ipv6_flowlabel ipv6_flowlabel_mgr
TEST_GEN_FILES += tcp_fastopen_backup_key
TEST_GEN_FILES += fin_ack_lat
+TEST_GEN_FILES += reuseaddr_ports_exhausted
+TEST_GEN_FILES += hwtstamp_config rxtimestamp timestamping txtimestamp
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index b8503a8119b0..3b42c06b5985 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -12,6 +12,7 @@ CONFIG_IPV6_VTI=y
CONFIG_DUMMY=y
CONFIG_BRIDGE=y
CONFIG_VLAN_8021Q=y
+CONFIG_IFB=y
CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NF_CONNTRACK=m
@@ -27,5 +28,6 @@ CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_NET_SCH_FQ=m
CONFIG_NET_SCH_ETF=m
+CONFIG_NET_SCH_NETEM=y
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_KALLSYMS=y
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
index 40b076983239..155d48bd4d9e 100644
--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -35,6 +35,12 @@ if [ $? -ne 0 ]; then
exit 1
fi
+devlink dev help 2>&1 | grep info &> /dev/null
+if [ $? -ne 0 ]; then
+ echo "SKIP: iproute2 too old, missing devlink dev info support"
+ exit 1
+fi
+
##############################################################################
# Devlink helpers
@@ -373,6 +379,7 @@ devlink_trap_drop_test()
local trap_name=$1; shift
local group_name=$1; shift
local dev=$1; shift
+ local handle=$1; shift
# This is the common part of all the tests. It checks that stats are
# initially idle, then non-idle after changing the trap action and
@@ -397,7 +404,7 @@ devlink_trap_drop_test()
devlink_trap_group_stats_idle_test $group_name
check_err $? "Trap group stats not idle after setting action to drop"
- tc_check_packets "dev $dev egress" 101 0
+ tc_check_packets "dev $dev egress" $handle 0
check_err $? "Packets were not dropped"
}
@@ -406,7 +413,68 @@ devlink_trap_drop_cleanup()
local mz_pid=$1; shift
local dev=$1; shift
local proto=$1; shift
+ local pref=$1; shift
+ local handle=$1; shift
kill $mz_pid && wait $mz_pid &> /dev/null
- tc filter del dev $dev egress protocol $proto pref 1 handle 101 flower
+ tc filter del dev $dev egress protocol $proto pref $pref handle $handle flower
+}
+
+devlink_trap_policers_num_get()
+{
+ devlink -j -p trap policer show | jq '.[]["'$DEVLINK_DEV'"] | length'
+}
+
+devlink_trap_policer_rate_get()
+{
+ local policer_id=$1; shift
+
+ devlink -j -p trap policer show $DEVLINK_DEV policer $policer_id \
+ | jq '.[][][]["rate"]'
+}
+
+devlink_trap_policer_burst_get()
+{
+ local policer_id=$1; shift
+
+ devlink -j -p trap policer show $DEVLINK_DEV policer $policer_id \
+ | jq '.[][][]["burst"]'
+}
+
+devlink_trap_policer_rx_dropped_get()
+{
+ local policer_id=$1; shift
+
+ devlink -j -p -s trap policer show $DEVLINK_DEV policer $policer_id \
+ | jq '.[][][]["stats"]["rx"]["dropped"]'
+}
+
+devlink_trap_group_policer_get()
+{
+ local group_name=$1; shift
+
+ devlink -j -p trap group show $DEVLINK_DEV group $group_name \
+ | jq '.[][][]["policer"]'
+}
+
+devlink_trap_policer_ids_get()
+{
+ devlink -j -p trap policer show \
+ | jq '.[]["'$DEVLINK_DEV'"][]["policer"]'
+}
+
+devlink_port_by_netdev()
+{
+ local if_name=$1
+
+ devlink -j port show $if_name | jq -e '.[] | keys' | jq -r '.[]'
+}
+
+devlink_cpu_port_get()
+{
+ local cpu_dl_port_num=$(devlink port list | grep "$DEVLINK_DEV" |
+ grep cpu | cut -d/ -f3 | cut -d: -f1 |
+ sed -n '1p')
+
+ echo "$DEVLINK_DEV/$cpu_dl_port_num"
}
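
devlink_port_by_netdev() and devlink_cpu_port_get() translate netdev names into devlink port handles, which is what the sharedbuffer test above feeds to the occupancy helpers. A usage sketch (the printed handles are illustrative):

    dl_port=$(devlink_port_by_netdev swp1)    # e.g. pci/0000:03:00.0/61
    cpu_port=$(devlink_cpu_port_get)          # e.g. pci/0000:03:00.0/0
    devlink sb occupancy show $dl_port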
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 2f5da414aaa7..977fc2b326a2 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -60,6 +60,15 @@ check_tc_chain_support()
fi
}
+check_tc_action_hw_stats_support()
+{
+ tc actions help 2>&1 | grep -q hw_stats
+ if [[ $? -ne 0 ]]; then
+ echo "SKIP: iproute2 too old; tc is missing action hw_stats support"
+ exit 1
+ fi
+}
+
if [[ "$(id -u)" -ne 0 ]]; then
echo "SKIP: need root privileges"
exit 0
@@ -248,13 +257,40 @@ busywait()
done
}
+not()
+{
+ "$@"
+ [[ $? != 0 ]]
+}
+
+grep_bridge_fdb()
+{
+ local addr=$1; shift
+ local word
+ local flag
+
+ if [ "$1" == "self" ] || [ "$1" == "master" ]; then
+ word=$1; shift
+ if [ "$1" == "-v" ]; then
+ flag=$1; shift
+ fi
+ fi
+
+ $@ | grep $addr | grep $flag "$word"
+}
+
+wait_for_offload()
+{
+ "$@" | grep -q offload
+}
+
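These helpers are written to compose under busywait(): grep_bridge_fdb() narrows a dump to one FDB entry, wait_for_offload() greps it for the offload flag, and not() inverts the predicate so the same loop can also wait for the flag to disappear. Two calls from the vxlan test above illustrate the pattern:

    # Poll until the "self" FDB entry for $mac is marked offloaded.
    busywait "$TIMEOUT" wait_for_offload grep_bridge_fdb $mac self \
        bridge fdb show dev vxlan0

    # Poll until the offload mark goes away again.
    busywait "$TIMEOUT" not wait_for_offload grep_bridge_fdb $mac self \
        bridge fdb show dev vxlan0
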
until_counter_is()
{
- local value=$1; shift
+ local expr=$1; shift
local current=$("$@")
echo $((current))
- ((current >= value))
+ ((current $expr))
}
busywait_for_counter()
@@ -263,7 +299,7 @@ busywait_for_counter()
local delta=$1; shift
local base=$("$@")
- busywait "$timeout" until_counter_is $((base + delta)) "$@"
+ busywait "$timeout" until_counter_is ">= $((base + delta))" "$@"
}
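
Passing the whole comparison in as a string lets callers poll for conditions other than "counter reached a value". Two sketches, using helpers defined elsewhere in this patch:

    # Wait up to 2s for filter 101 to have matched at least 10 packets.
    busywait 2000 until_counter_is ">= 10" \
        tc_rule_handle_stats_get "dev $h2 ingress" 101

    # Wait for a backlog-style counter to drain back to zero.
    busywait 2000 until_counter_is "== 0" \
        qdisc_stats_get $swp2 "10:" .backlog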
setup_wait_dev()
@@ -599,6 +635,17 @@ tc_rule_stats_get()
| jq ".[1].options.actions[].stats$selector"
}
+tc_rule_handle_stats_get()
+{
+ local id=$1; shift
+ local handle=$1; shift
+ local selector=${1:-.packets}; shift
+
+ tc -j -s filter show $id \
+ | jq ".[] | select(.options.handle == $handle) | \
+ .options.actions[0].stats$selector"
+}
+
ethtool_stats_get()
{
local dev=$1; shift
@@ -607,6 +654,26 @@ ethtool_stats_get()
ethtool -S $dev | grep "^ *$stat:" | head -n 1 | cut -d: -f2
}
+qdisc_stats_get()
+{
+ local dev=$1; shift
+ local handle=$1; shift
+ local selector=$1; shift
+
+ tc -j -s qdisc show dev "$dev" \
+ | jq '.[] | select(.handle == "'"$handle"'") | '"$selector"
+}
+
+qdisc_parent_stats_get()
+{
+ local dev=$1; shift
+ local parent=$1; shift
+ local selector=$1; shift
+
+ tc -j -s qdisc show dev "$dev" invisible \
+ | jq '.[] | select(.parent == "'"$parent"'") | '"$selector"
+}
+
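qdisc_stats_get() selects a qdisc by its handle and qdisc_parent_stats_get() by its parent, the latter also covering invisible child qdiscs; that is how sch_ets.sh below reads per-band byte counts. A usage sketch, assuming an ETS qdisc with handle 10: on $swp2:

    # Bytes that passed through band 1 (the child whose parent is 10:1).
    qdisc_parent_stats_get $swp2 10:1 .bytes

    # Packet count of the root qdisc itself.
    qdisc_stats_get $swp2 "10:" .packets
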
humanize()
{
local speed=$1; shift
@@ -1132,18 +1199,29 @@ flood_test()
flood_multicast_test $br_port $host1_if $host2_if
}
-start_traffic()
+__start_traffic()
{
+ local proto=$1; shift
local h_in=$1; shift # Where the traffic egresses the host
local sip=$1; shift
local dip=$1; shift
local dmac=$1; shift
$MZ $h_in -p 8000 -A $sip -B $dip -c 0 \
- -a own -b $dmac -t udp -q &
+ -a own -b $dmac -t "$proto" -q "$@" &
sleep 1
}
+start_traffic()
+{
+ __start_traffic udp "$@"
+}
+
+start_tcp_traffic()
+{
+ __start_traffic tcp "$@"
+}
+
stop_traffic()
{
# Suppress noise from killing mausezahn.
diff --git a/tools/testing/selftests/net/forwarding/pedit_dsfield.sh b/tools/testing/selftests/net/forwarding/pedit_dsfield.sh
new file mode 100755
index 000000000000..b50081855913
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/pedit_dsfield.sh
@@ -0,0 +1,238 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test sends traffic from H1 to H2. Either on ingress of $swp1, or on
+# egress of $swp2, the traffic is acted upon by a pedit action. An ingress
+# filter installed on $h2 verifies that the packet looks like expected.
+#
+# +----------------------+ +----------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 192.0.2.1/28 | | 192.0.2.2/28 | |
+# +----|-----------------+ +----------------|-----+
+# | |
+# +----|----------------------------------------------------------------|-----+
+# | SW | | |
+# | +-|----------------------------------------------------------------|-+ |
+# | | + $swp1 BR $swp2 + | |
+# | +--------------------------------------------------------------------+ |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ test_ip_dsfield
+ test_ip_dscp
+ test_ip_ecn
+ test_ip_dscp_ecn
+"
+
+NUM_NETIFS=4
+source lib.sh
+source tc_common.sh
+
+: ${TC_HIT_TIMEOUT:=2000} # ms
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.2/28 2001:db8:1::2/64
+ tc qdisc add dev $h2 clsact
+}
+
+h2_destroy()
+{
+ tc qdisc del dev $h2 clsact
+ simple_if_fini $h2 192.0.2.2/28 2001:db8:1::2/64
+}
+
+switch_create()
+{
+ ip link add name br1 up type bridge vlan_filtering 1
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+
+ tc qdisc add dev $swp1 clsact
+ tc qdisc add dev $swp2 clsact
+}
+
+switch_destroy()
+{
+ tc qdisc del dev $swp2 clsact
+ tc qdisc del dev $swp1 clsact
+
+ ip link set dev $swp2 nomaster
+ ip link set dev $swp1 nomaster
+ ip link del dev br1
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ h2mac=$(mac_get $h2)
+
+ vrf_prepare
+ h1_create
+ h2_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.2
+}
+
+do_test_pedit_dsfield_common()
+{
+ local pedit_locus=$1; shift
+ local pedit_action=$1; shift
+ local mz_flags=$1; shift
+
+ RET=0
+
+ # TOS 125: DSCP 31, ECN 1. Used for testing that the relevant part is
+ # overwritten when zero is selected.
+ $MZ $mz_flags $h1 -c 10 -d 20msec -p 100 \
+ -a own -b $h2mac -q -t tcp tos=0x7d,sp=54321,dp=12345
+
+ local pkts
+ pkts=$(busywait "$TC_HIT_TIMEOUT" until_counter_is ">= 10" \
+ tc_rule_handle_stats_get "dev $h2 ingress" 101)
+ check_err $? "Expected to get 10 packets, but got $pkts."
+ log_test "$pedit_locus pedit $pedit_action"
+}
+
+do_test_pedit_dsfield()
+{
+ local pedit_locus=$1; shift
+ local pedit_action=$1; shift
+ local match_prot=$1; shift
+ local match_flower=$1; shift
+ local mz_flags=$1; shift
+ local saddr=$1; shift
+ local daddr=$1; shift
+
+ tc filter add $pedit_locus handle 101 pref 1 \
+ flower action pedit ex munge $pedit_action
+ tc filter add dev $h2 ingress handle 101 pref 1 prot $match_prot \
+ flower skip_hw $match_flower action pass
+
+ do_test_pedit_dsfield_common "$pedit_locus" "$pedit_action" "$mz_flags"
+
+ tc filter del dev $h2 ingress pref 1
+ tc filter del $pedit_locus pref 1
+}
+
+do_test_ip_dsfield()
+{
+ local locus=$1; shift
+ local dsfield
+
+ for dsfield in 0 1 2 3 128 252 253 254 255; do
+ do_test_pedit_dsfield "$locus" \
+ "ip dsfield set $dsfield" \
+ ip "ip_tos $dsfield" \
+ "-A 192.0.2.1 -B 192.0.2.2"
+ done
+}
+
+test_ip_dsfield()
+{
+ do_test_ip_dsfield "dev $swp1 ingress"
+ do_test_ip_dsfield "dev $swp2 egress"
+}
+
+do_test_ip_dscp()
+{
+ local locus=$1; shift
+ local dscp
+
+ for dscp in 0 1 2 3 32 61 62 63; do
+ do_test_pedit_dsfield "$locus" \
+ "ip dsfield set $((dscp << 2)) retain 0xfc" \
+ ip "ip_tos $(((dscp << 2) | 1))" \
+ "-A 192.0.2.1 -B 192.0.2.2"
+ done
+}
+
+test_ip_dscp()
+{
+ do_test_ip_dscp "dev $swp1 ingress"
+ do_test_ip_dscp "dev $swp2 egress"
+}
+
+do_test_ip_ecn()
+{
+ local locus=$1; shift
+ local ecn
+
+ for ecn in 0 1 2 3; do
+ do_test_pedit_dsfield "$locus" \
+ "ip dsfield set $ecn retain 0x03" \
+ ip "ip_tos $((124 | $ecn))" \
+ "-A 192.0.2.1 -B 192.0.2.2"
+ done
+}
+
+test_ip_ecn()
+{
+ do_test_ip_ecn "dev $swp1 ingress"
+ do_test_ip_ecn "dev $swp2 egress"
+}
+
+do_test_ip_dscp_ecn()
+{
+ local locus=$1; shift
+
+ tc filter add $locus handle 101 pref 1 \
+ flower action pedit ex munge ip dsfield set 124 retain 0xfc \
+ action pedit ex munge ip dsfield set 1 retain 0x03
+ tc filter add dev $h2 ingress handle 101 pref 1 prot ip \
+ flower skip_hw ip_tos 125 action pass
+
+ do_test_pedit_dsfield_common "$locus" "set DSCP + set ECN" \
+ "-A 192.0.2.1 -B 192.0.2.2"
+
+ tc filter del dev $h2 ingress pref 1
+ tc filter del $locus pref 1
+}
+
+test_ip_dscp_ecn()
+{
+ do_test_ip_dscp_ecn "dev $swp1 ingress"
+ do_test_ip_dscp_ecn "dev $swp2 egress"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/sch_ets.sh b/tools/testing/selftests/net/forwarding/sch_ets.sh
index 40e0ad1bc4f2..e60c8b4818cc 100755
--- a/tools/testing/selftests/net/forwarding/sch_ets.sh
+++ b/tools/testing/selftests/net/forwarding/sch_ets.sh
@@ -34,11 +34,14 @@ switch_destroy()
}
# Callback from sch_ets_tests.sh
-get_stats()
+collect_stats()
{
- local stream=$1; shift
+ local -a streams=("$@")
+ local stream
- link_stats_get $h2.1$stream rx bytes
+ for stream in ${streams[@]}; do
+ qdisc_parent_stats_get $swp2 10:$((stream + 1)) .bytes
+ done
}
ets_run
diff --git a/tools/testing/selftests/net/forwarding/sch_ets_tests.sh b/tools/testing/selftests/net/forwarding/sch_ets_tests.sh
index 3c3b204d47e8..cdf689e99458 100644
--- a/tools/testing/selftests/net/forwarding/sch_ets_tests.sh
+++ b/tools/testing/selftests/net/forwarding/sch_ets_tests.sh
@@ -2,7 +2,7 @@
# Global interface:
# $put -- port under test (e.g. $swp2)
-# get_stats($band) -- A function to collect stats for band
+# collect_stats($streams...) -- A function to get stats for individual streams
# ets_start_traffic($band) -- Start traffic for this band
# ets_change_qdisc($op, $dev, $nstrict, $quanta...) -- Add or change qdisc
@@ -94,15 +94,11 @@ __ets_dwrr_test()
sleep 10
- t0=($(for stream in ${streams[@]}; do
- get_stats $stream
- done))
+ t0=($(collect_stats "${streams[@]}"))
sleep 10
- t1=($(for stream in ${streams[@]}; do
- get_stats $stream
- done))
+ t1=($(collect_stats "${streams[@]}"))
d=($(for ((i = 0; i < ${#streams[@]}; i++)); do
echo $((${t1[$i]} - ${t0[$i]}))
done))
diff --git a/tools/testing/selftests/net/forwarding/skbedit_priority.sh b/tools/testing/selftests/net/forwarding/skbedit_priority.sh
new file mode 100755
index 000000000000..e3bd8a6bb8b4
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/skbedit_priority.sh
@@ -0,0 +1,168 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test sends traffic from H1 to H2. Either on ingress of $swp1 or on
+# egress of $swp2, the traffic is subjected to a skbedit priority action. The
+# new priority should be taken into account when classifying traffic on the
+# PRIO qdisc at $swp2. The test verifies that, for each priority value, the
+# traffic ends up in the expected PRIO band.
+#
+# +----------------------+ +----------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 192.0.2.1/28 | | 192.0.2.2/28 | |
+# +----|-----------------+ +----------------|-----+
+# | |
+# +----|----------------------------------------------------------------|-----+
+# | SW | | |
+# | +-|----------------------------------------------------------------|-+ |
+# | | + $swp1 BR $swp2 + | |
+# | | PRIO | |
+# | +--------------------------------------------------------------------+ |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ test_ingress
+ test_egress
+"
+
+NUM_NETIFS=4
+source lib.sh
+
+: ${HIT_TIMEOUT:=2000} # ms
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 192.0.2.1/28
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.2/28
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 192.0.2.2/28
+}
+
+switch_create()
+{
+ ip link add name br1 up type bridge vlan_filtering 1
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+
+ tc qdisc add dev $swp1 clsact
+ tc qdisc add dev $swp2 clsact
+ tc qdisc add dev $swp2 root handle 10: \
+ prio bands 8 priomap 7 6 5 4 3 2 1 0
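+ # The priomap above maps skb->priority N to band (7 - N), which PRIO
+ # exposes as class 10:$((8 - N)); test_ingress/test_egress rely on this.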
+}
+
+switch_destroy()
+{
+ tc qdisc del dev $swp2 root
+ tc qdisc del dev $swp2 clsact
+ tc qdisc del dev $swp1 clsact
+
+ ip link set dev $swp2 nomaster
+ ip link set dev $swp1 nomaster
+ ip link del dev br1
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ h2mac=$(mac_get $h2)
+
+ vrf_prepare
+ h1_create
+ h2_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.2
+}
+
+test_skbedit_priority_one()
+{
+ local locus=$1; shift
+ local prio=$1; shift
+ local classid=$1; shift
+
+ RET=0
+
+ tc filter add $locus handle 101 pref 1 \
+ flower action skbedit priority $prio
+
+ local pkt0=$(qdisc_parent_stats_get $swp2 $classid .packets)
+ local pkt2=$(tc_rule_handle_stats_get "$locus" 101)
+ $MZ $h1 -t udp "sp=54321,dp=12345" -c 10 -d 20msec -p 100 \
+ -a own -b $h2mac -A 192.0.2.1 -B 192.0.2.2 -q
+
+ local pkt1
+ pkt1=$(busywait "$HIT_TIMEOUT" until_counter_is ">= $((pkt0 + 10))" \
+ qdisc_parent_stats_get $swp2 $classid .packets)
+ check_err $? "Expected to get 10 packets on class $classid, but got $((pkt1 - pkt0))."
+
+ local pkt3=$(tc_rule_handle_stats_get "$locus" 101)
+ ((pkt3 >= pkt2 + 10))
+ check_err $? "Expected to get 10 packets on skbedit rule but got $((pkt3 - pkt2))."
+
+ log_test "$locus skbedit priority $prio -> classid $classid"
+
+ tc filter del $locus pref 1
+}
+
+test_ingress()
+{
+ local prio
+
+ for prio in {0..7}; do
+ test_skbedit_priority_one "dev $swp1 ingress" \
+ $prio 10:$((8 - prio))
+ done
+}
+
+test_egress()
+{
+ local prio
+
+ for prio in {0..7}; do
+ test_skbedit_priority_one "dev $swp2 egress" \
+ $prio 10:$((8 - prio))
+ done
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/tc_common.sh b/tools/testing/selftests/net/forwarding/tc_common.sh
index 64f652633585..0e18e8be6e2a 100644
--- a/tools/testing/selftests/net/forwarding/tc_common.sh
+++ b/tools/testing/selftests/net/forwarding/tc_common.sh
@@ -6,39 +6,14 @@ CHECK_TC="yes"
# Can be overridden by the configuration file. See lib.sh
TC_HIT_TIMEOUT=${TC_HIT_TIMEOUT:=1000} # ms
-__tc_check_packets()
-{
- local id=$1
- local handle=$2
- local count=$3
- local operator=$4
-
- start_time="$(date -u +%s%3N)"
- while true
- do
- cmd_jq "tc -j -s filter show $id" \
- ".[] | select(.options.handle == $handle) | \
- select(.options.actions[0].stats.packets $operator $count)" \
- &> /dev/null
- ret=$?
- if [[ $ret -eq 0 ]]; then
- return $ret
- fi
- current_time="$(date -u +%s%3N)"
- diff=$(expr $current_time - $start_time)
- if [ "$diff" -gt "$TC_HIT_TIMEOUT" ]; then
- return 1
- fi
- done
-}
-
tc_check_packets()
{
local id=$1
local handle=$2
local count=$3
- __tc_check_packets "$id" "$handle" "$count" "=="
+ busywait "$TC_HIT_TIMEOUT" until_counter_is "== $count" \
+ tc_rule_handle_stats_get "$id" "$handle" > /dev/null
}
tc_check_packets_hitting()
@@ -46,5 +21,6 @@ tc_check_packets_hitting()
local id=$1
local handle=$2
- __tc_check_packets "$id" "$handle" 0 ">"
+ busywait "$TC_HIT_TIMEOUT" until_counter_is "> 0" \
+ tc_rule_handle_stats_get "$id" "$handle" > /dev/null
}
diff --git a/tools/testing/selftests/networking/timestamping/hwtstamp_config.c b/tools/testing/selftests/net/hwtstamp_config.c
index e1fdee841021..e1fdee841021 100644
--- a/tools/testing/selftests/networking/timestamping/hwtstamp_config.c
+++ b/tools/testing/selftests/net/hwtstamp_config.c
diff --git a/tools/testing/selftests/net/mptcp/.gitignore b/tools/testing/selftests/net/mptcp/.gitignore
index d72f07642738..ea13b255a99d 100644
--- a/tools/testing/selftests/net/mptcp/.gitignore
+++ b/tools/testing/selftests/net/mptcp/.gitignore
@@ -1,2 +1,3 @@
mptcp_connect
+pm_nl_ctl
*.pcap
diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
index ba450e62dc5b..f50976ee7d44 100644
--- a/tools/testing/selftests/net/mptcp/Makefile
+++ b/tools/testing/selftests/net/mptcp/Makefile
@@ -1,12 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
top_srcdir = ../../../../..
+KSFT_KHDR_INSTALL := 1
-CFLAGS = -Wall -Wl,--no-as-needed -O2 -g
+CFLAGS = -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include
-TEST_PROGS := mptcp_connect.sh
+TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh
-TEST_GEN_FILES = mptcp_connect
+TEST_GEN_FILES = mptcp_connect pm_nl_ctl
TEST_FILES := settings
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index 99579c0223c1..cedee5b952ba 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -34,8 +34,8 @@ extern int optind;
#define TCP_ULP 31
#endif
+static int poll_timeout = 10 * 1000;
static bool listen_mode;
-static int poll_timeout;
enum cfg_mode {
CFG_MODE_POLL,
@@ -50,11 +50,21 @@ static int cfg_sock_proto = IPPROTO_MPTCP;
static bool tcpulp_audit;
static int pf = AF_INET;
static int cfg_sndbuf;
+static int cfg_rcvbuf;
+static bool cfg_join;
static void die_usage(void)
{
- fprintf(stderr, "Usage: mptcp_connect [-6] [-u] [-s MPTCP|TCP] [-p port] -m mode]"
- "[ -l ] [ -t timeout ] connect_address\n");
+ fprintf(stderr, "Usage: mptcp_connect [-6] [-u] [-s MPTCP|TCP] [-p port] [-m mode]"
+ "[-l] connect_address\n");
+ fprintf(stderr, "\t-6 use ipv6\n");
+ fprintf(stderr, "\t-t num -- set poll timeout to num\n");
+ fprintf(stderr, "\t-S num -- set SO_SNDBUF to num\n");
+ fprintf(stderr, "\t-R num -- set SO_RCVBUF to num\n");
+ fprintf(stderr, "\t-p num -- use port num\n");
+ fprintf(stderr, "\t-m [MPTCP|TCP] -- use tcp or mptcp sockets\n");
+ fprintf(stderr, "\t-s [mmap|poll] -- use poll (default) or mmap\n");
+ fprintf(stderr, "\t-u -- check mptcp ulp\n");
exit(1);
}
@@ -97,6 +107,17 @@ static void xgetaddrinfo(const char *node, const char *service,
}
}
+static void set_rcvbuf(int fd, unsigned int size)
+{
+ int err;
+
+ err = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &size, sizeof(size));
+ if (err) {
+ perror("set SO_RCVBUF");
+ exit(1);
+ }
+}
+
static void set_sndbuf(int fd, unsigned int size)
{
int err;
@@ -230,6 +251,7 @@ static int sock_connect_mptcp(const char * const remoteaddr,
static size_t do_rnd_write(const int fd, char *buf, const size_t len)
{
+ static bool first = true;
unsigned int do_w;
ssize_t bw;
@@ -237,10 +259,19 @@ static size_t do_rnd_write(const int fd, char *buf, const size_t len)
if (do_w == 0 || do_w > len)
do_w = len;
+ if (cfg_join && first && do_w > 100)
+ do_w = 100;
+
bw = write(fd, buf, do_w);
if (bw < 0)
perror("write");
+ /* let the join handshake complete before going on */
+ if (cfg_join && first) {
+ usleep(200000);
+ first = false;
+ }
+
return bw;
}
@@ -365,8 +396,11 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd)
break;
/* ... but we still receive.
- * Close our write side.
+ * Close our write side and, if needed, give some
+ * time for the address notification to be processed
*/
+ if (cfg_join)
+ usleep(400000);
shutdown(peerfd, SHUT_WR);
} else {
if (errno == EINTR)
@@ -383,6 +417,10 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd)
}
}
+ /* leave some time for late join/announce */
+ if (cfg_join)
+ usleep(400000);
+
close(peerfd);
return 0;
}
@@ -638,7 +676,7 @@ static void maybe_close(int fd)
{
unsigned int r = rand();
- if (r & 1)
+ if (!cfg_join && (r & 1))
close(fd);
}
@@ -704,6 +742,8 @@ int main_loop(void)
check_getpeername_connect(fd);
+ if (cfg_rcvbuf)
+ set_rcvbuf(fd, cfg_rcvbuf);
if (cfg_sndbuf)
set_sndbuf(fd, cfg_sndbuf);
@@ -745,7 +785,7 @@ int parse_mode(const char *mode)
return 0;
}
-int parse_sndbuf(const char *size)
+static int parse_int(const char *size)
{
unsigned long s;
@@ -765,17 +805,19 @@ int parse_sndbuf(const char *size)
die_usage();
}
- cfg_sndbuf = s;
-
- return 0;
+ return (int)s;
}
static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "6lp:s:hut:m:b:")) != -1) {
+ while ((c = getopt(argc, argv, "6jlp:s:hut:m:S:R:")) != -1) {
switch (c) {
+ case 'j':
+ cfg_join = true;
+ cfg_mode = CFG_MODE_POLL;
+ break;
case 'l':
listen_mode = true;
break;
@@ -802,8 +844,11 @@ static void parse_opts(int argc, char **argv)
case 'm':
cfg_mode = parse_mode(optarg);
break;
- case 'b':
- cfg_sndbuf = parse_sndbuf(optarg);
+ case 'S':
+ cfg_sndbuf = parse_int(optarg);
+ break;
+ case 'R':
+ cfg_rcvbuf = parse_int(optarg);
break;
}
}
@@ -831,6 +876,8 @@ int main(int argc, char *argv[])
if (fd < 0)
return 1;
+ if (cfg_rcvbuf)
+ set_rcvbuf(fd, cfg_rcvbuf);
if (cfg_sndbuf)
set_sndbuf(fd, cfg_sndbuf);
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index d573a0feb98d..acf02e156d20 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -3,7 +3,7 @@
time_start=$(date +%s)
-optstring="b:d:e:l:r:h4cm:"
+optstring="S:R:d:e:l:r:h4cm:"
ret=0
sin=""
sout=""
@@ -19,6 +19,7 @@ tc_loss=$((RANDOM%101))
tc_reorder=""
testmode=""
sndbuf=0
+rcvbuf=0
options_log=true
if [ $tc_loss -eq 100 ];then
@@ -39,7 +40,8 @@ usage() {
echo -e "\t-e: ethtool features to disable, e.g.: \"-e tso -e gso\" (default: randomly disable any of tso/gso/gro)"
echo -e "\t-4: IPv4 only: disable IPv6 tests (default: test both IPv4 and IPv6)"
echo -e "\t-c: capture packets for each test using tcpdump (default: no capture)"
- echo -e "\t-b: set sndbuf value (default: use kernel default)"
+ echo -e "\t-S: set sndbuf value (default: use kernel default)"
+ echo -e "\t-R: set rcvbuf value (default: use kernel default)"
echo -e "\t-m: test mode (poll, sendfile; default: poll)"
}
@@ -73,11 +75,19 @@ while getopts "$optstring" option;do
"c")
capture=true
;;
- "b")
+ "S")
if [ $OPTARG -ge 0 ];then
sndbuf="$OPTARG"
else
- echo "-s requires numeric argument, got \"$OPTARG\"" 1>&2
+ echo "-S requires numeric argument, got \"$OPTARG\"" 1>&2
+ exit 1
+ fi
+ ;;
+ "R")
+ if [ $OPTARG -ge 0 ];then
+ rcvbuf="$OPTARG"
+ else
+ echo "-R requires numeric argument, got \"$OPTARG\"" 1>&2
exit 1
fi
;;
@@ -342,8 +352,12 @@ do_transfer()
port=$((10000+$TEST_COUNT))
TEST_COUNT=$((TEST_COUNT+1))
+ if [ "$rcvbuf" -gt 0 ]; then
+ extra_args="$extra_args -R $rcvbuf"
+ fi
+
if [ "$sndbuf" -gt 0 ]; then
- extra_args="$extra_args -b $sndbuf"
+ extra_args="$extra_args -S $sndbuf"
fi
if [ -n "$testmode" ]; then
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
new file mode 100755
index 000000000000..dd42c2f692d0
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -0,0 +1,357 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ret=0
+sin=""
+sout=""
+cin=""
+cout=""
+ksft_skip=4
+timeout=30
+capture=0
+
+TEST_COUNT=0
+
+init()
+{
+ capout=$(mktemp)
+
+ sec=$(date +%s)
+ rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
+
+ ns1="ns1-$rndh"
+ ns2="ns2-$rndh"
+
+ for netns in "$ns1" "$ns2";do
+ ip netns add $netns || exit $ksft_skip
+ ip -net $netns link set lo up
+ ip netns exec $netns sysctl -q net.mptcp.enabled=1
+ ip netns exec $netns sysctl -q net.ipv4.conf.all.rp_filter=0
+ ip netns exec $netns sysctl -q net.ipv4.conf.default.rp_filter=0
+ done
+
+ # ns1 ns2
+ # ns1eth1 ns2eth1
+ # ns1eth2 ns2eth2
+ # ns1eth3 ns2eth3
+ # ns1eth4 ns2eth4
+
+ for i in `seq 1 4`; do
+ ip link add ns1eth$i netns "$ns1" type veth peer name ns2eth$i netns "$ns2"
+ ip -net "$ns1" addr add 10.0.$i.1/24 dev ns1eth$i
+ ip -net "$ns1" addr add dead:beef:$i::1/64 dev ns1eth$i nodad
+ ip -net "$ns1" link set ns1eth$i up
+
+ ip -net "$ns2" addr add 10.0.$i.2/24 dev ns2eth$i
+ ip -net "$ns2" addr add dead:beef:$i::2/64 dev ns2eth$i nodad
+ ip -net "$ns2" link set ns2eth$i up
+
+ # let $ns2 reach any $ns1 address from any interface
+ ip -net "$ns2" route add default via 10.0.$i.1 dev ns2eth$i metric 10$i
+ done
+}
+
+cleanup_partial()
+{
+ rm -f "$capout"
+
+ for netns in "$ns1" "$ns2"; do
+ ip netns del $netns
+ done
+}
+
+cleanup()
+{
+ rm -f "$cin" "$cout"
+ rm -f "$sin" "$sout"
+ cleanup_partial
+}
+
+reset()
+{
+ cleanup_partial
+ init
+}
+
+for arg in "$@"; do
+ if [ "$arg" = "-c" ]; then
+ capture=1
+ fi
+done
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+
+check_transfer()
+{
+ in=$1
+ out=$2
+ what=$3
+
+ cmp "$in" "$out" > /dev/null 2>&1
+ if [ $? -ne 0 ] ;then
+ echo "[ FAIL ] $what does not match (in, out):"
+ print_file_err "$in"
+ print_file_err "$out"
+
+ return 1
+ fi
+
+ return 0
+}
+
+do_ping()
+{
+ listener_ns="$1"
+ connector_ns="$2"
+ connect_addr="$3"
+
+ ip netns exec ${connector_ns} ping -q -c 1 $connect_addr >/dev/null
+ if [ $? -ne 0 ] ; then
+ echo "$listener_ns -> $connect_addr connectivity [ FAIL ]" 1>&2
+ ret=1
+ fi
+}
+
+do_transfer()
+{
+ listener_ns="$1"
+ connector_ns="$2"
+ cl_proto="$3"
+ srv_proto="$4"
+ connect_addr="$5"
+
+ port=$((10000+$TEST_COUNT))
+ TEST_COUNT=$((TEST_COUNT+1))
+
+ :> "$cout"
+ :> "$sout"
+ :> "$capout"
+
+ if [ $capture -eq 1 ]; then
+ if [ -z $SUDO_USER ] ; then
+ capuser=""
+ else
+ capuser="-Z $SUDO_USER"
+ fi
+
+ capfile="mp_join-${listener_ns}.pcap"
+
+ echo "Capturing traffic for test $TEST_COUNT into $capfile"
+ ip netns exec ${listener_ns} tcpdump -i any -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 &
+ cappid=$!
+
+ sleep 1
+ fi
+
+ ip netns exec ${listener_ns} ./mptcp_connect -j -t $timeout -l -p $port -s ${srv_proto} 0.0.0.0 < "$sin" > "$sout" &
+ spid=$!
+
+ sleep 1
+
+ ip netns exec ${connector_ns} ./mptcp_connect -j -t $timeout -p $port -s ${cl_proto} $connect_addr < "$cin" > "$cout" &
+ cpid=$!
+
+ wait $cpid
+ retc=$?
+ wait $spid
+ rets=$?
+
+ if [ $capture -eq 1 ]; then
+ sleep 1
+ kill $cappid
+ fi
+
+ if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
+ echo " client exit code $retc, server $rets" 1>&2
+ echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2
+ ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port"
+ echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2
+ ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port"
+
+ cat "$capout"
+ return 1
+ fi
+
+ check_transfer $sin $cout "file received by client"
+ retc=$?
+ check_transfer $cin $sout "file received by server"
+ rets=$?
+
+ if [ $retc -eq 0 ] && [ $rets -eq 0 ];then
+ cat "$capout"
+ return 0
+ fi
+
+ cat "$capout"
+ return 1
+}
+
+make_file()
+{
+ name=$1
+ who=$2
+
+ SIZE=1
+
+ dd if=/dev/urandom of="$name" bs=1024 count=$SIZE 2> /dev/null
+ echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "$name"
+
+ echo "Created $name (size $SIZE KB) containing data sent by $who"
+}
+
+run_tests()
+{
+ listener_ns="$1"
+ connector_ns="$2"
+ connect_addr="$3"
+ lret=0
+
+ do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr}
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ ret=$lret
+ return
+ fi
+}
+
+chk_join_nr()
+{
+ local msg="$1"
+ local syn_nr=$2
+ local syn_ack_nr=$3
+ local ack_nr=$4
+ local count
+ local dump_stats
+
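+ # The MPTcpExtMPJoin* MIBs below count packets on the receive side:
+ # the server ($ns1) sees the JOIN SYNs and final ACKs, while the
+ # client ($ns2) sees the SYN+ACKs.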
+ printf "%-36s %s" "$msg" "syn"
+ count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinSynRx | awk '{print $2}'`
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$syn_nr" ]; then
+ echo "[fail] got $count JOIN[s] syn expected $syn_nr"
+ ret=1
+ dump_stats=1
+ else
+ echo -n "[ ok ]"
+ fi
+
+ echo -n " - synack"
+ count=`ip netns exec $ns2 nstat -as | grep MPTcpExtMPJoinSynAckRx | awk '{print $2}'`
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$syn_ack_nr" ]; then
+ echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr"
+ ret=1
+ dump_stats=1
+ else
+ echo -n "[ ok ]"
+ fi
+
+ echo -n " - ack"
+ count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinAckRx | awk '{print $2}'`
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$ack_nr" ]; then
+ echo "[fail] got $count JOIN[s] ack expected $ack_nr"
+ ret=1
+ dump_stats=1
+ else
+ echo "[ ok ]"
+ fi
+ if [ "${dump_stats}" = 1 ]; then
+ echo Server ns stats
+ ip netns exec $ns1 nstat -as | grep MPTcp
+ echo Client ns stats
+ ip netns exec $ns2 nstat -as | grep MPTcp
+ fi
+}
+
+sin=$(mktemp)
+sout=$(mktemp)
+cin=$(mktemp)
+cout=$(mktemp)
+init
+make_file "$cin" "client"
+make_file "$sin" "server"
+trap cleanup EXIT
+
+run_tests $ns1 $ns2 10.0.1.1
+chk_join_nr "no JOIN" "0" "0" "0"
+
+# subflow limited by client
+reset
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+run_tests $ns1 $ns2 10.0.1.1
+chk_join_nr "single subflow, limited by client" 0 0 0
+
+# subflow limited by server
+reset
+ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+run_tests $ns1 $ns2 10.0.1.1
+chk_join_nr "single subflow, limited by server" 1 1 0
+
+# subflow
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+run_tests $ns1 $ns2 10.0.1.1
+chk_join_nr "single subflow" 1 1 1
+
+# multiple subflows
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 2
+ip netns exec $ns2 ./pm_nl_ctl limits 0 2
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow
+run_tests $ns1 $ns2 10.0.1.1
+chk_join_nr "multiple subflows" 2 2 2
+
+# multiple subflows, limited by server
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ip netns exec $ns2 ./pm_nl_ctl limits 0 2
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow
+run_tests $ns1 $ns2 10.0.1.1
+chk_join_nr "multiple subflows, limited by server" 2 2 1
+
+# add_address, unused
+reset
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+run_tests $ns1 $ns2 10.0.1.1
+chk_join_nr "unused signal address" 0 0 0
+
+# accept and use add_addr
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ip netns exec $ns2 ./pm_nl_ctl limits 1 1
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+run_tests $ns1 $ns2 10.0.1.1
+chk_join_nr "signal address" 1 1 1
+
+# accept and use add_addr with an additional subflow
+# note: the signal address in the server ns and the local addresses in the
+# client ns must belong to different subnets, or one of the listed local
+# addresses could be used for the 'add_addr' subflow
+reset
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ip netns exec $ns1 ./pm_nl_ctl limits 0 2
+ip netns exec $ns2 ./pm_nl_ctl limits 1 2
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+run_tests $ns1 $ns2 10.0.1.1
+chk_join_nr "subflow and signal" 2 2 2
+
+# accept and use add_addr with additional subflows
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 3
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ip netns exec $ns2 ./pm_nl_ctl limits 1 3
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags subflow
+run_tests $ns1 $ns2 10.0.1.1
+chk_join_nr "multiple subflows and signal" 3 3 3
+
+exit $ret
diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
new file mode 100755
index 000000000000..9172746b6cf0
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ksft_skip=4
+ret=0
+
+usage() {
+ echo "Usage: $0 [ -h ]"
+}
+
+
+while getopts "$optstring" option;do
+ case "$option" in
+ "h")
+ usage $0
+ exit 0
+ ;;
+ "?")
+ usage $0
+ exit 1
+ ;;
+ esac
+done
+
+sec=$(date +%s)
+rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
+ns1="ns1-$rndh"
+err=$(mktemp)
+ret=0
+
+cleanup()
+{
+ rm -f $err
+ ip netns del $ns1
+}
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+ip netns add $ns1 || exit $ksft_skip
+ip -net $ns1 link set lo up
+ip netns exec $ns1 sysctl -q net.mptcp.enabled=1
+
+check()
+{
+ local cmd="$1"
+ local expected="$2"
+ local msg="$3"
+ local out=`$cmd 2>$err`
+ local cmd_ret=$?
+
+ printf "%-50s %s" "$msg"
+ if [ $cmd_ret -ne 0 ]; then
+ echo "[FAIL] command execution '$cmd' stderr "
+ cat $err
+ ret=1
+ elif [ "$out" = "$expected" ]; then
+ echo "[ OK ]"
+ else
+ echo -n "[FAIL] "
+ echo "expected '$expected' got '$out'"
+ ret=1
+ fi
+}
+
+check "ip netns exec $ns1 ./pm_nl_ctl dump" "" "defaults addr list"
+check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0
+subflows 0" "defaults limits"
+
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.1
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.2 flags subflow dev lo
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.3 flags signal,backup
+check "ip netns exec $ns1 ./pm_nl_ctl get 1" "id 1 flags 10.0.1.1" "simple add/get addr"
+
+check "ip netns exec $ns1 ./pm_nl_ctl dump" \
+"id 1 flags 10.0.1.1
+id 2 flags subflow dev lo 10.0.1.2
+id 3 flags signal,backup 10.0.1.3" "dump addrs"
+
+ip netns exec $ns1 ./pm_nl_ctl del 2
+check "ip netns exec $ns1 ./pm_nl_ctl get 2" "" "simple del addr"
+check "ip netns exec $ns1 ./pm_nl_ctl dump" \
+"id 1 flags 10.0.1.1
+id 3 flags signal,backup 10.0.1.3" "dump addrs after del"
+
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.3
+check "ip netns exec $ns1 ./pm_nl_ctl get 4" "" "duplicate addr"
+
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.4 id 10 flags signal
+check "ip netns exec $ns1 ./pm_nl_ctl get 4" "id 4 flags signal 10.0.1.4" "id addr increment"
+
+for i in `seq 5 9`; do
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.$i flags signal >/dev/null 2>&1
+done
+check "ip netns exec $ns1 ./pm_nl_ctl get 9" "id 9 flags signal 10.0.1.9" "hard addr limit"
+check "ip netns exec $ns1 ./pm_nl_ctl get 10" "" "above hard addr limit"
+
+for i in `seq 9 256`; do
+ ip netns exec $ns1 ./pm_nl_ctl del $i
+ ip netns exec $ns1 ./pm_nl_ctl add 10.0.0.9
+done
+check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags 10.0.1.1
+id 3 flags signal,backup 10.0.1.3
+id 4 flags signal 10.0.1.4
+id 5 flags signal 10.0.1.5
+id 6 flags signal 10.0.1.6
+id 7 flags signal 10.0.1.7
+id 8 flags signal 10.0.1.8" "id limit"
+
+ip netns exec $ns1 ./pm_nl_ctl flush
+check "ip netns exec $ns1 ./pm_nl_ctl dump" "" "flush addrs"
+
+ip netns exec $ns1 ./pm_nl_ctl limits 9 1
+check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0
+subflows 0" "rcv addrs above hard limit"
+
+ip netns exec $ns1 ./pm_nl_ctl limits 1 9
+check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 0
+subflows 0" "subflows above hard limit"
+
+ip netns exec $ns1 ./pm_nl_ctl limits 8 8
+check "ip netns exec $ns1 ./pm_nl_ctl limits" "accept 8
+subflows 8" "set limits"
+
+exit $ret
diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
new file mode 100644
index 000000000000..b24a2f17d415
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <errno.h>
+#include <error.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <arpa/inet.h>
+#include <net/if.h>
+
+#include <linux/rtnetlink.h>
+#include <linux/genetlink.h>
+
+#include "linux/mptcp.h"
+
+#ifndef MPTCP_PM_NAME
+#define MPTCP_PM_NAME "mptcp_pm"
+#endif
+
+static void syntax(char *argv[])
+{
+ fprintf(stderr, "%s add|get|del|flush|dump|accept [<args>]\n", argv[0]);
+ fprintf(stderr, "\tadd [flags signal|subflow|backup] [id <nr>] [dev <name>] <ip>\n");
+ fprintf(stderr, "\tdel <id>\n");
+ fprintf(stderr, "\tget <id>\n");
+ fprintf(stderr, "\tflush\n");
+ fprintf(stderr, "\tdump\n");
+ fprintf(stderr, "\tlimits [<rcv addr max> <subflow max>]\n");
+ exit(0);
+}
+
+static int init_genl_req(char *data, int family, int cmd, int version)
+{
+ struct nlmsghdr *nh = (void *)data;
+ struct genlmsghdr *gh;
+ int off = 0;
+
+ nh->nlmsg_type = family;
+ nh->nlmsg_flags = NLM_F_REQUEST;
+ nh->nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
+ off += NLMSG_ALIGN(sizeof(*nh));
+
+ gh = (void *)(data + off);
+ gh->cmd = cmd;
+ gh->version = version;
+ off += NLMSG_ALIGN(sizeof(*gh));
+ return off;
+}
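+
+/*
+ * Every request buffer built below shares the same genetlink layout:
+ *
+ *   struct nlmsghdr | struct genlmsghdr | struct rtattr attribute(s)
+ *
+ * init_genl_req() fills in the two headers and returns the offset at
+ * which callers append their attributes.
+ */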
+
+static void nl_error(struct nlmsghdr *nh)
+{
+ struct nlmsgerr *err = (struct nlmsgerr *)NLMSG_DATA(nh);
+ int len = nh->nlmsg_len - sizeof(*nh);
+ uint32_t off;
+
+ if (len < sizeof(struct nlmsgerr))
+ error(1, 0, "netlink error message truncated %d min %ld", len,
+ sizeof(struct nlmsgerr));
+
+ if (!err->error) {
+ /* check messages from kernel */
+ struct rtattr *attrs = (struct rtattr *)NLMSG_DATA(nh);
+
+ while (RTA_OK(attrs, len)) {
+ if (attrs->rta_type == NLMSGERR_ATTR_MSG)
+ fprintf(stderr, "netlink ext ack msg: %s\n",
+ (char *)RTA_DATA(attrs));
+ if (attrs->rta_type == NLMSGERR_ATTR_OFFS) {
+ memcpy(&off, RTA_DATA(attrs), 4);
+ fprintf(stderr, "netlink err off %d\n",
+ (int)off);
+ }
+ attrs = RTA_NEXT(attrs, len);
+ }
+ } else {
+ fprintf(stderr, "netlink error %d", err->error);
+ }
+}
+
+/* do a netlink command and, if max > 0, fetch the reply */
+static int do_nl_req(int fd, struct nlmsghdr *nh, int len, int max)
+{
+ struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
+ socklen_t addr_len;
+ void *data = nh;
+ int rem, ret;
+ int err = 0;
+
+ nh->nlmsg_len = len;
+ ret = sendto(fd, data, len, 0, (void *)&nladdr, sizeof(nladdr));
+ if (ret != len)
+ error(1, errno, "send netlink: %uB != %uB\n", ret, len);
+ if (max == 0)
+ return 0;
+
+ addr_len = sizeof(nladdr);
+ rem = ret = recvfrom(fd, data, max, 0, (void *)&nladdr, &addr_len);
+ if (ret < 0)
+ error(1, errno, "recv netlink: %uB\n", ret);
+
+ /* Beware: the NLMSG_NEXT macro updates the 'rem' argument */
+ for (; NLMSG_OK(nh, rem); nh = NLMSG_NEXT(nh, rem)) {
+ if (nh->nlmsg_type == NLMSG_ERROR) {
+ nl_error(nh);
+ err = 1;
+ }
+ }
+ if (err)
+ error(1, 0, "bailing out due to netlink error[s]");
+ return ret;
+}
+
+static int genl_parse_getfamily(struct nlmsghdr *nlh)
+{
+ struct genlmsghdr *ghdr = NLMSG_DATA(nlh);
+ int len = nlh->nlmsg_len;
+ struct rtattr *attrs;
+
+ if (nlh->nlmsg_type != GENL_ID_CTRL)
+ error(1, errno, "Not a controller message, len=%d type=0x%x\n",
+ nlh->nlmsg_len, nlh->nlmsg_type);
+
+ len -= NLMSG_LENGTH(GENL_HDRLEN);
+
+ if (len < 0)
+ error(1, errno, "wrong controller message len %d\n", len);
+
+ if (ghdr->cmd != CTRL_CMD_NEWFAMILY)
+ error(1, errno, "Unknown controller command %d\n", ghdr->cmd);
+
+ attrs = (struct rtattr *) ((char *) ghdr + GENL_HDRLEN);
+ while (RTA_OK(attrs, len)) {
+ if (attrs->rta_type == CTRL_ATTR_FAMILY_ID)
+ return *(__u16 *)RTA_DATA(attrs);
+ attrs = RTA_NEXT(attrs, len);
+ }
+
+ error(1, errno, "can't find CTRL_ATTR_FAMILY_ID attr");
+ return -1;
+}
+
+static int resolve_mptcp_pm_netlink(int fd)
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ struct nlmsghdr *nh;
+ struct rtattr *rta;
+ int namelen;
+ int off = 0;
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, GENL_ID_CTRL, CTRL_CMD_GETFAMILY, 0);
+
+ rta = (void *)(data + off);
+ namelen = strlen(MPTCP_PM_NAME) + 1;
+ rta->rta_type = CTRL_ATTR_FAMILY_NAME;
+ rta->rta_len = RTA_LENGTH(namelen);
+ memcpy(RTA_DATA(rta), MPTCP_PM_NAME, namelen);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ do_nl_req(fd, nh, off, sizeof(data));
+ return genl_parse_getfamily((void *)data);
+}
+
+int add_addr(int fd, int pm_family, int argc, char *argv[])
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ struct rtattr *rta, *nest;
+ struct nlmsghdr *nh;
+ u_int16_t family;
+ u_int32_t flags;
+ int nest_start;
+ u_int8_t id;
+ int off = 0;
+ int arg;
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, pm_family, MPTCP_PM_CMD_ADD_ADDR,
+ MPTCP_PM_VER);
+
+ if (argc < 3)
+ syntax(argv);
+
+ nest_start = off;
+ nest = (void *)(data + off);
+ nest->rta_type = NLA_F_NESTED | MPTCP_PM_ATTR_ADDR;
+ nest->rta_len = RTA_LENGTH(0);
+ off += NLMSG_ALIGN(nest->rta_len);
+
+ /* addr data */
+ rta = (void *)(data + off);
+ if (inet_pton(AF_INET, argv[2], RTA_DATA(rta))) {
+ family = AF_INET;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
+ rta->rta_len = RTA_LENGTH(4);
+ } else if (inet_pton(AF_INET6, argv[2], RTA_DATA(rta))) {
+ family = AF_INET6;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
+ rta->rta_len = RTA_LENGTH(16);
+ } else
+ error(1, errno, "can't parse ip %s", argv[2]);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ /* family */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &family, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ for (arg = 3; arg < argc; arg++) {
+ if (!strcmp(argv[arg], "flags")) {
+ char *tok, *str;
+
+ /* flags */
+ flags = 0;
+ if (++arg >= argc)
+ error(1, 0, " missing flags value");
+
+ /* do not support flag list yet */
+ for (str = argv[arg]; (tok = strtok(str, ","));
+ str = NULL) {
+ if (!strcmp(tok, "subflow"))
+ flags |= MPTCP_PM_ADDR_FLAG_SUBFLOW;
+ else if (!strcmp(tok, "signal"))
+ flags |= MPTCP_PM_ADDR_FLAG_SIGNAL;
+ else if (!strcmp(tok, "backup"))
+ flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
+ else
+ error(1, errno,
+ "unknown flag %s", argv[arg]);
+ }
+
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_FLAGS;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &flags, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+ } else if (!strcmp(argv[arg], "id")) {
+ if (++arg >= argc)
+ error(1, 0, " missing id value");
+
+ id = atoi(argv[arg]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ID;
+ rta->rta_len = RTA_LENGTH(1);
+ memcpy(RTA_DATA(rta), &id, 1);
+ off += NLMSG_ALIGN(rta->rta_len);
+ } else if (!strcmp(argv[arg], "dev")) {
+ int32_t ifindex;
+
+ if (++arg >= argc)
+ error(1, 0, " missing dev name");
+
+ ifindex = if_nametoindex(argv[arg]);
+ if (!ifindex)
+ error(1, errno, "unknown device %s", argv[arg]);
+
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_IF_IDX;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &ifindex, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+ } else
+ error(1, 0, "unknown keyword %s", argv[arg]);
+ }
+ nest->rta_len = off - nest_start;
+
+ do_nl_req(fd, nh, off, 0);
+ return 0;
+}
+
+int del_addr(int fd, int pm_family, int argc, char *argv[])
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ struct rtattr *rta, *nest;
+ struct nlmsghdr *nh;
+ int nest_start;
+ u_int8_t id;
+ int off = 0;
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, pm_family, MPTCP_PM_CMD_DEL_ADDR,
+ MPTCP_PM_VER);
+
+ /* the only argument is the address id */
+ if (argc != 3)
+ syntax(argv);
+
+ id = atoi(argv[2]);
+
+ nest_start = off;
+ nest = (void *)(data + off);
+ nest->rta_type = NLA_F_NESTED | MPTCP_PM_ATTR_ADDR;
+ nest->rta_len = RTA_LENGTH(0);
+ off += NLMSG_ALIGN(nest->rta_len);
+
+ /* build a dummy addr with only the ID set */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ID;
+ rta->rta_len = RTA_LENGTH(1);
+ memcpy(RTA_DATA(rta), &id, 1);
+ off += NLMSG_ALIGN(rta->rta_len);
+ nest->rta_len = off - nest_start;
+
+ do_nl_req(fd, nh, off, 0);
+ return 0;
+}
+
+static void print_addr(struct rtattr *attrs, int len)
+{
+ uint16_t family = 0;
+ char str[1024];
+ uint32_t flags;
+ uint8_t id;
+
+ while (RTA_OK(attrs, len)) {
+ if (attrs->rta_type == MPTCP_PM_ADDR_ATTR_FAMILY)
+ memcpy(&family, RTA_DATA(attrs), 2);
+ if (attrs->rta_type == MPTCP_PM_ADDR_ATTR_ADDR4) {
+ if (family != AF_INET)
+ error(1, errno, "wrong IP (v4) for family %d",
+ family);
+ inet_ntop(AF_INET, RTA_DATA(attrs), str, sizeof(str));
+ printf("%s", str);
+ }
+ if (attrs->rta_type == MPTCP_PM_ADDR_ATTR_ADDR6) {
+ if (family != AF_INET6)
+ error(1, errno, "wrong IP (v6) for family %d",
+ family);
+ inet_ntop(AF_INET6, RTA_DATA(attrs), str, sizeof(str));
+ printf("%s", str);
+ }
+ if (attrs->rta_type == MPTCP_PM_ADDR_ATTR_ID) {
+ memcpy(&id, RTA_DATA(attrs), 1);
+ printf("id %d ", id);
+ }
+ if (attrs->rta_type == MPTCP_PM_ADDR_ATTR_FLAGS) {
+ memcpy(&flags, RTA_DATA(attrs), 4);
+
+ printf("flags ");
+ if (flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
+ printf("signal");
+ flags &= ~MPTCP_PM_ADDR_FLAG_SIGNAL;
+ if (flags)
+ printf(",");
+ }
+
+ if (flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
+ printf("subflow");
+ flags &= ~MPTCP_PM_ADDR_FLAG_SUBFLOW;
+ if (flags)
+ printf(",");
+ }
+
+ if (flags & MPTCP_PM_ADDR_FLAG_BACKUP) {
+ printf("backup");
+ flags &= ~MPTCP_PM_ADDR_FLAG_BACKUP;
+ if (flags)
+ printf(",");
+ }
+
+ /* print unknown flags, if any */
+ if (flags)
+ printf("0x%x", flags);
+ printf(" ");
+ }
+ if (attrs->rta_type == MPTCP_PM_ADDR_ATTR_IF_IDX) {
+ char name[IF_NAMESIZE], *ret;
+ int32_t ifindex;
+
+ memcpy(&ifindex, RTA_DATA(attrs), 4);
+ ret = if_indextoname(ifindex, name);
+ if (ret)
+ printf("dev %s ", ret);
+ else
+ printf("dev unknown/%d", ifindex);
+ }
+
+ attrs = RTA_NEXT(attrs, len);
+ }
+ printf("\n");
+}
+
+static void print_addrs(struct nlmsghdr *nh, int pm_family, int total_len)
+{
+ struct rtattr *attrs;
+
+ for (; NLMSG_OK(nh, total_len); nh = NLMSG_NEXT(nh, total_len)) {
+ int len = nh->nlmsg_len;
+
+ if (nh->nlmsg_type == NLMSG_DONE)
+ break;
+ if (nh->nlmsg_type == NLMSG_ERROR)
+ nl_error(nh);
+ if (nh->nlmsg_type != pm_family)
+ continue;
+
+ len -= NLMSG_LENGTH(GENL_HDRLEN);
+ attrs = (struct rtattr *) ((char *) NLMSG_DATA(nh) +
+ GENL_HDRLEN);
+ while (RTA_OK(attrs, len)) {
+ if (attrs->rta_type ==
+ (MPTCP_PM_ATTR_ADDR | NLA_F_NESTED))
+ print_addr((void *)RTA_DATA(attrs),
+ attrs->rta_len);
+ attrs = RTA_NEXT(attrs, len);
+ }
+ }
+}
+
+int get_addr(int fd, int pm_family, int argc, char *argv[])
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ struct rtattr *rta, *nest;
+ struct nlmsghdr *nh;
+ int nest_start;
+ u_int8_t id;
+ int off = 0;
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, pm_family, MPTCP_PM_CMD_GET_ADDR,
+ MPTCP_PM_VER);
+
+ /* the only argument is the address id */
+ if (argc != 3)
+ syntax(argv);
+
+ id = atoi(argv[2]);
+
+ nest_start = off;
+ nest = (void *)(data + off);
+ nest->rta_type = NLA_F_NESTED | MPTCP_PM_ATTR_ADDR;
+ nest->rta_len = RTA_LENGTH(0);
+ off += NLMSG_ALIGN(nest->rta_len);
+
+ /* build a dummy addr with only the ID set */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ID;
+ rta->rta_len = RTA_LENGTH(1);
+ memcpy(RTA_DATA(rta), &id, 1);
+ off += NLMSG_ALIGN(rta->rta_len);
+ nest->rta_len = off - nest_start;
+
+ print_addrs(nh, pm_family, do_nl_req(fd, nh, off, sizeof(data)));
+ return 0;
+}
+
+int dump_addrs(int fd, int pm_family, int argc, char *argv[])
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ pid_t pid = getpid();
+ struct nlmsghdr *nh;
+ int off = 0;
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, pm_family, MPTCP_PM_CMD_GET_ADDR,
+ MPTCP_PM_VER);
+ nh->nlmsg_flags |= NLM_F_DUMP;
+ nh->nlmsg_seq = 1;
+ nh->nlmsg_pid = pid;
+ nh->nlmsg_len = off;
+
+ print_addrs(nh, pm_family, do_nl_req(fd, nh, off, sizeof(data)));
+ return 0;
+}
+
+int flush_addrs(int fd, int pm_family, int argc, char *argv[])
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ struct nlmsghdr *nh;
+ int off = 0;
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, pm_family, MPTCP_PM_CMD_FLUSH_ADDRS,
+ MPTCP_PM_VER);
+
+ do_nl_req(fd, nh, off, 0);
+ return 0;
+}
+
+static void print_limits(struct nlmsghdr *nh, int pm_family, int total_len)
+{
+ struct rtattr *attrs;
+ uint32_t max;
+
+ for (; NLMSG_OK(nh, total_len); nh = NLMSG_NEXT(nh, total_len)) {
+ int len = nh->nlmsg_len;
+
+ if (nh->nlmsg_type == NLMSG_DONE)
+ break;
+ if (nh->nlmsg_type == NLMSG_ERROR)
+ nl_error(nh);
+ if (nh->nlmsg_type != pm_family)
+ continue;
+
+ len -= NLMSG_LENGTH(GENL_HDRLEN);
+ attrs = (struct rtattr *) ((char *) NLMSG_DATA(nh) +
+ GENL_HDRLEN);
+ while (RTA_OK(attrs, len)) {
+ int type = attrs->rta_type;
+
+ if (type != MPTCP_PM_ATTR_RCV_ADD_ADDRS &&
+ type != MPTCP_PM_ATTR_SUBFLOWS)
+ goto next;
+
+ memcpy(&max, RTA_DATA(attrs), 4);
+ printf("%s %u\n", type == MPTCP_PM_ATTR_SUBFLOWS ?
+ "subflows" : "accept", max);
+
+next:
+ attrs = RTA_NEXT(attrs, len);
+ }
+ }
+}
+
+int get_set_limits(int fd, int pm_family, int argc, char *argv[])
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ uint32_t rcv_addr = 0, subflows = 0;
+ int cmd, len = sizeof(data);
+ struct nlmsghdr *nh;
+ int off = 0;
+
+ /* limit */
+ if (argc == 4) {
+ rcv_addr = atoi(argv[2]);
+ subflows = atoi(argv[3]);
+ cmd = MPTCP_PM_CMD_SET_LIMITS;
+ } else {
+ cmd = MPTCP_PM_CMD_GET_LIMITS;
+ }
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, pm_family, cmd, MPTCP_PM_VER);
+
+ /* limit */
+ if (cmd == MPTCP_PM_CMD_SET_LIMITS) {
+ struct rtattr *rta = (void *)(data + off);
+
+ rta->rta_type = MPTCP_PM_ATTR_RCV_ADD_ADDRS;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &rcv_addr, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_SUBFLOWS;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &subflows, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ /* do not expect a reply */
+ len = 0;
+ }
+
+ len = do_nl_req(fd, nh, off, len);
+ if (cmd == MPTCP_PM_CMD_GET_LIMITS)
+ print_limits(nh, pm_family, len);
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ int fd, pm_family;
+
+ if (argc < 2)
+ syntax(argv);
+
+ fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
+ if (fd == -1)
+ error(1, errno, "socket netlink");
+
+ pm_family = resolve_mptcp_pm_netlink(fd);
+
+ if (!strcmp(argv[1], "add"))
+ return add_addr(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "del"))
+ return del_addr(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "flush"))
+ return flush_addrs(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "get"))
+ return get_addr(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "dump"))
+ return dump_addrs(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "limits"))
+ return get_set_limits(fd, pm_family, argc, argv);
+
+ fprintf(stderr, "unknown sub-command: %s", argv[1]);
+ syntax(argv);
+ return 0;
+}
diff --git a/tools/testing/selftests/net/reuseaddr_ports_exhausted.c b/tools/testing/selftests/net/reuseaddr_ports_exhausted.c
new file mode 100644
index 000000000000..7b01b7c2ec10
--- /dev/null
+++ b/tools/testing/selftests/net/reuseaddr_ports_exhausted.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Check if we can fully utilize 4-tuples for connect().
+ *
+ * Rules to bind sockets to the same port when all ephemeral ports are
+ * exhausted.
+ *
+ * 1. if there are TCP_LISTEN sockets on the port, fail to bind.
+ * 2. if there are sockets without SO_REUSEADDR, fail to bind.
+ * 3. if SO_REUSEADDR is disabled, fail to bind.
+ * 4. if SO_REUSEADDR is enabled and SO_REUSEPORT is disabled,
+ * succeed to bind.
+ * 5. if SO_REUSEADDR and SO_REUSEPORT are enabled and
+ * there is no socket having the both options and the same EUID,
+ * succeed to bind.
+ * 6. fail to bind.
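+ *
+ * For example, when both sockets have SO_REUSEADDR and SO_REUSEPORT set
+ * under the same EUID, rule 5 does not apply and the second bind() fails
+ * by rule 6; reuseaddr_ports_exhausted_reusable_same_euid below expects
+ * exactly that.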
+ *
+ * Author: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
+ */
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include "../kselftest_harness.h"
+
+struct reuse_opts {
+ int reuseaddr[2];
+ int reuseport[2];
+};
+
+struct reuse_opts unreusable_opts[12] = {
+ {{0, 0}, {0, 0}},
+ {{0, 0}, {0, 1}},
+ {{0, 0}, {1, 0}},
+ {{0, 0}, {1, 1}},
+ {{0, 1}, {0, 0}},
+ {{0, 1}, {0, 1}},
+ {{0, 1}, {1, 0}},
+ {{0, 1}, {1, 1}},
+ {{1, 0}, {0, 0}},
+ {{1, 0}, {0, 1}},
+ {{1, 0}, {1, 0}},
+ {{1, 0}, {1, 1}},
+};
+
+struct reuse_opts reusable_opts[4] = {
+ {{1, 1}, {0, 0}},
+ {{1, 1}, {0, 1}},
+ {{1, 1}, {1, 0}},
+ {{1, 1}, {1, 1}},
+};
+
+int bind_port(struct __test_metadata *_metadata, int reuseaddr, int reuseport)
+{
+ struct sockaddr_in local_addr;
+ int len = sizeof(local_addr);
+ int fd, ret;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ ASSERT_NE(-1, fd) TH_LOG("failed to open socket.");
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr, sizeof(int));
+ ASSERT_EQ(0, ret) TH_LOG("failed to setsockopt: SO_REUSEADDR.");
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuseport, sizeof(int));
+ ASSERT_EQ(0, ret) TH_LOG("failed to setsockopt: SO_REUSEPORT.");
+
+ local_addr.sin_family = AF_INET;
+ local_addr.sin_addr.s_addr = inet_addr("127.0.0.1");
+ local_addr.sin_port = 0;
+
+ if (bind(fd, (struct sockaddr *)&local_addr, len) == -1) {
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
+TEST(reuseaddr_ports_exhausted_unreusable)
+{
+ struct reuse_opts *opts;
+ int i, j, fd[2];
+
+ for (i = 0; i < 12; i++) {
+ opts = &unreusable_opts[i];
+
+ for (j = 0; j < 2; j++)
+ fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]);
+
+ ASSERT_NE(-1, fd[0]) TH_LOG("failed to bind.");
+ EXPECT_EQ(-1, fd[1]) TH_LOG("should fail to bind.");
+
+ for (j = 0; j < 2; j++)
+ if (fd[j] != -1)
+ close(fd[j]);
+ }
+}
+
+TEST(reuseaddr_ports_exhausted_reusable_same_euid)
+{
+ struct reuse_opts *opts;
+ int i, j, fd[2];
+
+ for (i = 0; i < 4; i++) {
+ opts = &reusable_opts[i];
+
+ for (j = 0; j < 2; j++)
+ fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]);
+
+ ASSERT_NE(-1, fd[0]) TH_LOG("failed to bind.");
+
+ if (opts->reuseport[0] && opts->reuseport[1]) {
+ EXPECT_EQ(-1, fd[1]) TH_LOG("should fail to bind because both sockets succeed to be listened.");
+ } else {
+ EXPECT_NE(-1, fd[1]) TH_LOG("should succeed to bind to connect to different destinations.");
+ }
+
+ for (j = 0; j < 2; j++)
+ if (fd[j] != -1)
+ close(fd[j]);
+ }
+}
+
+TEST(reuseaddr_ports_exhausted_reusable_different_euid)
+{
+ struct reuse_opts *opts;
+ int i, j, ret, fd[2];
+ uid_t euid[2] = {10, 20};
+
+ for (i = 0; i < 4; i++) {
+ opts = &reusable_opts[i];
+
+ for (j = 0; j < 2; j++) {
+ ret = seteuid(euid[j]);
+ ASSERT_EQ(0, ret) TH_LOG("failed to seteuid: %d.", euid[j]);
+
+ fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]);
+
+ ret = seteuid(0);
+ ASSERT_EQ(0, ret) TH_LOG("failed to seteuid: 0.");
+ }
+
+ ASSERT_NE(-1, fd[0]) TH_LOG("failed to bind.");
+ EXPECT_NE(-1, fd[1]) TH_LOG("should succeed to bind because one socket can be bound in each euid.");
+
+ if (fd[1] != -1) {
+ ret = listen(fd[0], 5);
+ ASSERT_EQ(0, ret) TH_LOG("failed to listen.");
+
+ ret = listen(fd[1], 5);
+ EXPECT_EQ(-1, ret) TH_LOG("should fail to listen because only one uid reserves the port in TCP_LISTEN.");
+ }
+
+ for (j = 0; j < 2; j++)
+ if (fd[j] != -1)
+ close(fd[j]);
+ }
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/reuseaddr_ports_exhausted.sh b/tools/testing/selftests/net/reuseaddr_ports_exhausted.sh
new file mode 100755
index 000000000000..20e3a2913d06
--- /dev/null
+++ b/tools/testing/selftests/net/reuseaddr_ports_exhausted.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Run tests when all ephemeral ports are exhausted.
+#
+# Author: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
+
+set +x
+set -e
+
+readonly NETNS="ns-$(mktemp -u XXXXXX)"
+
+setup() {
+ ip netns add "${NETNS}"
+ ip -netns "${NETNS}" link set lo up
+ ip netns exec "${NETNS}" \
+ sysctl -w net.ipv4.ip_local_port_range="32768 32768" \
+ > /dev/null 2>&1
+ ip netns exec "${NETNS}" \
+ sysctl -w net.ipv4.ip_autobind_reuse=1 > /dev/null 2>&1
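+ # The ephemeral port range is a single port, so it is exhausted by the
+ # first autobind; ip_autobind_reuse=1 then lets bind() pick the
+ # already-used port for sockets that set SO_REUSEADDR.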
+}
+
+cleanup() {
+ ip netns del "${NETNS}"
+}
+
+trap cleanup EXIT
+setup
+
+do_test() {
+ ip netns exec "${NETNS}" ./reuseaddr_ports_exhausted
+}
+
+do_test
+echo "tests done"
diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/net/rxtimestamp.c
index 6dee9e636a95..6dee9e636a95 100644
--- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c
+++ b/tools/testing/selftests/net/rxtimestamp.c
diff --git a/tools/testing/selftests/networking/timestamping/timestamping.c b/tools/testing/selftests/net/timestamping.c
index aca3491174a1..aca3491174a1 100644
--- a/tools/testing/selftests/networking/timestamping/timestamping.c
+++ b/tools/testing/selftests/net/timestamping.c
diff --git a/tools/testing/selftests/networking/timestamping/txtimestamp.c b/tools/testing/selftests/net/txtimestamp.c
index 7e386be47120..011b0da6b033 100644
--- a/tools/testing/selftests/networking/timestamping/txtimestamp.c
+++ b/tools/testing/selftests/net/txtimestamp.c
@@ -41,6 +41,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/socket.h>
@@ -49,6 +50,10 @@
#include <time.h>
#include <unistd.h>
+#define NSEC_PER_USEC 1000L
+#define USEC_PER_SEC 1000000L
+#define NSEC_PER_SEC 1000000000LL
+
/* command line parameters */
static int cfg_proto = SOCK_STREAM;
static int cfg_ipproto = IPPROTO_TCP;
@@ -61,12 +66,16 @@ static int cfg_delay_snd;
static int cfg_delay_ack;
static bool cfg_show_payload;
static bool cfg_do_pktinfo;
+static bool cfg_busy_poll;
+static int cfg_sleep_usec = 50 * 1000;
static bool cfg_loop_nodata;
-static bool cfg_no_delay;
static bool cfg_use_cmsg;
static bool cfg_use_pf_packet;
+static bool cfg_use_epoll;
+static bool cfg_epollet;
static bool cfg_do_listen;
static uint16_t dest_port = 9000;
+static bool cfg_print_nsec;
static struct sockaddr_in daddr;
static struct sockaddr_in6 daddr6;
@@ -75,11 +84,48 @@ static struct timespec ts_usr;
static int saved_tskey = -1;
static int saved_tskey_type = -1;
+struct timing_event {
+ int64_t min;
+ int64_t max;
+ int64_t total;
+ int count;
+};
+
+static struct timing_event usr_enq;
+static struct timing_event usr_snd;
+static struct timing_event usr_ack;
+
static bool test_failed;
+static int64_t timespec_to_ns64(struct timespec *ts)
+{
+ return ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
+}
+
static int64_t timespec_to_us64(struct timespec *ts)
{
- return ts->tv_sec * 1000 * 1000 + ts->tv_nsec / 1000;
+ return ts->tv_sec * USEC_PER_SEC + ts->tv_nsec / NSEC_PER_USEC;
+}
+
+static void init_timing_event(struct timing_event *te)
+{
+ te->min = INT64_MAX;
+ te->max = 0;
+ te->total = 0;
+ te->count = 0;
+}
+
+static void add_timing_event(struct timing_event *te,
+ struct timespec *t_start, struct timespec *t_end)
+{
+ int64_t ts_delta = timespec_to_ns64(t_end) - timespec_to_ns64(t_start);
+
+ te->count++;
+ if (ts_delta < te->min)
+ te->min = ts_delta;
+ if (ts_delta > te->max)
+ te->max = ts_delta;
+ te->total += ts_delta;
}
static void validate_key(int tskey, int tstype)
@@ -113,25 +159,43 @@ static void validate_timestamp(struct timespec *cur, int min_delay)
start64 = timespec_to_us64(&ts_usr);
if (cur64 < start64 + min_delay || cur64 > start64 + max_delay) {
- fprintf(stderr, "ERROR: delay %lu expected between %d and %d\n",
+ fprintf(stderr, "ERROR: %lu us expected between %d and %d\n",
cur64 - start64, min_delay, max_delay);
test_failed = true;
}
}
+static void __print_ts_delta_formatted(int64_t ts_delta)
+{
+ if (cfg_print_nsec)
+ fprintf(stderr, "%lu ns", ts_delta);
+ else
+ fprintf(stderr, "%lu us", ts_delta / NSEC_PER_USEC);
+}
+
static void __print_timestamp(const char *name, struct timespec *cur,
uint32_t key, int payload_len)
{
+ int64_t ts_delta;
+
if (!(cur->tv_sec | cur->tv_nsec))
return;
- fprintf(stderr, " %s: %lu s %lu us (seq=%u, len=%u)",
- name, cur->tv_sec, cur->tv_nsec / 1000,
- key, payload_len);
-
- if (cur != &ts_usr)
- fprintf(stderr, " (USR %+" PRId64 " us)",
- timespec_to_us64(cur) - timespec_to_us64(&ts_usr));
+ if (cfg_print_nsec)
+ fprintf(stderr, " %s: %lu s %lu ns (seq=%u, len=%u)",
+ name, cur->tv_sec, cur->tv_nsec,
+ key, payload_len);
+ else
+ fprintf(stderr, " %s: %lu s %lu us (seq=%u, len=%u)",
+ name, cur->tv_sec, cur->tv_nsec / NSEC_PER_USEC,
+ key, payload_len);
+
+ if (cur != &ts_usr) {
+ ts_delta = timespec_to_ns64(cur) - timespec_to_ns64(&ts_usr);
+ fprintf(stderr, " (USR +");
+ __print_ts_delta_formatted(ts_delta);
+ fprintf(stderr, ")");
+ }
fprintf(stderr, "\n");
}
@@ -155,14 +219,17 @@ static void print_timestamp(struct scm_timestamping *tss, int tstype,
case SCM_TSTAMP_SCHED:
tsname = " ENQ";
validate_timestamp(&tss->ts[0], 0);
+ add_timing_event(&usr_enq, &ts_usr, &tss->ts[0]);
break;
case SCM_TSTAMP_SND:
tsname = " SND";
validate_timestamp(&tss->ts[0], cfg_delay_snd);
+ add_timing_event(&usr_snd, &ts_usr, &tss->ts[0]);
break;
case SCM_TSTAMP_ACK:
tsname = " ACK";
validate_timestamp(&tss->ts[0], cfg_delay_ack);
+ add_timing_event(&usr_ack, &ts_usr, &tss->ts[0]);
break;
default:
error(1, 0, "unknown timestamp type: %u",
@@ -171,6 +238,21 @@ static void print_timestamp(struct scm_timestamping *tss, int tstype,
__print_timestamp(tsname, &tss->ts[0], tskey, payload_len);
}
+static void print_timing_event(char *name, struct timing_event *te)
+{
+ if (!te->count)
+ return;
+
+ fprintf(stderr, " %s: count=%d", name, te->count);
+ fprintf(stderr, ", avg=");
+ __print_ts_delta_formatted((int64_t)(te->total / te->count));
+ fprintf(stderr, ", min=");
+ __print_ts_delta_formatted(te->min);
+ fprintf(stderr, ", max=");
+ __print_ts_delta_formatted(te->max);
+ fprintf(stderr, "\n");
+}
+
/* TODO: convert to check_and_print payload once API is stable */
static void print_payload(char *data, int len)
{
@@ -198,6 +280,17 @@ static void print_pktinfo(int family, int ifindex, void *saddr, void *daddr)
daddr ? inet_ntop(family, daddr, da, sizeof(da)) : "unknown");
}
+static void __epoll(int epfd)
+{
+ struct epoll_event events;
+ int ret;
+
+ memset(&events, 0, sizeof(events));
+ ret = epoll_wait(epfd, &events, 1, cfg_poll_timeout);
+ if (ret != 1)
+ error(1, errno, "epoll_wait");
+}
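+
+/* With -b (busy poll), both __poll() and __epoll() are skipped and
+ * do_test() simply keeps calling recv_errmsg() until the error queue
+ * drains.
+ */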
+
static void __poll(int fd)
{
struct pollfd pollfd;
@@ -391,7 +484,11 @@ static void do_test(int family, unsigned int report_opt)
struct msghdr msg;
struct iovec iov;
char *buf;
- int fd, i, val = 1, total_len;
+ int fd, i, val = 1, total_len, epfd = 0;
+
+ init_timing_event(&usr_enq);
+ init_timing_event(&usr_snd);
+ init_timing_event(&usr_ack);
total_len = cfg_payload_len;
if (cfg_use_pf_packet || cfg_proto == SOCK_RAW) {
@@ -418,6 +515,20 @@ static void do_test(int family, unsigned int report_opt)
if (fd < 0)
error(1, errno, "socket");
+ if (cfg_use_epoll) {
+ struct epoll_event ev;
+
+ memset(&ev, 0, sizeof(ev));
+ ev.data.fd = fd;
+ if (cfg_epollet)
+ ev.events |= EPOLLET;
+ epfd = epoll_create(1);
+ if (epfd <= 0)
+ error(1, errno, "epoll_create");
+ if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev))
+ error(1, errno, "epoll_ctl");
+ }
+
/* reset expected key on each new socket */
saved_tskey = -1;
@@ -525,19 +636,28 @@ static void do_test(int family, unsigned int report_opt)
error(1, errno, "send");
/* wait for all errors to be queued, else ACKs arrive OOO */
- if (!cfg_no_delay)
- usleep(50 * 1000);
+ if (cfg_sleep_usec)
+ usleep(cfg_sleep_usec);
- __poll(fd);
+ if (!cfg_busy_poll) {
+ if (cfg_use_epoll)
+ __epoll(epfd);
+ else
+ __poll(fd);
+ }
while (!recv_errmsg(fd)) {}
}
+ print_timing_event("USR-ENQ", &usr_enq);
+ print_timing_event("USR-SND", &usr_snd);
+ print_timing_event("USR-ACK", &usr_ack);
+
if (close(fd))
error(1, errno, "close");
free(buf);
- usleep(100 * 1000);
+ usleep(100 * NSEC_PER_USEC);
}
static void __attribute__((noreturn)) usage(const char *filepath)
@@ -547,18 +667,22 @@ static void __attribute__((noreturn)) usage(const char *filepath)
" -4: only IPv4\n"
" -6: only IPv6\n"
" -h: show this message\n"
+ " -b: busy poll to read from error queue\n"
" -c N: number of packets for each test\n"
" -C: use cmsg to set tstamp recording options\n"
- " -D: no delay between packets\n"
- " -F: poll() waits forever for an event\n"
+ " -e: use level-triggered epoll() instead of poll()\n"
+ " -E: use event-triggered epoll() instead of poll()\n"
+ " -F: poll()/epoll() waits forever for an event\n"
" -I: request PKTINFO\n"
" -l N: send N bytes at a time\n"
" -L listen on hostname and port\n"
" -n: set no-payload option\n"
+ " -N: print timestamps and durations in nsec (instead of usec)\n"
" -p N: connect to port N\n"
" -P: use PF_PACKET\n"
" -r: use raw\n"
" -R: use raw (IP_HDRINCL)\n"
+ " -S N: usec to sleep before reading error queue\n"
" -u: use udp\n"
" -v: validate SND delay (usec)\n"
" -V: validate ACK delay (usec)\n"
@@ -572,7 +696,8 @@ static void parse_opt(int argc, char **argv)
int proto_count = 0;
int c;
- while ((c = getopt(argc, argv, "46c:CDFhIl:Lnp:PrRuv:V:x")) != -1) {
+ while ((c = getopt(argc, argv,
+ "46bc:CeEFhIl:LnNp:PrRS:uv:V:x")) != -1) {
switch (c) {
case '4':
do_ipv6 = 0;
@@ -580,15 +705,21 @@ static void parse_opt(int argc, char **argv)
case '6':
do_ipv4 = 0;
break;
+ case 'b':
+ cfg_busy_poll = true;
+ break;
case 'c':
cfg_num_pkts = strtoul(optarg, NULL, 10);
break;
case 'C':
cfg_use_cmsg = true;
break;
- case 'D':
- cfg_no_delay = true;
+ case 'e':
+ cfg_use_epoll = true;
break;
+ case 'E':
+ cfg_use_epoll = true;
+ cfg_epollet = true;
+ break;
case 'F':
cfg_poll_timeout = -1;
break;
@@ -604,6 +735,9 @@ static void parse_opt(int argc, char **argv)
case 'n':
cfg_loop_nodata = true;
break;
+ case 'N':
+ cfg_print_nsec = true;
+ break;
case 'p':
dest_port = strtoul(optarg, NULL, 10);
break;
@@ -623,6 +757,9 @@ static void parse_opt(int argc, char **argv)
cfg_proto = SOCK_RAW;
cfg_ipproto = IPPROTO_RAW;
break;
+ case 'S':
+ cfg_sleep_usec = strtoul(optarg, NULL, 10);
+ break;
case 'u':
proto_count++;
cfg_proto = SOCK_DGRAM;
@@ -653,6 +790,8 @@ static void parse_opt(int argc, char **argv)
error(1, 0, "pass -P, -r, -R or -u, not multiple");
if (cfg_do_pktinfo && cfg_use_pf_packet)
error(1, 0, "cannot ask for pktinfo over pf_packet");
+ if (cfg_busy_poll && cfg_use_epoll)
+ error(1, 0, "pass epoll or busy_poll, not both");
if (optind != argc - 1)
error(1, 0, "missing required hostname argument");
diff --git a/tools/testing/selftests/networking/timestamping/txtimestamp.sh b/tools/testing/selftests/net/txtimestamp.sh
index df0d86ca72b7..eea6f5193693 100755
--- a/tools/testing/selftests/networking/timestamping/txtimestamp.sh
+++ b/tools/testing/selftests/net/txtimestamp.sh
@@ -43,15 +43,40 @@ run_test_tcpudpraw() {
}
run_test_all() {
+ setup
run_test_tcpudpraw # setsockopt
run_test_tcpudpraw -C # cmsg
run_test_tcpudpraw -n # timestamp w/o data
+ echo "OK. All tests passed"
+}
+
+run_test_one() {
+ setup
+ ./txtimestamp "$@"
+}
+
+usage() {
+ echo "Usage: $0 [ -r | --run ] <txtimestamp args> | [ -h | --help ]"
+ echo " (no args) Run all tests"
+ echo " -r|--run Run an individual test with arguments"
+ echo " -h|--help Help"
+}
+
+main() {
+ if [[ $# -eq 0 ]]; then
+ run_test_all
+ else
+ if [[ "$1" = "-r" || "$1" == "--run" ]]; then
+ shift
+ run_test_one $@
+ else
+ usage
+ fi
+ fi
}
if [[ "$(ip netns identify)" == "root" ]]; then
- ../../net/in_netns.sh $0 $@
+ ./in_netns.sh $0 "$@"
else
- setup
- run_test_all
- echo "OK. All tests passed"
+ main "$@"
fi
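
With the new main() entry point the script still runs the full matrix by default, and -r/--run selects a single case; illustrative invocations (arguments are placeholders):

    ./txtimestamp.sh                            # run all tests inside a netns
    ./txtimestamp.sh -r -4 -u -c 10 192.0.2.1   # one case, custom txtimestamp args
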
diff --git a/tools/testing/selftests/networking/timestamping/.gitignore b/tools/testing/selftests/networking/timestamping/.gitignore
deleted file mode 100644
index d9355035e746..000000000000
--- a/tools/testing/selftests/networking/timestamping/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-timestamping
-rxtimestamp
-txtimestamp
-hwtstamp_config
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
deleted file mode 100644
index 1de8bd8ccf5d..000000000000
--- a/tools/testing/selftests/networking/timestamping/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-CFLAGS += -I../../../../../usr/include
-
-TEST_GEN_FILES := hwtstamp_config rxtimestamp timestamping txtimestamp
-TEST_PROGS := txtimestamp.sh
-
-all: $(TEST_PROGS)
-
-top_srcdir = ../../../../..
-KSFT_KHDR_INSTALL := 1
-include ../../lib.mk
diff --git a/tools/testing/selftests/networking/timestamping/config b/tools/testing/selftests/networking/timestamping/config
deleted file mode 100644
index a13e3169b0a4..000000000000
--- a/tools/testing/selftests/networking/timestamping/config
+++ /dev/null
@@ -1,2 +0,0 @@
-CONFIG_IFB=y
-CONFIG_NET_SCH_NETEM=y
diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config
index c03af4600281..c33a7aac27ff 100644
--- a/tools/testing/selftests/tc-testing/config
+++ b/tools/testing/selftests/tc-testing/config
@@ -31,6 +31,7 @@ CONFIG_NET_EMATCH_U32=m
CONFIG_NET_EMATCH_META=m
CONFIG_NET_EMATCH_TEXT=m
CONFIG_NET_EMATCH_IPSET=m
+CONFIG_NET_EMATCH_CANID=m
CONFIG_NET_EMATCH_IPT=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=m
@@ -58,3 +59,8 @@ CONFIG_NET_IFE_SKBPRIO=m
CONFIG_NET_IFE_SKBTCINDEX=m
CONFIG_NET_SCH_FIFO=y
CONFIG_NET_SCH_ETS=m
+
+#
+# Network testing
+#
+CONFIG_CAN=m
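
The canid ematch tests added below depend on these symbols; assuming a configured kernel tree, scripts/config can set them (a sketch, not part of the patch):

    ./scripts/config -m CAN -m NET_EMATCH_CANID
    make olddefconfig
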
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json b/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
index 98a20faf3198..e788c114a484 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/basic.json
@@ -372,5 +372,907 @@
"teardown": [
"$TC qdisc del dev $DEV1 ingress"
]
+ },
+ {
+ "id": "bae4",
+ "name": "Add basic filter with u32 ematch u8/zero offset and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x11 0x0f at 0)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(01000000/0f000000 at 0\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "e6cb",
+ "name": "Add basic filter with u32 ematch u8/zero offset and invalid value >0xFF",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x1122 0x0f at 0)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(11220000/0f000000 at 0\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "7727",
+ "name": "Add basic filter with u32 ematch u8/positive offset and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x77 0x1f at 12)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(17000000/1f000000 at 12\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "a429",
+ "name": "Add basic filter with u32 ematch u8/invalid mask >0xFF",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x77 0xff00 at 12)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(77000000/ff000000 at 12\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "8373",
+ "name": "Add basic filter with u32 ematch u8/missing offset",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x77 0xff at)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(77000000 at 12\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "ab8e",
+ "name": "Add basic filter with u32 ematch u8/missing AT keyword",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x77 0xff 0)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(77000000 at 12\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "712d",
+ "name": "Add basic filter with u32 ematch u8/missing value",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 at 12)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(at 12\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "350f",
+ "name": "Add basic filter with u32 ematch u8/non-numeric value",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 zero 0xff at 0)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(00000000/ff000000 at 0\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "e28f",
+ "name": "Add basic filter with u32 ematch u8/non-numeric mask",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x11 mask at 0)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(11000000/00000000 at 0\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "6d5f",
+ "name": "Add basic filter with u32 ematch u8/negative offset and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0xaa 0xf0 at -14)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(0000a000/0000f000 at -16\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "12dc",
+ "name": "Add basic filter with u32 ematch u8/nexthdr+ offset and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0xaa 0xf0 at nexthdr+0)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(a0000000/f0000000 at nexthdr\\+0\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "1d85",
+ "name": "Add basic filter with u32 ematch u16/zero offset and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u16 0x1122 0xffff at 0)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(11220000/ffff0000 at 0\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "3672",
+ "name": "Add basic filter with u32 ematch u16/zero offset and invalid value >0xFFFF",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u16 0x112233 0xffff at 0)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(11223300/ffff0000 at 0\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "7fb0",
+ "name": "Add basic filter with u32 ematch u16/positive offset and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u16 0x7788 0x1fff at 12)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(17880000/1fff0000 at 12\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "19af",
+ "name": "Add basic filter with u32 ematch u16/invalid mask >0xFFFF",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u16 0x7788 0xffffffff at 12)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(77880000/ffffffff at 12\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "446d",
+ "name": "Add basic filter with u32 ematch u16/missing offset",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u16 0x7788 0xffff at)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(77880000 at 12\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "151b",
+ "name": "Add basic filter with u32 ematch u16/missing AT keyword",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u16 0x7788 0xffff 0)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(77880000/ffff0000 at 0\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "bb23",
+ "name": "Add basic filter with u32 ematch u16/missing value",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u16 at 12)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(at 12\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "decc",
+ "name": "Add basic filter with u32 ematch u16/non-numeric value",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u16 zero 0xffff at 0)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(00000000/ffff0000 at 0\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "e988",
+ "name": "Add basic filter with u32 ematch u16/non-numeric mask",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u8 0x1122 mask at 0)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(11220000/00000000 at 0\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "07d8",
+ "name": "Add basic filter with u32 ematch u16/negative offset and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u16 0xaabb 0xffff at -12)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(aabb0000/ffff0000 at -12\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "f474",
+ "name": "Add basic filter with u32 ematch u16/nexthdr+ offset and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u16 0xaabb 0xf0f0 at nexthdr+0)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(a0b00000/f0f00000 at nexthdr\\+0\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "47a0",
+ "name": "Add basic filter with u32 ematch u32/zero offset and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u32 0xaabbccdd 0xffffffff at 0)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(aabbccdd/ffffffff at 0\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "849f",
+ "name": "Add basic filter with u32 ematch u32/positive offset and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u32 0x11227788 0x1ffff0f0 at 12)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(11227080/1ffff0f0 at 12\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "d288",
+ "name": "Add basic filter with u32 ematch u32/missing offset",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u32 0x11227788 0xffffffff at)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(11227788/ffffffff at 12\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "4998",
+ "name": "Add basic filter with u32 ematch u32/missing AT keyword",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u32 0x77889900 0xfffff0f0 0)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(77889900/fffff0f0 at 0\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "1f0a",
+ "name": "Add basic filter with u32 ematch u32/missing value",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u32 at 12)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(at 12\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "848e",
+ "name": "Add basic filter with u32 ematch u32/non-numeric value",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u32 zero 0xffff at 0)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(00000000/ffff0000 at 0\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "f748",
+ "name": "Add basic filter with u32 ematch u32/non-numeric mask",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u32 0x11223344 mask at 0)' classid 1:1",
+ "expExitCode": "1",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(11223344/00000000 at 0\\)",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "55a6",
+ "name": "Add basic filter with u32 ematch u32/negative offset and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u32 0xaabbccdd 0xff00ff00 at -12)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(aa00cc00/ff00ff00 at -12\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "7282",
+ "name": "Add basic filter with u32 ematch u32/nexthdr+ offset and default action",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'u32(u32 0xaabbccdd 0xffffffff at nexthdr+0)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*u32\\(aabbccdd/ffffffff at nexthdr\\+0\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "b2b6",
+ "name": "Add basic filter with canid ematch and single SFF",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(sff 1)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(sff 0x1\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "f67f",
+ "name": "Add basic filter with canid ematch and single SFF with mask",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(sff 0xaabb:0x00ff)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(sff 0x2BB:0xFF\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "bd5c",
+ "name": "Add basic filter with canid ematch and multiple SFF",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(sff 1 sff 2 sff 3)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(sff 0x1 sff 0x2 sff 0x3\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "83c7",
+ "name": "Add basic filter with canid ematch and multiple SFF with masks",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(sff 0xaa:0x01 sff 0xbb:0x02 sff 0xcc:0x03)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(sff 0xAA:0x1 sff 0xBB:0x2 sff 0xCC:0x3\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "a8f5",
+ "name": "Add basic filter with canid ematch and single EFF",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(eff 1)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(eff 0x1\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "98ae",
+ "name": "Add basic filter with canid ematch and single EFF with mask",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(eff 0xaabb:0xf1f1)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(eff 0xAABB:0xF1F1\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "6056",
+ "name": "Add basic filter with canid ematch and multiple EFF",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(eff 1 eff 2 eff 3)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(eff 0x1 eff 0x2 eff 0x3\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "d188",
+ "name": "Add basic filter with canid ematch and multiple EFF with masks",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(eff 0xaa:0x01 eff 0xbb:0x02 eff 0xcc:0x03)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(eff 0xAA:0x1 eff 0xBB:0x2 eff 0xCC:0x3\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "25d1",
+ "name": "Add basic filter with canid ematch and a combination of SFF/EFF",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(sff 0x01 eff 0x02)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(eff 0x2 sff 0x1\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
+ },
+ {
+ "id": "b438",
+ "name": "Add basic filter with canid ematch and a combination of SFF/EFF with masks",
+ "category": [
+ "filter",
+ "basic"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$TC qdisc add dev $DEV1 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 basic match 'canid(sff 0x01:0xf eff 0x02:0xf)' classid 1:1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip basic",
+ "matchPattern": "^filter parent ffff: protocol ip pref 1 basic.*handle 0x1 flowid 1:1.*canid\\(eff 0x2:0xF sff 0x1:0xF\\)",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress"
+ ]
}
]
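
Outside the harness, the two ematch syntaxes exercised above take this shape as plain commands (eth0 stands in for $DEV1):

    # u32 ematch: <type> <value> <mask> at <offset | nexthdr+offset>
    tc filter add dev eth0 parent ffff: prio 1 protocol ip \
        basic match 'u32(u16 0x7788 0x1fff at 12)' classid 1:1
    # canid ematch: repeatable sff/eff IDs, each optionally with :mask
    tc filter add dev eth0 parent ffff: prio 1 protocol ip \
        basic match 'canid(sff 0x01:0xf eff 0x02:0xf)' classid 1:1
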
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/red.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/red.json
new file mode 100644
index 000000000000..0703a2a255eb
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/red.json
@@ -0,0 +1,185 @@
+[
+ {
+ "id": "8b6e",
+ "name": "Create RED with no flags",
+ "category": [
+ "qdisc",
+ "red"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link add dev $DUMMY type dummy || /bin/true"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red limit 1M avpkt 1500 min 100K max 300K",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc red 1: root .* limit 1Mb min 100Kb max 300Kb $",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root",
+ "$IP link del dev $DUMMY type dummy"
+ ]
+ },
+ {
+ "id": "342e",
+ "name": "Create RED with adaptive flag",
+ "category": [
+ "qdisc",
+ "red"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link add dev $DUMMY type dummy || /bin/true"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red adaptive limit 1M avpkt 1500 min 100K max 300K",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc red 1: root .* limit 1Mb min 100Kb max 300Kb adaptive $",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root",
+ "$IP link del dev $DUMMY type dummy"
+ ]
+ },
+ {
+ "id": "2d4b",
+ "name": "Create RED with ECN flag",
+ "category": [
+ "qdisc",
+ "red"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link add dev $DUMMY type dummy || /bin/true"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red ecn limit 1M avpkt 1500 min 100K max 300K",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc red 1: root .* limit 1Mb min 100Kb max 300Kb ecn $",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root",
+ "$IP link del dev $DUMMY type dummy"
+ ]
+ },
+ {
+ "id": "650f",
+ "name": "Create RED with flags ECN, adaptive",
+ "category": [
+ "qdisc",
+ "red"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link add dev $DUMMY type dummy || /bin/true"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red ecn adaptive limit 1M avpkt 1500 min 100K max 300K",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc red 1: root .* limit 1Mb min 100Kb max 300Kb ecn adaptive $",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root",
+ "$IP link del dev $DUMMY type dummy"
+ ]
+ },
+ {
+ "id": "5f15",
+ "name": "Create RED with flags ECN, harddrop",
+ "category": [
+ "qdisc",
+ "red"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link add dev $DUMMY type dummy || /bin/true"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red ecn harddrop limit 1M avpkt 1500 min 100K max 300K",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc red 1: root .* limit 1Mb min 100Kb max 300Kb ecn harddrop $",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root",
+ "$IP link del dev $DUMMY type dummy"
+ ]
+ },
+ {
+ "id": "53e8",
+ "name": "Create RED with flags ECN, nodrop",
+ "category": [
+ "qdisc",
+ "red"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link add dev $DUMMY type dummy || /bin/true"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red ecn nodrop limit 1M avpkt 1500 min 100K max 300K",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc red 1: root .* limit 1Mb min 100Kb max 300Kb ecn nodrop $",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root",
+ "$IP link del dev $DUMMY type dummy"
+ ]
+ },
+ {
+ "id": "d091",
+ "name": "Fail to create RED with only nodrop flag",
+ "category": [
+ "qdisc",
+ "red"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link add dev $DUMMY type dummy || /bin/true"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red nodrop limit 1M avpkt 1500 min 100K max 300K",
+ "expExitCode": "2",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc red",
+ "matchCount": "0",
+ "teardown": [
+ "$IP link del dev $DUMMY type dummy"
+ ]
+ },
+ {
+ "id": "af8e",
+ "name": "Create RED with flags ECN, nodrop, harddrop",
+ "category": [
+ "qdisc",
+ "red"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "$IP link add dev $DUMMY type dummy || /bin/true"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root red ecn harddrop nodrop limit 1M avpkt 1500 min 100K max 300K",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc red 1: root .* limit 1Mb min 100Kb max 300Kb ecn harddrop nodrop $",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root",
+ "$IP link del dev $DUMMY type dummy"
+ ]
+ }
+]
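
As a plain command, the accepted flag combinations above look like this (eth0 stands in for $DUMMY; per the d091 case, nodrop without ecn is rejected):

    tc qdisc add dev eth0 handle 1: root red ecn nodrop \
        limit 1M avpkt 1500 min 100K max 300K
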
diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index 1d8b93f1af31..5a4fb80fa832 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -55,6 +55,78 @@ static void test_stream_connection_reset(const struct test_opts *opts)
close(fd);
}
+static void test_stream_bind_only_client(const struct test_opts *opts)
+{
+ union {
+ struct sockaddr sa;
+ struct sockaddr_vm svm;
+ } addr = {
+ .svm = {
+ .svm_family = AF_VSOCK,
+ .svm_port = 1234,
+ .svm_cid = opts->peer_cid,
+ },
+ };
+ int ret;
+ int fd;
+
+ /* Wait for the server to be ready */
+ control_expectln("BIND");
+
+ fd = socket(AF_VSOCK, SOCK_STREAM, 0);
+ if (fd < 0) {
+ perror("socket");
+ exit(EXIT_FAILURE);
+ }
+
+ timeout_begin(TIMEOUT);
+ do {
+ ret = connect(fd, &addr.sa, sizeof(addr.svm));
+ timeout_check("connect");
+ } while (ret < 0 && errno == EINTR);
+ timeout_end();
+
+ if (ret != -1) {
+ fprintf(stderr, "expected connect(2) failure, got %d\n", ret);
+ exit(EXIT_FAILURE);
+ }
+ if (errno != ECONNRESET) {
+ fprintf(stderr, "unexpected connect(2) errno %d\n", errno);
+ exit(EXIT_FAILURE);
+ }
+
+ /* Notify the server that the client has finished */
+ control_writeln("DONE");
+
+ close(fd);
+}
+
+static void test_stream_bind_only_server(const struct test_opts *opts)
+{
+ union {
+ struct sockaddr sa;
+ struct sockaddr_vm svm;
+ } addr = {
+ .svm = {
+ .svm_family = AF_VSOCK,
+ .svm_port = 1234,
+ .svm_cid = VMADDR_CID_ANY,
+ },
+ };
+ int fd;
+
+ fd = socket(AF_VSOCK, SOCK_STREAM, 0);
+ if (fd < 0) {
+ perror("socket");
+ exit(EXIT_FAILURE);
+ }
+
+ if (bind(fd, &addr.sa, sizeof(addr.svm)) < 0) {
+ perror("bind");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Notify the client that the server is ready */
+ control_writeln("BIND");
+
+ /* Wait for the client to finish */
+ control_expectln("DONE");
+
+ close(fd);
+}
+
static void test_stream_client_close_client(const struct test_opts *opts)
{
int fd;
@@ -213,6 +285,11 @@ static struct test_case test_cases[] = {
.run_client = test_stream_connection_reset,
},
{
+ .name = "SOCK_STREAM bind only",
+ .run_client = test_stream_bind_only_client,
+ .run_server = test_stream_bind_only_server,
+ },
+ {
.name = "SOCK_STREAM client close",
.run_client = test_stream_client_close_client,
.run_server = test_stream_client_close_server,